observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \r\n\r\n\r\n\r\n\r\n\r\n\"\"\" #% ( curQ, )\r\n\r\n'''\r\ndef toFile() :\r\n\r\n\t#string = form.getvalue('answer')\r\n\tprint( form.getvalue('answer') )\r\n\r\n\ta = open ( 'C:\\answer.py' , 'w' )\r\n\ta.write( string )\r\n\r\n\ta.close()\r\n'''\r\n\r\nif 'submit' in form :\r\n\r\n\t#string = form.getvalue('answer')\r\n\r\n\ta = open ( 'answer.py' , 'w' )\r\n\ta.write( form.getvalue('answer') )\r\n\r\n\ta.close()\r\n\r\nelse : a=open('answer2.py', 'w')\r\n\r\nprint (z)\r\n\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\n\r\ndef session_in():\r\n\tcur.execute(\"insert into session values(?,?)\",(hno,pwd))\r\n\tcon.commit()\r\n\t\r\ndef register():\r\n\tcur.execute(\"insert into studentbase values(?,?,?,?,?)\",(Roll,Name,Password,Mobile,Email))\r\n\tcon.commit()\r\n\tos.system('/var/www/html/stud_reg_success.py')\r\n\r\ndef login():\r\n\tcur.execute(\"select roll,password from studentbase\")\r\n\ttemp=cur.fetchall()\r\n\tflag=0\r\n\tfor i in temp:\r\n\r\n\t\tif i[0]==hno and i[1]==pwd:\r\n\t\t\tflag=1\r\n\t\t\tc['username']=hno \r\n\t\t\tos.system('/var/www/html/stud_main.py')\r\n\t\t\tbreak\r\n\t\t\t\r\n\tif flag==0:\r\n\t\t\tos.system('/var/www/html/stud_login_fail.py')\r\n\t\t\t\r\n\t\t\t\r\n\t \r\nif form.getvalue('pwd') == form.getvalue('cpwd') :\r\n\t\tName=form.getvalue('Name')\r\n\t\tRoll=form.getvalue('Roll_no')\r\n\t\tPassword=form.getvalue('')\r\n\t\tMobile=form.getvalue('mobile')\r\n\t\tEmail=form.getvalue('Email')\r\n \r\nelse :\r\n\t os.system('/var/www/html/stud_login_reg_fail.py')\r\n\t \r\n\r\nif 'register' in form:\r\n\tregister()\r\n\t\r\n\t\t\t \r\n \r\n \r\nhno=form.getvalue('@username')\r\npwd=form.getvalue('@password')\r\n\r\n\r\n\r\n\r\nif 'login' in form :\r\n\t\tlogin()\r\n\r\n\r\nif Name == None and hno==None :\r\n\t\tprint(z)\r\ncon.close()\r\n\r\nprint(c.js_output())\r\n\r\n'''\r\n"},"size":{"kind":"number","value":4526,"string":"4,526"}}},{"rowIdx":127556,"cells":{"max_stars_repo_path":{"kind":"string","value":"gbmc_v0/tests/test_create_imgs.py"},"max_stars_repo_name":{"kind":"string","value":"leilakhalili87/gbmc_v0"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171242"},"content":{"kind":"string","value":"\nimport numpy as np\nimport pytest\nimport gbmc_v0.pad_dump_file as pdf\nimport gbmc_v0.util_funcs as uf\n\n\n@pytest.mark.parametrize('filename0, rCut, lat_par, non_p',\n [(\"data/dump_1\", 8.1, 4.05, 2),\n (\"data/dump_1\", 30, 4.05, 2),\n (\"data/dump_2\", 8.1, 4.05, 1)])\ndef test_create_imgs(filename0, rCut, lat_par, non_p):\n data = uf.compute_ovito_data(filename0)\n arr = pdf.p_arr(non_p)\n GbRegion, GbIndex, GbWidth, w_bottom_SC, w_top_SC = pdf.GB_finder(data, lat_par, non_p)\n sim_cell = data.cell[...]\n sim_1vec = np.array(sim_cell[:, arr[0]])\n sim_2vec = np.array(sim_cell[:, arr[1]])\n\n p1_vec = np.array([sim_1vec[arr[0]], sim_1vec[arr[1]]])\n p2_vec = np.array([sim_2vec[arr[0]], sim_2vec[arr[1]]])\n [n1, n2] = pdf.num_rep_2d(p1_vec, p2_vec, rCut)\n pts1, gb1_inds = pdf.pad_gb_perp(data, GbRegion, GbIndex, rCut, non_p)\n pts_w_imgs, inds_array = pdf.create_imgs(pts1, n1, n2, sim_1vec, sim_2vec, non_p)\n\n num0 = pts_w_imgs.shape[0]/pts1.shape[0]\n num1 = np.power(n1+n2+1, 2)\n assert np.allclose(num0, 
num1)\n"},"size":{"kind":"number","value":1120,"string":"1,120"}}},{"rowIdx":127557,"cells":{"max_stars_repo_path":{"kind":"string","value":"test/test_procmaps.py"},"max_stars_repo_name":{"kind":"string","value":"woodruffw/procmaps.py"},"max_stars_count":{"kind":"number","value":30,"string":"30"},"id":{"kind":"string","value":"2170727"},"content":{"kind":"string","value":"import os\nimport unittest\n\nimport procmaps\n\n\nclass TestProcmaps(unittest.TestCase):\n def check_map_properties(self, map_):\n self.assertIsInstance(map_.begin_address, int)\n self.assertIsInstance(map_.end_address, int)\n\n self.assertTrue(map_.begin_address in map_)\n self.assertFalse(map_.end_address in map_)\n\n self.assertIsInstance(map_.is_readable, bool)\n self.assertIsInstance(map_.is_writable, bool)\n self.assertIsInstance(map_.is_executable, bool)\n self.assertIsInstance(map_.is_shared, bool)\n self.assertIsInstance(map_.is_private, bool)\n self.assertIsInstance(map_.offset, int)\n self.assertIsInstance(map_.device, tuple)\n self.assertIsInstance(map_.device[0], int)\n self.assertIsInstance(map_.device[1], int)\n self.assertIsInstance(map_.inode, int)\n\n if map_.is_shared:\n self.assertFalse(map_.is_private)\n\n if map_.is_private:\n self.assertFalse(map_.is_shared)\n\n self.assertTrue(isinstance(map_.pathname, str) or map_.pathname is None)\n\n def test_from_pid(self):\n maps = procmaps.from_pid(os.getpid())\n for map_ in maps:\n self.check_map_properties(map_)\n\n def test_from_path(self):\n maps = procmaps.from_path(\"/proc/self/maps\")\n for map_ in maps:\n self.check_map_properties(map_)\n\n def test_from_str(self):\n maps = procmaps.from_str(\"55d5564b4000-55d5564b6000 r--p 00000000 08:11 6553896 /bin/cat\")\n self.assertEqual(len(maps), 1)\n self.check_map_properties(maps[0])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"},"size":{"kind":"number","value":1645,"string":"1,645"}}},{"rowIdx":127558,"cells":{"max_stars_repo_path":{"kind":"string","value":"digital-curling/named/network/hogehoge.py"},"max_stars_repo_name":{"kind":"string","value":"km-t/dcpython"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171353"},"content":{"kind":"string","value":"import numpy as np\n\na = np.arange(9).reshape(3, 3)\nb = np.zeros((3, 3))\nfor i in range(3):\n for j in range(3):\n b[i][j] = i+3*j\nprint(a)\nprint(b)\nc = np.array(a[0:3, 0:1]-b[0:3, 0:1])\nprint(c)\n"},"size":{"kind":"number","value":203,"string":"203"}}},{"rowIdx":127559,"cells":{"max_stars_repo_path":{"kind":"string","value":"ex8/ex8.py"},"max_stars_repo_name":{"kind":"string","value":"mlyundin/Machine-Learning"},"max_stars_count":{"kind":"number","value":13,"string":"13"},"id":{"kind":"string","value":"2171259"},"content":{"kind":"string","value":"import matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\nfrom numpy.linalg import det, pinv\r\nimport scipy.io as sio\r\n\r\ndef visualize_data(X, title):\r\n x1, x2 = X.T\r\n plt.plot(x1, x2, 'bx')\r\n plt.xlabel('Latency (ms)')\r\n plt.ylabel('Throughput (mb/s)')\r\n plt.xlim([0, 30])\r\n plt.ylim([0, 30])\r\n plt.title(title)\r\n return plt\r\n\r\ndef visualize_fit(X, mu, sigma2):\r\n visualize_data(X, 'Visualizing Gaussian fit.')\r\n x = np.arange(0, 35, 0.5)\r\n x1, x2 = np.meshgrid(x, x)\r\n\r\n z = multivariate_gaussian(np.hstack((x1.reshape(-1,1), x2.reshape(-1,1))), mu, sigma2).reshape(x1.shape)\r\n plt.contour(x1, x2, z)\r\n\r\n return plt\r\n\r\ndef estimate_gaussian(X):\r\n\r\n return np.mean(X, axis=0)[:, np.newaxis], np.var(X, 
axis=0)[:, np.newaxis]\r\n\r\ndef multivariate_gaussian(X, mu, Sigma2):\r\n k = float(len(mu))\r\n X = np.copy(X)\r\n\r\n if any(s == 1 for s in Sigma2.shape):\r\n Sigma2 = np.diag(Sigma2.ravel())\r\n\r\n X -= mu.reshape(1, -1)\r\n return (2*np.pi)**(-k/2)*det(Sigma2)**(-0.5)*np.exp(-0.5*np.sum(np.dot(X, pinv(Sigma2))*X, axis=1))\r\n\r\ndef select_threshold(yval, pval):\r\n yval = yval.ravel()\r\n\r\n best_epsilon = 0\r\n best_F1 = 0\r\n stepsize = (np.max(pval) - np.min(pval)) / 1000\r\n for epsilon in np.arange(np.min(pval), np.max(pval), stepsize):\r\n cvPredictions = pval < epsilon\r\n\r\n tp = np.sum((cvPredictions == 1) & (yval == 1), dtype=float)\r\n fp = np.sum((cvPredictions == 1) & (yval == 0))\r\n fn = np.sum((cvPredictions == 0) & (yval == 1))\r\n recall = tp/(tp+fn)\r\n precision = tp/(tp+fp)\r\n\r\n F1 = 2*recall*precision/(recall+precision)\r\n\r\n if F1 > best_F1:\r\n best_F1, best_epsilon = F1, epsilon\r\n\r\n return best_epsilon, best_F1\r\n\r\nif __name__ == '__main__':\r\n data = sio.loadmat('ex8data1.mat')\r\n X = data['X']\r\n Xval = data['Xval']\r\n yval = data['yval']\r\n\r\n visualize_data(X, 'Visualizing example dataset for outlier detection').show()\r\n\r\n mu, sigma2 = estimate_gaussian(X)\r\n p = multivariate_gaussian(X, mu, sigma2)\r\n visualize_fit(X, mu, sigma2).show()\r\n\r\n pval = multivariate_gaussian(Xval, mu, sigma2)\r\n epsilon, F1 = select_threshold(yval, pval)\r\n\r\n print('Best epsilon found using cross-validation: %s' % epsilon)\r\n print('Best F1 on Cross Validation Set: %s' % F1)\r\n print(' (you should see a value epsilon of about 8.99e-05)')\r\n\r\n visualize_data(X, 'The classified anomalies.')\r\n x1, x2 = X[p < epsilon, :].T\r\n plt.plot(x1, x2, 'ro')\r\n plt.show()\r\n\r\n data = sio.loadmat('ex8data2.mat')\r\n X = data['X']\r\n Xval = data['Xval']\r\n yval = data['yval']\r\n\r\n mu, sigma2 = estimate_gaussian(X)\r\n\r\n p = multivariate_gaussian(X, mu, sigma2)\r\n pval = multivariate_gaussian(Xval, mu, sigma2)\r\n epsilon, F1 = select_threshold(yval, pval)\r\n\r\n print('Best epsilon found using cross-validation: %s' % epsilon)\r\n print('Best F1 on Cross Validation Set: %s' % F1)\r\n print('# Outliers found: %s' % np.sum(p < epsilon))\r\n print(' (you should see a value epsilon of about 1.38e-18)')"},"size":{"kind":"number","value":3091,"string":"3,091"}}},{"rowIdx":127560,"cells":{"max_stars_repo_path":{"kind":"string","value":"meggie/actions/raw_ica/controller/ica.py"},"max_stars_repo_name":{"kind":"string","value":"Teekuningas/meggie"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2170086"},"content":{"kind":"string","value":"\"\"\" Contains controlling logic for the ICA.\n\"\"\"\n\nimport logging\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport mne\n\nfrom meggie.utilities.compare import compare_raws\n\n\ndef compute_ica(raw, n_components, method, max_iter, random_state):\n \"\"\" Computes ICA using MNE implementation.\n \"\"\"\n\n ica = mne.preprocessing.ICA(\n n_components=n_components,\n method=method,\n max_iter=max_iter,\n random_state=random_state)\n\n ica.fit(raw)\n return ica\n\n\ndef plot_topographies(ica, n_components):\n \"\"\" Plots topographies from the ICA solution.\n \"\"\"\n\n figs = ica.plot_components(title='')\n for fig in figs:\n fig.canvas.set_window_title('ICA topographic maps')\n\n def update_topography_texts():\n \"\"\" Change texts in the axes to match names in the dialog \"\"\"\n idx = 0\n for fig in figs:\n for ax in fig.get_axes():\n if idx > 
n_components:\n return\n\n ax.set_title('Component ' + str(idx), fontsize=12)\n idx += 1\n\n update_topography_texts()\n\n\ndef plot_sources(raw, ica):\n \"\"\" Plots sources of the ica solution.\n \"\"\"\n sources = ica.get_sources(raw)\n sources.plot(title='ICA time courses')\n\n\ndef plot_properties(raw, ica, picks):\n \"\"\" Plots properties for specific ICA components.\n \"\"\"\n figs = ica.plot_properties(\n raw, picks)\n for fig in figs:\n fig.canvas.set_window_title('ICA properties')\n\n # fix the names\n idx = 0\n for fig in figs:\n for ax_idx, ax in enumerate(fig.get_axes()):\n if ax_idx == 0:\n ax.set_title(\"Component \" + str(picks[idx]))\n idx += 1\n break\n\n\ndef plot_changes(raw, ica, indices):\n \"\"\" Plot a raw comparison plot for ICA solution.\n \"\"\"\n raw_removed = raw.copy()\n ica.apply(raw_removed, exclude=indices)\n compare_raws(raw, raw_removed)\n\n"},"size":{"kind":"number","value":1921,"string":"1,921"}}},{"rowIdx":127561,"cells":{"max_stars_repo_path":{"kind":"string","value":"algo/test/test_backtracking.py"},"max_stars_repo_name":{"kind":"string","value":"ssavinash1/Algorithm_stanford"},"max_stars_count":{"kind":"number","value":24,"string":"24"},"id":{"kind":"string","value":"2170619"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom src.backtracking import QueenPuzzle, TravelingSalesman, SubsetsOfGivenSum\nfrom src.graph import Graph\n\n\nclass TestBacktracking(unittest.TestCase):\n\n def test_queen_puzzle_3(self):\n puzzle = QueenPuzzle(3)\n puzzle.run()\n expected_solutions = []\n self.assertEqual(puzzle.solutions, expected_solutions,\n 'should not find any solutions for the three problem')\n\n def test_queen_puzzle_4(self):\n puzzle = QueenPuzzle(4)\n puzzle.run()\n expected_solutions = [[(0, 1), (1, 3), (2, 0), (3, 2)],\n [(0, 2), (1, 0), (2, 3), (3, 1)]]\n self.assertItemsEqual(puzzle.solutions, expected_solutions,\n 'should not find any solutions for the three problem')\n\n def test_queen_puzzle_8(self):\n puzzle = QueenPuzzle(8)\n puzzle.run()\n self.assertEqual(len(puzzle.solutions), 92,\n 'should not find any solutions for the three problem')\n\n def test_traveling_salesman(self):\n \"\"\" Given the following graph:\n 2\n (a)----(b)\n | \\4 / |\n | \\/ |5\n 1| /\\ |\n | /3 \\ |\n |/ \\|\n (c)----(d)\n 6\n \"\"\"\n g = Graph.build(edges=[\n ('a', 'b', 2), ('a', 'd', 4), ('a', 'c', 1),\n ('b', 'd', 5), ('b', 'c', 3), ('d', 'c', 6)\n ], directed=False)\n ts = TravelingSalesman(g)\n ts.run()\n expected_min_path = ['a', 'c', 'b', 'd', 'a']\n expected_min_cost = 13\n self.assertEqual(ts.solution, expected_min_path,\n 'should have computed the min path')\n self.assertEqual(ts.min_cost, expected_min_cost,\n 'should have computed the min cost')\n\n def test_subset_of_given_sum(self):\n S = [1,2,2,3,4,5]\n N = 5\n sogs = SubsetsOfGivenSum(S, N)\n sogs.run()\n expected_solutions = [[1,2,2], [1,4], [2,3], [5]]\n self.assertItemsEqual(expected_solutions, sogs.solutions,\n 'should produce the correct solution')\n"},"size":{"kind":"number","value":2172,"string":"2,172"}}},{"rowIdx":127562,"cells":{"max_stars_repo_path":{"kind":"string","value":"test/test_borehole_ground_water.py"},"max_stars_repo_name":{"kind":"string","value":"ArnaudCrl/pywellcad"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2171321"},"content":{"kind":"string","value":"import pathlib\nimport unittest\n\nimport pywintypes\n\nimport wellcad.com\nimport random\nimport pywintypes\nfrom ._extra_asserts import 
ExtraAsserts\nfrom ._sample_path import SamplePath\n\n\nclass TestBoreholeGroundWater(unittest.TestCase, ExtraAsserts, SamplePath):\n @classmethod\n def setUpClass(cls):\n cls.app = wellcad.com.Application()\n cls.sample_path = cls._find_sample_path()\n cls.fixture_path = pathlib.Path(__file__).parent / \"fixtures\"\n cls.borehole = cls.app.open_borehole(str(cls.fixture_path / \"groundwater/groundwater.wcl\"))\n\n @classmethod\n def tearDownClass(cls):\n cls.app.quit(False)\n\n def test_water_salinity(self):\n config = \"Temperature=Temperature (C),TemperatureUnit=degC\"\n output_log = self.borehole.water_salinity(\"Conductivity\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_water_salinity_documentation(self):\n self.fail(\"water salinity chm documentation : input restricted to conductivity\")\n\n def test_water_resistivity(self):\n config = \"Temperature=25,TemperatureUnit=degC,RefTemperature=25,RefTemperatureUnit=degC,Method=0\"\n output_log = self.borehole.water_resistivity(\"Fluid Resistivity\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_water_resistivity_documentation(self):\n self.fail(\"water_resistivity chm documentation : Method is missing\")\n\n def test_shale_volume(self):\n config = \"Equation=0,ShaleValueType=0,Shale=500,ShaleTopDepth=30,ShaleBotDepth=80,\\\n SandstoneValueType=2,Sandstone=0,SandstoneTopDepth=160,SandstoneBotDepth=180\"\n output_log = self.borehole.shale_volume(\"Gamma Ray\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_porosity_sonic(self):\n config = \"Method=0,MatrixSlowness=50,MatrixSlownessUnit=us/m,FluidSlowness=189,\\\n FluidSlownessUnit=us/m,C=0.67,Compaction=1\"\n output_log = self.borehole.porosity_sonic(\"P-Slowness\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_porosity_archie(self):\n config = \"Method=0,Rw=Rw,RwUnit=ohm.m,Vsh=0,Rsh=30,RshUnit=ohm.m,CementationFactor=1,\\\n CementationExponent=2,Cs=1\"\n output_log = self.borehole.porosity_archie(\"Normal Resistivity\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_porosity_density(self):\n config = \"Method=0,MatrixDensity=2.7,MatrixDensityUnit=g/cc,FluidDensity=1.0,\\\n FluidDensityUnit=g/cc,ShaleVolume=0,ShaleDensity=1.5,ShaleDensityUnit=g/cc\"\n output_log = self.borehole.porosity_density(\"Bulk Density\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_porosity_neutron(self):\n config = \"Vsh=Vsh,ShaleNPhi=50\"\n output_log = self.borehole.porosity_neutron(\"NPhi (Sandstone)\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_permeability(self):\n config = \"CementationFactor=1\"\n output_log = self.borehole.permeability(\"DPhi (Sandstone)\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n def test_hydraulic_conductivity(self):\n config = str(self.fixture_path / \"groundwater/groundwater.ini\")\n output_log = self.borehole.hydraulic_conductivity(\"Permeability\", False, config)\n self.assertIsInstance(output_log, wellcad.com.Log)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n"},"size":{"kind":"number","value":3524,"string":"3,524"}}},{"rowIdx":127563,"cells":{"max_stars_repo_path":{"kind":"string","value":"wordbook/services.py"},"max_stars_repo_name":{"kind":"string","value":"lostsquirrel/words"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170933"},"content":{"kind":"string","value":"from wordbook.models import wordbookDAO, Wordbook\n\n\ndef all():\n data = wordbookDAO.find_all()\n return [Wordbook.from_db(*r) for r in data]\n\n\ndef get_wordbook(book_id: int):\n _b = wordbookDAO.find(book_id)\n if _b is not None:\n return Wordbook.from_db(*_b)\n\n\ndef get_wordbook_by_guid(guid: int):\n _b = wordbookDAO.find_by_guid(guid)\n if _b is not None:\n return Wordbook.from_db(*_b)\n"},"size":{"kind":"number","value":413,"string":"413"}}},{"rowIdx":127564,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/models/metrics/sequence_confusion_matrix.py"},"max_stars_repo_name":{"kind":"string","value":"V1ct0reo/lightning-fast-hydra"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171493"},"content":{"kind":"string","value":"from typing import Optional, Any\n\nimport numpy as np\nimport pandas as pd\nimport torchmetrics.classification\nfrom torch import Tensor\nfrom sklearn.metrics import confusion_matrix\n\nfrom src.datamodules.datasets.window_makers.sliding_window_maker import MovementDataWindowMaker\n\n\nclass BasicSequenceConfusionMatrix(torchmetrics.classification.ConfusionMatrix):\n def __init__(self,\n num_classes: int,\n window_maker: MovementDataWindowMaker,\n normalize: Optional[str] = None,\n threshold: float = 0.5,\n multilabel: bool = False,\n compute_on_step: bool = False,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n file_name='basic_sequence_confusion_matrix.csv',\n ):\n \"\"\"\n\n \"\"\"\n super().__init__(num_classes=num_classes, normalize=normalize, threshold=threshold, multilabel=multilabel,\n compute_on_step=compute_on_step, dist_sync_on_step=dist_sync_on_step,\n process_group=process_group)\n self.file_name = file_name\n self.window_maker = window_maker\n self.seq_id_col = window_maker.sequenz_identifier\n self.seq_id_list = window_maker.seq_id_list\n first_frame_idx = self.window_maker.window_size - 1\n self.seq_id_preds_targtes = pd.DataFrame(\n columns=['seq_id', 'predicted', 'target'], index=range(first_frame_idx, self.window_maker.num_entries))\n\n def add_batch(self, preds_batch, target_batch, sample_idxs):\n # preds_batch (batch_size, n_classes) <- softmax output\n # target_batch (batch_size)\n # sample_idxs (batch_size, windoiw_size) <- for each batch, windowsize[-1] would be the Frame, tht got predicted\n #\n # the idx for each predicted frame from this batch. 
Should be used to get the right row from windowmakers data df\n predicted_frames_idxs = sample_idxs[:, -1]\n predicted_frames_idxs = predicted_frames_idxs.detach().numpy()\n predicted_labels = preds_batch.argmax(axis=1)\n predicted_labels = predicted_labels.detach().numpy()\n target_batch_labels = target_batch.detach().numpy()\n self.seq_id_preds_targtes.loc[predicted_frames_idxs] = np.array([\n self.seq_id_list[predicted_frames_idxs], # the seq_id for this window\n predicted_labels, # the prediction for this window\n target_batch_labels # the target for this window\n ]).T\n\n def compute_and_save_csv(self):\n preds = []#np.zeros((self.num_classes))\n targets = []#np.zeros((self.num_classes))\n for seq in sorted(self.seq_id_preds_targtes.seq_id.unique()):\n if not isinstance(seq, int):\n continue\n seq_df = self.seq_id_preds_targtes[self.seq_id_preds_targtes['seq_id'] == seq]\n majority = seq_df.mode(axis=0, dropna=True)\n t = majority['target'].values[0]\n if not isinstance(t, int):\n t = t[0]\n p = majority['predicted'].values[0]\n if not isinstance(p, int):\n p = p[0]\n targets.append(t)\n preds.append(p)\n\n conf_mat = confusion_matrix(y_true=targets, y_pred=preds)\n conf_df = pd.DataFrame(conf_mat, index=np.arange(self.num_classes), columns=np.arange(self.num_classes))\n conf_df.to_csv(self.file_name, index=False)\n\n # preds_majority_vote = self.seq_id_preds_targtes.groupby(self.seq_id_col).predicted.agg(pd.Series.mode)\n # pred_counts = preds_majority_vote.value_counts()\n # seq_preds[pred_counts.index] = pred_counts\n\n # targets_majority_vote = self.seq_id_preds_targtes.groupby(self.seq_id_col).target.agg(pd.Series.mode)\n # targets_counts = targets_majority_vote.value_counts()\n # seq_targets[targets_counts.index] = targets_counts\n"},"size":{"kind":"number","value":3950,"string":"3,950"}}},{"rowIdx":127565,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/treasure/migrations/0001_initial.py"},"max_stars_repo_name":{"kind":"string","value":"yohan394/adverstisement_backend"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171590"},"content":{"kind":"string","value":"# Generated by Django 3.1.7 on 2021-05-24 15:11\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('user', '0001_initial'),\n ('commercial', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RewardCap',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_at', models.DateField(default=django.utils.timezone.now)),\n ('daily_cap', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='TransactionVideo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('rewarded', models.IntegerField()),\n ('date_at', models.DateField(default=django.utils.timezone.now)),\n ('info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.info')),\n ('video', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.video')),\n ],\n ),\n migrations.CreateModel(\n name='TransactionQuiz',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('rewarded', models.IntegerField()),\n ('date_at', models.DateField(default=django.utils.timezone.now)),\n ('info', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.info')),\n ('quiz', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.quiz')),\n ('user_choice', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.quizchoices')),\n ],\n ),\n ]\n"},"size":{"kind":"number","value":2005,"string":"2,005"}}},{"rowIdx":127566,"cells":{"max_stars_repo_path":{"kind":"string","value":"code/DataBase.py"},"max_stars_repo_name":{"kind":"string","value":"danielt17/Triplet-loss-few-shot-learning"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2170710"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 4 14:02:13 2021\r\n\r\n@author: danie\r\n\"\"\"\r\n\r\n# %% Imports\r\n\r\nfrom __future__ import print_function\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\nimport os\r\nimport os.path\r\nimport errno\r\nimport torch\r\nimport codecs\r\nimport numpy as np\r\nimport csv\r\n\r\n# %%\r\n\r\n# %% New data set defnition\r\n\r\nclass FashionMNIST_t(data.Dataset):\r\n '''\r\n Description:\r\n This function creates a dataset object of triplet tuples.\r\n Inputs:\r\n root: path of downloaded datasest\r\n n_train_triplets: amount of training set samples\r\n n_test_triplets: amount of test set samples\r\n train: return train or test set\r\n transform: preform transformations and/or augmentations on input data\r\n target_tranform: preform transformations and/or augmentations on target data\r\n download: download the dataset\r\n Returns:\r\n Dataset (training or test set)\r\n '''\r\n urls = [\r\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\r\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',\r\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\r\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',\r\n ]\r\n raw_folder = 'raw'\r\n processed_folder = 'processed'\r\n training_file = 'training.pt'\r\n test_file = 'test.pt'\r\n train_triplet_file = 'train_triplets.txt'\r\n test_triplet_file = 'test_triplets.txt'\r\n\r\n def __init__(self, root, n_train_triplets=50000, n_test_triplets=10000, train=True, transform=None, target_transform=None, download=False):\r\n self.root = root\r\n \r\n self.transform = transform\r\n self.train = train # training set or test set\r\n\r\n if download:\r\n self.download()\r\n\r\n if not self._check_exists():\r\n raise RuntimeError('Dataset not found.' 
+\r\n ' You can use download=True to download it')\r\n\r\n if self.train:\r\n self.train_data, self.train_labels = torch.load(\r\n os.path.join(root, self.processed_folder, self.training_file))\r\n self.make_triplet_list(n_train_triplets)\r\n triplets = []\r\n for line in open(os.path.join(root, self.processed_folder, self.train_triplet_file)):\r\n if len(line) == 1:\r\n continue\r\n else:\r\n triplets.append((int(line.split()[0]), int(line.split()[1]), int(line.split()[2]))) # anchor, close, far\r\n self.triplets_train = triplets\r\n else:\r\n self.test_data, self.test_labels = torch.load(os.path.join(root, self.processed_folder, self.test_file))\r\n self.make_triplet_list(n_test_triplets)\r\n triplets = []\r\n for line in open(os.path.join(root, self.processed_folder, self.test_triplet_file)):\r\n if len(line) == 1:\r\n continue\r\n else:\r\n triplets.append((int(line.split()[0]), int(line.split()[1]), int(line.split()[2]))) # anchor, close, far\r\n self.triplets_test = triplets\r\n\r\n\r\n def __getitem__(self, index):\r\n if self.train:\r\n idx1, idx2, idx3 = self.triplets_train[index]\r\n img1, img2, img3 = self.train_data[idx1], self.train_data[idx2], self.train_data[idx3]\r\n else:\r\n idx1, idx2, idx3 = self.triplets_test[index]\r\n img1, img2, img3 = self.test_data[idx1], self.test_data[idx2], self.test_data[idx3]\r\n\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img1 = Image.fromarray(img1.numpy(), mode='L')\r\n img2 = Image.fromarray(img2.numpy(), mode='L')\r\n img3 = Image.fromarray(img3.numpy(), mode='L')\r\n\r\n if self.transform is not None:\r\n img1 = self.transform(img1)\r\n img2 = self.transform(img2)\r\n img3 = self.transform(img3)\r\n\r\n return img1, img2, img3\r\n\r\n def __len__(self):\r\n if self.train:\r\n return len(self.triplets_train)\r\n else:\r\n return len(self.triplets_test)\r\n\r\n def _check_exists(self):\r\n return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \\\r\n os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))\r\n\r\n def _check_triplets_exists(self):\r\n return os.path.exists(os.path.join(self.root, self.processed_folder, self.train_triplet_file)) and \\\r\n os.path.exists(os.path.join(self.root, self.processed_folder, self.test_triplet_file))\r\n\r\n def download(self):\r\n from six.moves import urllib\r\n import gzip\r\n\r\n if self._check_exists():\r\n return\r\n\r\n # download files\r\n try:\r\n os.makedirs(os.path.join(self.root, self.raw_folder))\r\n os.makedirs(os.path.join(self.root, self.processed_folder))\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n for url in self.urls:\r\n print('Downloading ' + url)\r\n data = urllib.request.urlopen(url)\r\n filename = url.rpartition('/')[2]\r\n file_path = os.path.join(self.root, self.raw_folder, filename)\r\n with open(file_path, 'wb') as f:\r\n f.write(data.read())\r\n with open(file_path.replace('.gz', ''), 'wb') as out_f, \\\r\n gzip.GzipFile(file_path) as zip_f:\r\n out_f.write(zip_f.read())\r\n os.unlink(file_path)\r\n\r\n # process and save as torch files\r\n print('Processing...')\r\n\r\n training_set = (\r\n read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),\r\n read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))\r\n )\r\n test_set = (\r\n read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),\r\n 
read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))\r\n )\r\n with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:\r\n torch.save(training_set, f)\r\n with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:\r\n torch.save(test_set, f)\r\n\r\n print('Done!')\r\n\r\n def make_triplet_list(self, ntriplets):\r\n\r\n if self._check_triplets_exists():\r\n return\r\n print('Processing Triplet Generation ...')\r\n if self.train:\r\n np_labels = self.train_labels.numpy()\r\n filename = self.train_triplet_file\r\n else:\r\n np_labels = self.test_labels.numpy()\r\n filename = self.test_triplet_file\r\n triplets = []\r\n for class_idx in range(10):\r\n a = np.random.choice(np.where(np_labels==class_idx)[0], int(ntriplets/10), replace=True)\r\n b = np.random.choice(np.where(np_labels==class_idx)[0], int(ntriplets/10), replace=True)\r\n while np.any((a-b)==0):\r\n np.random.shuffle(b)\r\n c = np.random.choice(np.where(np_labels!=class_idx)[0], int(ntriplets/10), replace=True)\r\n\r\n for i in range(a.shape[0]):\r\n triplets.append([int(a[i]), int(c[i]), int(b[i])]) \r\n\r\n with open(os.path.join(self.root, self.processed_folder, filename), \"w\") as f:\r\n writer = csv.writer(f, delimiter=' ')\r\n writer.writerows(triplets)\r\n print('Done!')\r\n\r\n\r\n\r\ndef get_int(b):\r\n return int(codecs.encode(b, 'hex'), 16)\r\n\r\ndef parse_byte(b):\r\n if isinstance(b, str):\r\n return ord(b)\r\n return b\r\n\r\n\r\ndef read_label_file(path):\r\n with open(path, 'rb') as f:\r\n data = f.read()\r\n assert get_int(data[:4]) == 2049\r\n length = get_int(data[4:8])\r\n labels = [parse_byte(b) for b in data[8:]]\r\n assert len(labels) == length\r\n return torch.LongTensor(labels)\r\n\r\n\r\ndef read_image_file(path):\r\n with open(path, 'rb') as f:\r\n data = f.read()\r\n assert get_int(data[:4]) == 2051\r\n length = get_int(data[4:8])\r\n num_rows = get_int(data[8:12])\r\n num_cols = get_int(data[12:16])\r\n images = []\r\n idx = 16\r\n for l in range(length):\r\n img = []\r\n images.append(img)\r\n for r in range(num_rows):\r\n row = []\r\n img.append(row)\r\n for c in range(num_cols):\r\n row.append(parse_byte(data[idx]))\r\n idx += 1\r\n assert len(images) == length\r\n return torch.ByteTensor(images).view(-1, 28, 28)\r\n\r\n\r\n\r\n"},"size":{"kind":"number","value":8912,"string":"8,912"}}},{"rowIdx":127567,"cells":{"max_stars_repo_path":{"kind":"string","value":"preprocess/dataPreprocess.py"},"max_stars_repo_name":{"kind":"string","value":"zyt4321/bio"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171400"},"content":{"kind":"string","value":"import sys\nsys.path.append(\"./\")\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.sparse.linalg import spsolve\nfrom scipy.signal import savgol_filter\nimport os\nimport config\nimport matplotlib.pyplot as plt\nimport pandas\nimport progressbar\n\n\ndef baseline_als(y, lam, p, niter=10):\n '''\n find baseline according to y\n '''\n L = len(y)\n D = sparse.diags([1,-2,1],[0,-1,-2], shape=(L,L-2))\n w = np.ones(L)\n for i in range(niter):\n W = sparse.spdiags(w, 0, L, L)\n Z = W + lam * D.dot(D.transpose())\n z = spsolve(Z, w*y)\n w = p * (y > z) + (1-p) * (y < z)\n return z\n\ndef savitzkyGolay(x, window_length=41, polyorder=2):\n '''\n savitzkyGolay smoothing\n '''\n return savgol_filter(x, window_length, polyorder)\n\ndef align(a_l, a_r, b_l, b_r, z):\n '''\n align by 2 points\n '''\n return (a_l - a_r) * (z - b_l) 
/ (b_l - b_r) + a_l\n\ndef main(subPosition=2, datapath=config.DATA_PATH, afterpath=config.DATA_PREPROCESS_PATH):\n totalNum = len(os.listdir(datapath))\n bar = progressbar.ProgressBar(max_value=totalNum)\n cnt = 0\n for path in os.listdir(datapath):\n cnt += 1\n bar.update(cnt)\n\n sType = path.split('_')[subPosition]\n # if sType not in ['A6','A8','S2','C2']:\n # continue\n\n testFilePath = os.path.join(datapath, path)\n df = pandas.read_csv(testFilePath)\n matrix = df.values\n x = matrix[:,0]\n y = matrix[:,1]\n\n y = np.maximum(y, 0) \n\n y = np.sqrt(y)\n\n y = savitzkyGolay(y, window_length=11)\n\n z = baseline_als(y, 10**9, 0.001)\n y = np.maximum(0, y-z).astype(np.float)\n # plt.plot(x, y ,color='y')\n # plt.show()\n\n\n bList = []\n bList_y = []\n interval = 0.5\n \n if sType == 'S2':\n aList = [103.9136, 108.90324, 164.9097]\n elif sType == 'C2':\n aList = [146.01622, 265.91915, 656.02131]\n elif sType in ['A6','A8']:\n aList = [196.9738, 393.9404, 590.907]\n else:\n aList = []\n\n for a in aList:\n xSubList = np.logical_and(a - interval <= x, x <= a + interval)\n ySubList = y[xSubList]\n\n firstIndex = np.where(xSubList==True)[0][0]\n subIndex = np.argmax(ySubList)\n if subIndex == 0 or subIndex == (len(ySubList) - 1):\n print(cnt, totalNum)\n print(path)\n totalIndex = firstIndex + subIndex\n bList.append(x[totalIndex])\n\n if len(bList) != 0:\n x_new = []\n for x_item in x:\n new = x_item\n if x_item < bList[0] or x_item >= bList[2]:\n new = align(aList[0], aList[2], bList[0], bList[2], x_item)\n elif x_item >= bList[0] and x_item < bList[1]:\n new = align(aList[0], aList[1], bList[0], bList[1], x_item)\n elif x_item >= bList[1] and x_item < bList[2]:\n new = align(aList[1], aList[2], bList[1], bList[2], x_item)\n x_new.append(new)\n df['mass'] = x_new\n else:\n df['mass'] = x\n\n df['intensity'] = y\n df.to_csv(os.path.join(afterpath, path), index=False)\n\nif __name__ == \"__main__\":\n # The path where the CSV format file is located\n DATA_PATH='/opt/BioData_Base/data20190708/csv'\n # The path where the processed file will be saved\n DATA_PREPROCESS_PATH='/opt/BioData_Base/data20190708/csv-after-py'\n \n main(2,DATA_PATH, DATA_PREPROCESS_PATH)"},"size":{"kind":"number","value":3562,"string":"3,562"}}},{"rowIdx":127568,"cells":{"max_stars_repo_path":{"kind":"string","value":"tools/nightly-unit-tests/tests/test_semver.py"},"max_stars_repo_name":{"kind":"string","value":"winstondu/dd-sdk-ios"},"max_stars_count":{"kind":"number","value":93,"string":"93"},"id":{"kind":"string","value":"2170426"},"content":{"kind":"string","value":"# -----------------------------------------------------------\n# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2019-2020 Datadog, Inc.\n# -----------------------------------------------------------\n\nimport unittest\nfrom src.semver import Version\n\n\nclass VersionTestCase(unittest.TestCase):\n def test_parsing(self):\n self.assertEqual(Version.parse('10.0.3'), Version(major=10, minor=0, patch=3))\n self.assertEqual(Version.parse('11.4'), Version(major=11, minor=4, patch=0))\n self.assertEqual(Version.parse('12'), Version(major=12, minor=0, patch=0))\n\n def test_comparing(self):\n self.assertTrue(Version.parse('14.0.0').is_newer_than(Version.parse('13.1.2')))\n self.assertTrue(Version.parse('14.1.1').is_newer_than(Version.parse('14.1.0')))\n 
self.assertTrue(Version.parse('14.2.3').is_newer_than(Version.parse('14.2.2')))\n self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('15.0.2')))\n self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.1.0')))\n self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.0.4')))\n self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.0.3')))\n self.assertTrue(Version.parse('14.0.3').is_newer_than_or_equal(Version.parse('14.0.3')))\n self.assertFalse(Version.parse('14.0.2').is_newer_than_or_equal(Version.parse('14.0.3')))\n"},"size":{"kind":"number","value":1582,"string":"1,582"}}},{"rowIdx":127569,"cells":{"max_stars_repo_path":{"kind":"string","value":"leetCode/cheapest_flights_within_k_stops.py"},"max_stars_repo_name":{"kind":"string","value":"yskang/AlgorithmPracticeWithPython"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170071"},"content":{"kind":"string","value":"# Title: Cheapest Flights Withn K Stops\n# Link: https://leetcode.com/problems/cheapest-flights-within-k-stops/\n\nfrom collections import defaultdict, deque\nfrom heapq import heappop, heappush\nfrom typing import List\n\nINF = 10**10\n\nclass Problem:\n def find_cheapest_price(self, n: int, flights: List[List[int]], src: int, dst: int, k: int) -> int:\n graph = defaultdict(lambda: [])\n for start, end, time in flights:\n graph[start].append((end, time))\n return self.dijkstra(graph, src, dst, k)\n\n\n def dijkstra(self, graph: defaultdict, start: int, dst: int, k: int):\n pq = []\n heappush(pq, (0, start, k+1))\n\n while pq:\n cost, node, count = heappop(pq)\n if node == dst:\n return cost\n if count > 0:\n for child, time in graph[node]:\n heappush(pq, (cost + time, child, count-1))\n\n return -1\n\ndef solution():\n n = 4\n edges = [[0,1,1],[0,2,5],[1,2,1],[2,3,1]]\n src = 0\n dst = 3\n k = 1\n problem = Problem()\n return problem.find_cheapest_price(n, edges, src, dst, k)\n\n\ndef main():\n print(solution())\n\n\nif __name__ == '__main__':\n main()"},"size":{"kind":"number","value":1198,"string":"1,198"}}},{"rowIdx":127570,"cells":{"max_stars_repo_path":{"kind":"string","value":"01_Implementation.py"},"max_stars_repo_name":{"kind":"string","value":"MaxLiu728/Genre-Prediction"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170450"},"content":{"kind":"string","value":"\"\"\"Personal Challenge_Draft.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1-25-B3CO6yVCH9u2vgbhIjyyFeU3tJ3w\n\"\"\"\n\n# Working environment set up\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport string\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nimport seaborn as sns\nfrom nltk.corpus import wordnet\nimport matplotlib.pyplot as plt\nfrom matplotlib.legend_handler import HandlerLine2D\nimport numpy as np\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier\n\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\n\n\ndef load_data():\n '''\n This function will separately return the features and response variable for the input data\n '''\n data = pd.read_csv('data.csv')\n X 
= data['Lyric']\n y = data['Genre']\n return X, y\n\n\n# Use pos_tag to get the type of the world and then map the tag to the format wordnet lemmatizer would accept.\ndef get_wordnet_pos(word):\n \"\"\"Map POS tag to first character lemmatize() accepts\"\"\"\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)\n\n\ndef transform_data():\n '''\n This function will transform the features and will reuturn the countvectorized features.\n Steps are:\n 1. Remove punctuations\n 2. Tokenize\n 3. Lemmatization\n 4. Remove stop words\n 5. CountVectorize\n '''\n X, y = load_data()\n X = X.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation))) # To remove the punctuations\n X_Tokenize = X.apply(lambda x: word_tokenize(x)) # To tokenize\n\n lemmatizer = WordNetLemmatizer()\n X_lemmatize = X_Tokenize.apply(lambda x: ' '.join([lemmatizer.lemmatize(w, pos='v') for w in x]))\n\n stop_words = set(stopwords.words('english'))\n stop_words_more = ('10', '100', '20', '2x', '3x', '4x', '50', 'im') # Add more stop words\n stop_words = stop_words.add(x for x in stop_words_more)\n\n CountVect = CountVectorizer(stop_words=stop_words, min_df=300, lowercase=True, ngram_range=(1, 1))\n Transformmed_array = CountVect.fit_transform(X_lemmatize)\n X_vectorized = pd.DataFrame(Transformmed_array.toarray(), columns=CountVect.get_feature_names())\n return X_vectorized, y\n\n\ndef EDA_visualize(X, y, N):\n '''\n :para X: X is the features to be trained\n :para y: y is the Gnere classification to be trained\n :para N: nlargest frequencied words for each type of Genre\n :return: 1. Barplot to visulize the counts for each type of y 2. Return the n largest frequencies words for each type of y\n '''\n sns.catplot(x='Genre', kind='count', data=pd.DataFrame(y[:50000]))\n DF_Combine = pd.concat([X, y], axis=1)\n\n DF_nlargest = pd.DataFrame(np.ones((3, 1)), columns=['exm'], index=['Hip Hop', 'Pop', 'Rock']) # Initilnize\n for value in DF_Combine.columns[:-1]:\n DF_nlargest[value] = pd.DataFrame(DF_Combine.groupby('Genre')[value].sum())\n\n print(DF_nlargest.apply(lambda s, n: s.nlargest(n).index, axis=1, n=N))\n\n\n# X_temp, y_temp = transform_data()\n\ndef TuneParameter_visulize(X_train, y_train, X_hold, y_hold):\n '''\n It will return severl plots aims to tune paramters.\n parameters are:\n 1. max_depth\n 2. n_estimators\n 3. max_features...\n\n Todo: plotting more parameters\n '''\n # Tune max_depth\n max_depths = np.linspace(10, 200, 15, endpoint=True)\n train_results = []\n validation_results = []\n for depth in max_depths:\n rf = RandomForestClassifier(max_depth=depth, n_jobs=-1)\n rf.fit(X_train, y_train)\n train_results.append(accuracy_score(y_train, rf.predict(X_train)))\n validation_results.append(accuracy_score(y_hold, rf.predict(X_hold)))\n\n line1 = plt.plot(max_depths, train_results, 'b', label='Train accuracy')\n line2 = plt.plot(max_depths, validation_results, 'r', label='Estimated accuracy')\n plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})\n plt.ylabel('accuracy score')\n plt.xlabel('Tree depth')\n plt.show()\n\n\ndef main():\n '''\n It will return:\n 1. EDA visulization\n 2. Visulize parameter tuning process\n 3. Series include Expected accuracy\n 4. 
Series include the predicted y_test\n '''\n # Load data\n X_input, y_input = transform_data()\n\n # Train, holdset, test split\n y_test = pd.DataFrame(y_input[-5000:], columns=['Genre'])\n y_train = pd.DataFrame(y_input[:50000], columns=['Genre'])\n\n X_train = pd.DataFrame(X_input.iloc[:50000, :], columns=X_input.columns)\n X_test = pd.DataFrame(X_input.iloc[-5000:, :], columns=X_input.columns)\n\n X_holdout_set = X_train.sample(5000, random_state=66)\n y_holdout_set = y_train.iloc[X_holdout_set.index, :]\n X_train_new = X_train.drop(X_holdout_set.index)\n y_train_new = y_train.drop(X_holdout_set.index)\n\n EDA_visualize(X_train, y_train, 10) # For EDA purpose\n\n # Build classifier\n '''\n The RF model will be used. Few reasons below:\n 1. An ensemble (bootstrap) approach might make stronger predictions, without causing serious overfitting\n 2. Compared with distance methods, it needs less datapreprocessing (such as scaling data) \n 3. Non-parametric estimation \n \n However, it may have an obvious drawback: \n 1. May set large max_features\n 2. Should consider more deeper depth\n The drawbacks above will directly triggle the large training workload.\n '''\n # TuneParameter_visulize(X_train_new,y_train_new, X_holdout_set, y_holdout_set) # Tune parameters\n\n RF_Model = RandomForestClassifier(criterion='entropy', n_estimators=100, max_depth=56, max_features=666)\n RF_Model.fit(X_train_new, y_train_new)\n\n estimated_accuracy = accuracy_score(y_holdout_set, RF_Model.predict(X_holdout_set))\n pd.Series(estimated_accuracy).to_csv('ea.csv', index=False, header=False)\n\n # Predict testing set\n test_pred = RF_Model.predict(X_test)\n pd.Series(test_pred).to_csv('pred.csv', index=False, header=False)\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":6417,"string":"6,417"}}},{"rowIdx":127571,"cells":{"max_stars_repo_path":{"kind":"string","value":"Modulo1/saludo.py"},"max_stars_repo_name":{"kind":"string","value":"EUD-curso-python/control_de_flujo-ipsuarezc"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169960"},"content":{"kind":"string","value":"def espanol():\n return 'hola'\n\ndef ingles():\n return 'hello'\n\ndef aleman():\n return 'hallo'\n\ndef hawai():\n return 'aloha'"},"size":{"kind":"number","value":133,"string":"133"}}},{"rowIdx":127572,"cells":{"max_stars_repo_path":{"kind":"string","value":"PBO_18117/latihan_4.2.list2.py"},"max_stars_repo_name":{"kind":"string","value":"daniel8117/PBO"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170223"},"content":{"kind":"string","value":"nilai_matakuliah=[70,80,90,13]\r\nrata_rata= (sum(nilai_matakuliah)/len(nilai_matakuliah))\r\nprint(\"nilai matakuliah=\", nilai_matakuliah)\r\nprint(\"nilai rata-rata=\", rata_rata)"},"size":{"kind":"number","value":172,"string":"172"}}},{"rowIdx":127573,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_dependency_parallel.py"},"max_stars_repo_name":{"kind":"string","value":"ariloulaleelay/fastapi"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171143"},"content":{"kind":"string","value":"from asyncio import sleep\nfrom time import time\n\nfrom fastapi import Depends, FastAPI\nfrom fastapi.testclient import TestClient\n\napp = FastAPI()\n\nclient = TestClient(app)\n\n\nasync def dependency1():\n ts = time()\n await sleep(0.1)\n return ts\n\n\nasync def dependency2():\n ts = time()\n await sleep(0.1)\n return 
Columns (name: type, value range):

max_stars_repo_path: string, length 4 to 182
max_stars_repo_name: string, length 6 to 116
max_stars_count: int64, 0 to 191k
id: string, length 7 to 7
content: string, length 100 to 10k
size: int64, 100 to 10k
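A minimal sketch of reading these columns programmatically, assuming the dataset id loubnabnl/starcoderdata_py_smol from this page, a "train" split, and the Hugging Face datasets library; streaming mode is used so nothing is downloaded up front. The id and split name are assumptions here, so adjust them for a mirror or a local copy.

from datasets import load_dataset

# Stream rows instead of downloading the whole dataset (assumed split name: "train").
ds = load_dataset("loubnabnl/starcoderdata_py_smol", split="train", streaming=True)

for i, row in enumerate(ds):
    # Each row exposes the columns listed above.
    print(row["max_stars_repo_path"], row["max_stars_repo_name"], row["max_stars_count"])
    print(row["id"], row["size"])
    print(row["content"][:200])  # first 200 characters of the stored file
    if i == 2:  # peek at a few rows only
        break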
max_stars_repo_path: pydeeprecsys/tests/unit/test_base_networks.py
max_stars_repo_name: luksfarris/pydeeprecsys
max_stars_count: 2
id: 2171283
content:

from pydeeprecsys.rl.agents.rainbow import RainbowDQNAgent
from pydeeprecsys.rl.neural_networks.value_estimator import ValueEstimator
import numpy as np
from pydeeprecsys.movielens_fairness_env import MovieLensFairness  # noqa: F401
from pydeeprecsys.rl.agents.reinforce import ReinforceAgent
from pydeeprecsys.rl.agents.actor_critic import ActorCriticAgent


def test_save_load(tmp_file_cleanup):
    # given a neural network
    network = ValueEstimator(4, [4], 1)
    # when we train it a little bit
    inputs = np.array([1, 2, 3, 4])
    output = 1
    for i in range(20):
        network.update(inputs, output)
    # then it can accurately make predictions
    predicted_value = network.predict(inputs).detach().cpu().numpy()[0]
    assert round(predicted_value) == output
    # and when we store params
    network.save(tmp_file_cleanup)
    # and recreate the network
    network = ValueEstimator(4, [4], 1)
    network.load(tmp_file_cleanup)
    # then the prediction is the same
    assert network.predict(inputs).detach().cpu().numpy()[0] == predicted_value


def test_tensorboard_writer_reinforce():
    env = MovieLensFairness(slate_size=1)
    reinforce_agent = ReinforceAgent(
        state_size=env.observation_space.shape[0], n_actions=env.action_space.n
    )
    reinforce_agent.policy_estimator.add_to_tensorboard(env.reset())
    ac_agent = ActorCriticAgent(
        state_size=env.observation_space.shape[0], n_actions=env.action_space.n
    )
    ac_agent.value_estimator.add_to_tensorboard(env.reset())
    dqn_agent = RainbowDQNAgent(env.observation_space.shape[0], env.action_space.n)
    dqn_agent.network.add_to_tensorboard(env.reset())
    # if no errors were raised, we're good

size: 1,708
max_stars_repo_path: prettyqt/quick/quickitem.py
max_stars_repo_name: phil65/PrettyQt
max_stars_count: 7
id: 2171156
content:

from __future__ import annotations

from typing import Literal

from prettyqt import core, gui, qml
from prettyqt.qt import QtQuick
from prettyqt.utils import InvalidParamError, bidict

FLAGS = bidict(
    clips_children_to_shape=QtQuick.QQuickItem.Flag.ItemClipsChildrenToShape,
    accepts_input_method=QtQuick.QQuickItem.Flag.ItemAcceptsInputMethod,
    is_focus_scope=QtQuick.QQuickItem.Flag.ItemIsFocusScope,
    has_contents=QtQuick.QQuickItem.Flag.ItemHasContents,
    accepts_drops=QtQuick.QQuickItem.Flag.ItemAcceptsDrops,
)

FlagStr = Literal[
    "clips_children_to_shape",
    "accepts_input_method",
    "is_focus_scope",
    "has_contents",
    "accepts_drops",
]

ITEM_CHANGE = bidict(
    child_added_change=QtQuick.QQuickItem.ItemChange.ItemChildAddedChange,
    child_removed_change=QtQuick.QQuickItem.ItemChange.ItemChildRemovedChange,
    item_scene_change=QtQuick.QQuickItem.ItemChange.ItemSceneChange,
    visible_has_changed=QtQuick.QQuickItem.ItemChange.ItemVisibleHasChanged,
    parent_has_changed=QtQuick.QQuickItem.ItemChange.ItemParentHasChanged,
    opacity_has_changed=QtQuick.QQuickItem.ItemChange.ItemOpacityHasChanged,
    active_focus_has_changed=QtQuick.QQuickItem.ItemChange.ItemActiveFocusHasChanged,
    rotation_has_changed=QtQuick.QQuickItem.ItemChange.ItemRotationHasChanged,
    pixel_ratio_has_changed=QtQuick.QQuickItem.ItemChange.ItemDevicePixelRatioHasChanged,
    anti_aliasing_has_changed=QtQuick.QQuickItem.ItemChange.ItemAntialiasingHasChanged,
    enabled_has_changed=QtQuick.QQuickItem.ItemChange.ItemEnabledHasChanged,
)

ItemChangeStr = Literal[
    "child_added_change",
    "child_removed_change",
    "item_scene_change",
    "visible_has_changed",
    "parent_has_changed",
    "opacity_has_changed",
    "active_focus_has_changed",
    "rotation_has_changed",
    "pixel_ratio_has_changed",
    "anti_aliasing_has_changed",
    "enabled_has_changed",
]

TRANSFORM_ORIGIN = bidict(
    top_left=QtQuick.QQuickItem.TransformOrigin.TopLeft,
    top=QtQuick.QQuickItem.TransformOrigin.Top,
    top_right=QtQuick.QQuickItem.TransformOrigin.TopRight,
    left=QtQuick.QQuickItem.TransformOrigin.Left,
    center=QtQuick.QQuickItem.TransformOrigin.Center,
    right=QtQuick.QQuickItem.TransformOrigin.Right,
    bottom_left=QtQuick.QQuickItem.TransformOrigin.BottomLeft,
    bottom=QtQuick.QQuickItem.TransformOrigin.Bottom,
    bottom_right=QtQuick.QQuickItem.TransformOrigin.BottomRight,
)

TransformOriginStr = Literal[
    "top_left",
    "top",
    "top_right",
    "left",
    "center",
    "right",
    "bottom_left",
    "bottom",
    "bottom_right",
]

QtQuick.QQuickItem.__bases__ = (core.Object, qml.QmlParserStatus)


class QuickItem(QtQuick.QQuickItem):
    def get_children_rect(self) -> core.RectF:
        return core.RectF(self.childrenRect())

    def get_cursor(self) -> gui.Cursor:
        return gui.Cursor(self.cursor())

    def get_flags(self):
        pass

    def set_transform_origin(self, origin: TransformOriginStr):
        """Set the origin point around which scale and rotation transform.

        The default is "center".

        Args:
            origin: transform origin to use

        Raises:
            InvalidParamError: transform origin does not exist
        """
        if origin not in TRANSFORM_ORIGIN:
            raise InvalidParamError(origin, TRANSFORM_ORIGIN)
        self.setTransformOrigin(TRANSFORM_ORIGIN[origin])

    def get_transform_origin(self) -> TransformOriginStr:
        """Return the render type of text-like elements in Qt Quick.

        Returns:
            transform origin
        """
        return TRANSFORM_ORIGIN.inverse[self.transformOrigin()]


if __name__ == "__main__":
    item = QuickItem()

size: 3,727
max_stars_repo_path: pepper/modules/python/models/predict_distributed_gpu.py
max_stars_repo_name: Samteymoori/pepper
max_stars_count: 155
id: 2170747
content:

import sys
import os
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from pepper.modules.python.models.dataloader_predict import SequenceDataset
from tqdm import tqdm
from pepper.modules.python.models.ModelHander import ModelHandler
from pepper.modules.python.Options import ImageSizeOptions, TrainOptions
from pepper.modules.python.DataStorePredict import DataStore

os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'


def predict(input_filepath, file_chunks, output_filepath, model_path, batch_size, num_workers, rank, device_id):
    transducer_model, hidden_size, gru_layers, prev_ite = \
        ModelHandler.load_simple_model_for_training(model_path,
                                                    input_channels=ImageSizeOptions.IMAGE_CHANNELS,
                                                    image_features=ImageSizeOptions.IMAGE_HEIGHT,
                                                    seq_len=ImageSizeOptions.SEQ_LENGTH,
                                                    num_classes=ImageSizeOptions.TOTAL_LABELS)
    transducer_model.eval()
    transducer_model = transducer_model.eval()

    # create output file
    output_filename = output_filepath + "pepper_prediction_" + str(device_id) + ".hdf"
    prediction_data_file = DataStore(output_filename, mode='w')

    # data loader
    input_data = SequenceDataset(input_filepath, file_chunks)
    data_loader = DataLoader(input_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    torch.cuda.set_device(device_id)
    transducer_model.to(device_id)
    transducer_model.eval()
    transducer_model = DistributedDataParallel(transducer_model, device_ids=[device_id])

    if rank == 0:
        progress_bar = tqdm(
            total=len(data_loader),
            ncols=100,
            leave=False,
            position=rank,
            desc="GPU #" + str(device_id),
        )

    with torch.no_grad():
        for contig, contig_start, contig_end, chunk_id, images, position, index in data_loader:
            sys.stderr.flush()
            images = images.type(torch.FloatTensor)
            hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)

            prediction_base_tensor = torch.zeros((images.size(0), images.size(1), ImageSizeOptions.TOTAL_LABELS))

            images = images.to(device_id)
            hidden = hidden.to(device_id)
            prediction_base_tensor = prediction_base_tensor.to(device_id)

            for i in range(0, ImageSizeOptions.SEQ_LENGTH, TrainOptions.WINDOW_JUMP):
                if i + TrainOptions.TRAIN_WINDOW > ImageSizeOptions.SEQ_LENGTH:
                    break

                chunk_start = i
                chunk_end = i + TrainOptions.TRAIN_WINDOW
                # chunk all the data
                image_chunk = images[:, chunk_start:chunk_end]

                # run inference
                output_base, hidden = transducer_model(image_chunk, hidden)

                # now calculate how much padding is on the top and bottom of this chunk so we can do a simple
                # add operation
                top_zeros = chunk_start
                bottom_zeros = ImageSizeOptions.SEQ_LENGTH - chunk_end

                # do softmax and get prediction
                # we run a softmax a padding to make the output tensor compatible for adding
                inference_layers = nn.Sequential(
                    nn.Softmax(dim=2),
                    nn.ZeroPad2d((0, 0, top_zeros, bottom_zeros))
                )
                inference_layers = inference_layers.to(device_id)

                # run the softmax and padding layers
                base_prediction = inference_layers(output_base).to(device_id)

                # now simply add the tensor to the global counter
                prediction_base_tensor = torch.add(prediction_base_tensor, base_prediction)

                del inference_layers
                torch.cuda.empty_cache()

            base_values, base_labels = torch.max(prediction_base_tensor, 2)

            # this part is for the phred score calculation
            counts = torch.ones((base_values.size(0), base_values.size(1) - 2 * ImageSizeOptions.SEQ_OVERLAP))
            top_ones = nn.ZeroPad2d((ImageSizeOptions.SEQ_OVERLAP, ImageSizeOptions.SEQ_OVERLAP))
            counts = top_ones(counts) + 1

            base_values = base_values.cpu().numpy()
            phred_score = -10 * torch.log10(1.0 - (base_values / counts))
            phred_score[phred_score == float('inf')] = 100

            predicted_base_labels = base_labels.cpu().numpy()
            phred_score = phred_score.cpu().numpy()

            for i in range(images.size(0)):
                prediction_data_file.write_prediction(contig[i], contig_start[i], contig_end[i], chunk_id[i],
                                                      position[i], index[i], predicted_base_labels[i],
                                                      phred_score[i])
            if rank == 0:
                progress_bar.update(1)

    if rank == 0:
        progress_bar.close()


def cleanup():
    dist.destroy_process_group()


def setup(rank, device_ids, args, all_input_files):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'

    # initialize the process group
    dist.init_process_group("gloo", rank=rank, world_size=len(device_ids))

    filepath, output_filepath, model_path, batch_size, num_workers = args

    # issue with semaphore lock: https://github.com/pytorch/pytorch/issues/2517
    # mp.set_start_method('spawn')

    # Explicitly setting seed to make sure that models created in two processes
    # start from same random weights and biases. https://github.com/pytorch/pytorch/issues/2517
    # torch.manual_seed(42)
    predict(filepath, all_input_files[rank], output_filepath, model_path, batch_size, num_workers, rank, device_ids[rank])
    cleanup()


def predict_distributed_gpu(filepath, file_chunks, output_filepath, model_path, batch_size, device_ids, num_workers):
    """
    Create a prediction table/dictionary of an images set using a trained model.
    :param filepath: Path to image files to predict on
    :param file_chunks: Path to chunked files
    :param batch_size: Batch size used for prediction
    :param model_path: Path to a trained model
    :param output_filepath: Path to output directory
    :param device_ids: List of GPU devices to use
    :param num_workers: Number of workers to be used by the dataloader
    :return: Prediction dictionary
    """
    args = (filepath, output_filepath, model_path, batch_size, num_workers)
    mp.spawn(setup,
             args=(device_ids, args, file_chunks),
             nprocs=len(device_ids),
             join=True)

size: 7,041
create_dataset/get_data_yolo.py
AVsolutionsai/YOLOv3_custom
0
2166441
# -*- coding: utf-8 -*- """ Created on Fri Mar 12 01:06:02 2021 @author: Isaac """ import cv2 import numpy as np from skimage import morphology from tensorflow.keras.models import load_model from tensorflow.keras.preprocessing import image import os class ProcesarVideo(): image=[] image_1=[] def __init__(self, filename): self.filename = filename def start_video(self): vidcap = cv2.VideoCapture(self.filename) success,image = vidcap.read() newImage = image.copy() model = load_model('model-280-0.989506-0.981818-0.064893.h5') kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2)) bg2 = cv2.createBackgroundSubtractorMOG2(history = 5000000, varThreshold =72, detectShadows = False) frame_width = int(vidcap.get(3)) frame_height = int(vidcap.get(4)) name = self.filename.split('.') frame_length=int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) fps=vidcap.get(cv2.CAP_PROP_FPS) print(fps) count=0 countEspermas = CountSpermas() while (count!=frame_length): sperms_coords = [] sperms_coords_filter = [] success, image = vidcap.read() if image is not None: newImage = image.copy() else: image=image count += 1 gray_image = cv2.cvtColor(newImage, cv2.COLOR_RGB2GRAY) mask = bg2.apply(gray_image) arr = mask > 0 cleaned = morphology.remove_small_objects(arr, min_size=15) mask_cleaned = morphology.remove_small_holes(cleaned, min_size=15) indices = mask_cleaned.astype(np.uint8) indices*=255 output = cv2.morphologyEx(indices, cv2.MORPH_OPEN, kernel) countEspermas.find_contornos(output, newImage, count, newImage, sperms_coords, sperms_coords_filter, model) th = 30 if count>th: for each_sperm_coords in sperms_coords: image_detection = OperacionesSpermas.image_detection(image, each_sperm_coords) OperacionesSpermas.sperm_evaluation(image_detection, model, each_sperm_coords, sperms_coords_filter) if not os.path.exists('Obj'): os.makedirs('Obj') path = 'Obj' if len(sperms_coords_filter)>1 and count> th: cv2.imwrite(os.path.join(path , name[0]+'_Frame_' + str(count)+'.jpg'), image) txt_name = name[0]+'_Frame_' + str(count)+'.txt' file_txt = open("Database/" + txt_name, "w") for coords in sperms_coords_filter: class_line_txt='0 ' file_txt.write(class_line_txt) x_c = coords[0]/frame_width y_c = coords[1]/frame_height x_n = (((coords[0]+10) - (coords[0]-10))/2)/frame_width y_n = (((coords[1]+10) - (coords[1]-10))/2)/frame_height info_coords = "%f %f %f %f" % (round(x_c, 6), round(y_c,6), round(x_n, 6), round(y_n,6)) file_txt.write(info_coords) next_txtline="\n" file_txt.write(next_txtline) # DRAW BOUNDING #cv2.rectangle(newImage, (coords[0]-10, coords[1]-10), # (coords[0]+10, coords[1]+10), (255,0,0), 2) #cv2.imwrite(os.path.join(path , name[0]+'_Frame_' + # str(count)+ '_seed' +'.jpg'), newImage) file_txt.close() # VISUALIZE DETECTION #cv2.imshow("Imagen", newImage) if cv2.waitKey(1) & 0xFF == ord('q'): break vidcap.release() cv2.destroyAllWindows() class CountSpermas(): def __init__(self): self.vector = [] self.pos_frames =[] self.v_actual=[] self.v_anterior=[] def find_contornos(self, th3ot, image2, count, image_o, sperms_coords, sperms_coords_filter, model): contours, hierarchy = cv2.findContours(th3ot, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_1=[] for i in contours: area = cv2.contourArea(i) if area>2 and area<200: contours_1.append(i) self.v_anterior = self.v_actual self.v_actual.clear() for c in contours_1: M = cv2.moments(c) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) x=cx y=cy if x> 10 and x < 630 and y> 10 and y < 470: x_sperm = x y_sperm = y sperms_coords.append([x_sperm,y_sperm]) else: continue 
class OperacionesSpermas(): @classmethod def image_detection(cls,image_original, SpermCoords): n=SpermCoords sperm_morpho=image_original[n[1]-10:n[1]+10,n[0]-10:n[0]+10] if sperm_morpho.shape[1] > 0 and sperm_morpho.shape[0] > 0: sperm_resize = cv2.resize(sperm_morpho, (32,32), interpolation = cv2.INTER_AREA) else: sperm_resize = np. zeros(shape=[32, 32, 3], dtype=np. uint8) return sperm_resize @classmethod def sperm_evaluation(cls, detect_image, model, coords_sperm, lista_evaluacion_sperm): x=image.img_to_array(detect_image) x=x/255 x=np.expand_dims(x,axis=0) imagea=np.vstack([x]) classes = model.predict(imagea) if classes[0]>0.5: lista_evaluacion_sperm.append(coords_sperm)
6,817
run.py
nseifert/SpecControl
0
2170680
from src.data_obj.ArbPulse import ArbPulse
from src.instruments.AWG import AWG
import os

if __name__ == "__main__":
    key_path = os.path.abspath('C:\\Users\\jaeger\\.ssh\\id_rsa')
    pass_path = os.path.abspath('C:\\Users\\jaeger\\Desktop\\SpecControl\\sec\\pass.pass')

    custom_pulse = ({'type': 'sine', 'channel': 'pulse', 'freq': 2500.0,
                     'amplitude': 1.0, 'length': 4.0, 'start_time': 8.0},)
    inp_pulse = ArbPulse(channel=1, additional_pulses=custom_pulse)

    # import scipy.fftpack as sfft
    # import numpy as np
    #
    # data = np.append(inp_pulse.pulse[:,0], np.zeros(len(inp_pulse.pulse[:,0])))
    # data = np.column_stack((sfft.fftfreq(len(data),1.0/inp_pulse.s_rate)/(inp_pulse.freq_multiplier),abs(sfft.fft(data))/100.0))

    # matplotlib is needed for the plot calls below
    from matplotlib import pyplot as plt

    # print inp_pulse.get_params()
    # plt.plot(inp_pulse.pulse[:,0])
    plt.plot(inp_pulse.pulse[:,1] + 2)
    plt.plot(inp_pulse.pulse[:,2] - 2)
    plt.show()

    # test = AWG(ip_addr='192.168.1.102', connect_type='instr', key_path=key_path, rsa_pass=pass_path, pulse=inp_pulse)
1,193
photutils/utils/_misc.py
prajwel/photutils
0
2168597
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools to return the installed astropy and
photutils versions.
"""


def _get_version_info():
    """
    Return a dictionary of the installed version numbers for photutils
    and its dependencies.

    Returns
    -------
    result : dict
        A dictionary containing the version numbers for photutils and
        its dependencies.
    """
    versions = {}
    packages = ('photutils', 'astropy', 'numpy', 'scipy', 'skimage')
    for package in packages:
        try:
            pkg = __import__(package)
            version = pkg.__version__
        except ImportError:
            version = None
        versions[package] = version

    return versions
752
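A quick way to inspect what the helper above reports is to call it directly. This is a hypothetical usage sketch: the import path is assumed from the file location, and the leading underscore marks the function as private, so it may change between releases.

# Hypothetical usage sketch; assumes photutils is installed and exposes _get_version_info at this path.
from photutils.utils._misc import _get_version_info

for package, version in _get_version_info().items():
    print(package, version or 'not installed')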
notemanager/models.py
muhammadsiraf/Maktub
0
2170917
from django.db import models
from django.contrib.auth.models import AbstractUser

# Create your models here.
class User(AbstractUser):
    pass
146
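A custom user model like the one above only takes effect if Django is pointed at it before the first migration is created. A minimal sketch of the relevant settings line, assuming the app is named notemanager as the file path suggests:

# settings.py -- the app label 'notemanager' is an assumption taken from the file path.
AUTH_USER_MODEL = 'notemanager.User'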
manu_sawyer/src/KinectA.py
robertocalandra/the-feeling-of-success
10
2171285
#!/usr/bin/env python import numpy as np from cv_bridge import CvBridge, CvBridgeError import rospy import multiprocessing from sensor_msgs.msg import Image import grip_and_record.locate_cylinder as locate_cylinder import tensorflow_model_is_gripping.aolib.util as ut import grip_and_record.getch cache_path = '/home/manu/ros_ws/src/manu_research/frame_cache' class KinectA: def __init__(self, save_init=True): """ Class for recording data from the KinectA """ print("Initializing KinectA") # Set up the subscribers # self.image_topic = "/kinect2/hd/image_color" self.image_topic = "/kinect2/qhd/image_color" self.depth_topic = "/kinect2/sd/image_depth_rect" rospy.Subscriber(self.image_topic, Image, self.store_latest_color_image) rospy.Subscriber(self.depth_topic, Image, self.store_latest_depth_image) # Defining variables self.init_depth_image = None self.depth_image = None self.color_image = None # Needed to transform images self.bridge = CvBridge() # Saving initial images cache_file = ut.pjoin(cache_path, 'kinect_a_init.pk') if save_init: # Requests the user to clear the table of all objects. # Takes initial picture with KinectA. # The initial picture is needed to localize the object on the table later. print('Please remove all objects from the table and then press ESC.') done = False while not done and not rospy.is_shutdown(): c = grip_and_record.getch.getch() if c: if c in ['\x1b', '\x03']: done = True ut.mkdir(cache_path) self.save_initial_color_image() self.save_initial_depth_image() ut.save(cache_file, (self.color_image, self.init_depth_image, self.depth_image)) else: (self.color_image, self.init_depth_image, self.depth_image) = ut.load(cache_file) # Starting multiprocessing def spin_thread(): rospy.spin() self.cam_process = multiprocessing.Process(target=spin_thread) self.cam_process.start() print("Done") def calc_object_loc(self): # Uses Andrews code to fit a cylinder and returns the center, height, radius and an image of the cylinder. ini_arr = np.array(self.init_depth_image) ini_depth = np.array(ini_arr[:, :, 0], 'float32') curr_arr = np.array(self.depth_image) curr_depth = np.array(curr_arr[:, :, 0], 'float32') return locate_cylinder.fit_cylinder(ini_depth, curr_depth) def save_initial_color_image(self): img = rospy.wait_for_message(self.image_topic, Image) self.color_image = self.bridge.imgmsg_to_cv2(img, "bgr8") self.color_image = np.flipud(self.color_image) self.color_image = np.fliplr(self.color_image) def save_initial_depth_image(self): img = rospy.wait_for_message(self.depth_topic, Image) self.depth_image = self.bridge.imgmsg_to_cv2(img, '16UC1') self.init_depth_image = self.depth_image def store_latest_color_image(self, data): try: cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print(e) self.color_image = cv_image def store_latest_depth_image(self, data): img = self.bridge.imgmsg_to_cv2(data, '16UC1') self.depth_image = img def get_color_image(self): return np.flipud(np.fliplr(self.color_image)) def get_depth_image(self): return np.flipud(np.fliplr(self.depth_image)) def end_process(self): self.cam_process.terminate() self.cam_process.join()
3,813
tests/ut/test_object_gateway.py
CTERA-Networks/ctera-python-sdk
5
2170774
import socket from cterasdk import exception from cterasdk.common import Object from tests.ut import base_edge class TestObjectGateway(base_edge.BaseEdgeTest): def setUp(self): super().setUp() self._host = "" self._port = 80 self._socket_connect_mock = self.patch_call("cterasdk.client.host.socket.socket.connect_ex") def test_connection_success(self): get_response = Object() get_response.hostname = self._filer.host() self._init_filer(get_response=get_response) self._socket_connect_mock.return_value = 0 self._filer.test() self._socket_connect_mock.assert_called_once_with((self._host, self._port)) self._filer.get.assert_called_once_with('/nosession/logininfo') def test_connection_socket_connect_error(self): get_response = Object() get_response.hostname = self._filer.host() self._init_filer(get_response=get_response) self._socket_connect_mock.side_effect = socket.gaierror() with self.assertRaises(exception.HostUnreachable) as error: self._filer.test() self._socket_connect_mock.assert_called_once_with((self._host, self._port)) self.assertEqual('Unable to reach host', error.exception.message) def test_connection_socket_connect_error_none_zero_rc(self): get_response = Object() get_response.hostname = self._filer.host() self._init_filer(get_response=get_response) self._socket_connect_mock.return_value = 1 with self.assertRaises(exception.HostUnreachable) as error: self._filer.test() self._socket_connect_mock.assert_called_once_with((self._host, self._port)) self.assertEqual('Unable to reach host', error.exception.message)
1,793
retrieve.tweet.py
leondz/pheme-twitter-conversation-collection
52
2170775
import json
import tweepy
import sys
import pprint
import os
import ConfigParser
import time

tweetid = sys.argv[1]

config = ConfigParser.ConfigParser()
config.read('twitter.ini')
consumer_key = config.get('Twitter', 'consumer_key')
consumer_secret = config.get('Twitter', 'consumer_secret')
access_key = config.get('Twitter', 'access_key')
access_secret = config.get('Twitter', 'access_secret')

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)

try:
    tweet = api.get_status(tweetid)
    print json.dumps(tweet.json)
except:
    sys.exit()
618
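The script above expects a twitter.ini file with a [Twitter] section next to it. A minimal sketch that writes such a file, with the key names taken from the config.get calls above; the placeholder values are not real credentials, and the sketch uses Python 3's configparser even though the collector script itself is Python 2.

# Hypothetical helper to create the twitter.ini that retrieve.tweet.py reads.
import configparser

config = configparser.ConfigParser()
config['Twitter'] = {
    'consumer_key': 'YOUR_CONSUMER_KEY',
    'consumer_secret': 'YOUR_CONSUMER_SECRET',
    'access_key': 'YOUR_ACCESS_KEY',
    'access_secret': 'YOUR_ACCESS_SECRET',
}
with open('twitter.ini', 'w') as f:
    config.write(f)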
App/pages/view_attendance.py
yonycherkos/automatic-classroom-attendance-system-using-face-recognition
1
2169079
import sys sys.path.append(".") sys.path.append("./App/utils") from PyQt5.QtWidgets import QMainWindow, QTableWidgetItem, QApplication, QHeaderView, QMessageBox, QFileDialog from PyQt5.QtCore import QModelIndex from PyQt5 import QtGui from PyQt5 import uic import pandas as pd import numpy as np import pdfkit import config import os class ViewAttendance(QMainWindow): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) uic.loadUi("App/ui/viewAttendance.ui", self) self.filterComboBox.activated[str].connect(self.filter) self.searchBtn.clicked.connect(self.search) self.saveAsPdfBtn.clicked.connect(self.saveAsPdf) self.backBtn.clicked.connect(self.back) self.attendance = config.ATTENDANCE_PATH self.df = pd.read_csv(self.attendance, index_col=0) self.displayTable() def search(self): self.df = pd.read_csv(self.attendance, index_col=0) if self.searchText.text() == "": self.showDialog(icon=QMessageBox.Warning, displayText="Enter student fullname", windowTitle="Search Name") else: name = self.searchText.text().replace(" ", "_") self.df = self.df[self.df["names"] == name] if len(self.df) == 0: self.showDialog(icon=QMessageBox.Warning, displayText="student with the name {} is not registered".format( name), windowTitle="Search Name") else: self.displayTable() def filter(self, selected): self.df = pd.read_csv(self.attendance, index_col=0) if selected == "Good": attend_frac = self.df.sum(axis=1)/self.df.count(axis=1, numeric_only=True) self.df = self.df[attend_frac >= 0.9] elif selected == "Warning": attend_frac = self.df.sum(axis=1)/self.df.count(axis=1, numeric_only=True) self.df = self.df[(attend_frac >= 0.8) & (attend_frac < 0.9)] elif selected == "Danger": attend_frac = self.df.sum(axis=1)/self.df.count(axis=1, numeric_only=True) self.df = self.df[attend_frac < 0.8] else: self.df = pd.read_csv(self.attendance, index_col=0) self.searchText.setText("") self.displayTable() def displayTable(self): counts = list(round((self.df.sum(axis=1)/self.df.count(axis=1, numeric_only=True))*100, 2)) colorMap = {"Good": QtGui.QColor(0, 255, 0, 150), "Warning": QtGui.QColor( 255, 255, 0, 150), "Danger": QtGui.QColor(255, 0, 0, 150)} self.df['attend_percent'] = counts df = self.df.loc[:, ['names', 'attend_percent']] df = df.rename({'names': 'Student Name', 'attend_percent': 'Attendance Percentage'}, axis=1) self.table.setRowCount(df.shape[0]) self.table.setColumnCount(df.shape[1]) self.table.setHorizontalHeaderLabels(df.columns) self.table.horizontalHeader().setStretchLastSection(True) for (i, row) in enumerate(df.values): if counts[i] >= 90: color = colorMap["Good"] elif counts[i] >= 80: color = colorMap["Warning"] else: color = colorMap["Danger"] for (j, data) in enumerate(row): self.table.setItem(i, j, QTableWidgetItem(str(data))) self.table.item(i, j).setBackground(color) header = self.table.horizontalHeader() header.setSectionResizeMode(QHeaderView.ResizeToContents) header.setSectionResizeMode(0, QHeaderView.Stretch) self.table.clicked.connect(self.detailAttendance) def detailAttendance(self): from detail_attendance import DetailAttendance self.detailAttendancePage = DetailAttendance(self.df, self.table.currentRow()) self.detailAttendancePage.show() self.close() def saveAsPdf(self): df = pd.read_csv(self.attendance, index_col=0) df = df.replace({0: "absent", 1: "present", None: "excused"}) df = df.rename({'names': 'Student Name'}, axis=1) dlg = QFileDialog(self) dlg.setFileMode(QFileDialog.Directory) outputPath = dlg.getExistingDirectory() baseFilename = 
os.path.split(self.attendance)[-1].split(".")[0] htmlFilename = baseFilename + ".html" pdfFilename = baseFilename + ".pdf" htmlFilePath = os.path.join(outputPath, htmlFilename) pdfFilePath = os.path.join(outputPath, pdfFilename) df.to_html(htmlFilePath) pdfkit.from_file(htmlFilePath, pdfFilePath) def back(self): from home import HomePage self.homePage = HomePage() self.homePage.show() self.close() def showDialog(self, icon, displayText, windowTitle): msg = QMessageBox() msg.setIcon(icon) msg.setText(displayText) msg.setWindowTitle(windowTitle) msg.setStandardButtons(QMessageBox.Ok) msg.exec_() if __name__ == '__main__': app = QApplication(sys.argv) window = ViewAttendance() window.show() app.exec_()
5,138
files/default/virtual_host_alias.py
Sainsburys/chef-websphere
7
2168648
# this script manages virtual hosts. import sys if len(sys.argv) < 4: print "Missing arguments. \n Usage: virtual_hosts.py <action> <alias_host> <alias_port>. \n Action must be add_alias or remove_alias." sys.exit(1) action = sys.argv[0] # can be add_alian or remove_alias vhost = sys.argv[1] alias_host = sys.argv[2] alias_port = sys.argv[3] def addVirtualHostAlias(virtualHostName, alias): virtualHost = AdminConfig.getid("/VirtualHost:" + virtualHostName) print "adding vhost alias " + virtualHost + " alias_host: " + alias[0] + " port: " + alias[1] AdminConfig.create("HostAlias", virtualHost, [["hostname", alias[0]], ["port", alias[1]]]) def removeVirtualHostAlias(virtualHostName, alias): virtualHost = AdminConfig.getid("/VirtualHost:" + virtualHostName) for a in toList(AdminConfig.showAttribute(virtualHost, 'aliases')): if AdminConfig.showAttribute(a, 'hostname') == alias[0] and AdminConfig.showAttribute(a, 'port') == alias[1]: print "removing vhost alias " + virtualHost + " alias_host: " + alias[0] + " port: " + alias[1] AdminConfig.remove(a) def virtualHostExists(virtualHostName): for vh in toList(AdminConfig.list("VirtualHost")): if AdminConfig.showAttribute(vh, "name") == virtualHostName: return 1 return 0 def aliasExists(virtualHostName, alias): for vh in toList(AdminConfig.list("VirtualHost")): if AdminConfig.showAttribute(vh, "name") == virtualHostName: for al in toList(AdminConfig.showAttribute(vh, 'aliases')): if AdminConfig.showAttribute(al, 'hostname') == alias[0] and AdminConfig.showAttribute(al, 'port') == alias[1]: return 1 return 0 def toList(inStr): outList=[] if (len(inStr)>0 and inStr[0]=='[' and inStr[-1]==']'): inStr = inStr[1:-1] tmpList = inStr.split(" ") else: tmpList = inStr.split("\n") for item in tmpList: item = item.rstrip(); if (len(item)>0): outList.append(item) return outList if action == 'add_alias': if virtualHostExists(vhost)==1 and aliasExists(vhost, [alias_host, alias_port])==0: addVirtualHostAlias(vhost, [alias_host, alias_port]) AdminConfig.save() else: print "vhost doesn't exist, or alias already exists" elif action == 'remove_alias': if aliasExists(vhost, [alias_host, alias_port]): removeVirtualHostAlias(vhost, [alias_host, alias_port]) AdminConfig.save() else: print "Missing is mismatched action paramater. Action must be add_alias or remove_alias. \n Usage: virtual_hosts.py <action> <alias_host> <alias_port>"
2,608
pyembroidery/DatReader.py
wwderw/pyembroidery
1
2171503
def read_barudan_dat(f, out): stitched_yet = False count = 0 while True: count += 1 byte = bytearray(f.read(3)) if len(byte) != 3: break ctrl = byte[0] y = -byte[1] x = byte[2] if ctrl & 0x80 == 0: # This bit should always be set, must be other dat type. return False if ctrl & 0x40 != 0: y = -y if ctrl & 0x20 != 0: x = -x if (ctrl & 0b11111) == 0: stitched_yet = True out.stitch(x, y) continue if (ctrl & 0b11111) == 1: out.move(x, y) continue if ctrl == 0xF8: break if ctrl == 0xE7: out.trim() continue if ctrl == 0xE8: if count > 1: out.stop() continue if 0xE9 <= ctrl < 0xF8: needle = ctrl - 0xE8 if stitched_yet: out.color_change() continue break # Uncaught Control out.end() return True def read_sunstar_dat_stitches(f, out): count = 0 while True: count += 1 byte = bytearray(f.read(3)) if len(byte) != 3: break x = byte[0] & 0x7F y = byte[1] & 0x7F if byte[0] & 0x80: x = -x if byte[1] & 0x80: y = -y y = -y ctrl = byte[2] if ctrl == 0x07: out.stitch(x, y) continue if ctrl == 0x04: out.move(x, y) continue if ctrl == 0x80: out.trim(x, y) continue if ctrl == 0x87: out.color_change() if x != 0 or y != 0: out.stitch(x, y) continue if ctrl == 0x84: # Initialized info. out.stitch(x, y) continue elif ctrl == 0: break break # Uncaught Control out.end() def read_sunstar_dat(f, out): # f.seek(0x02, 0) # stitches = read_int_16le(f) f.seek(0x100, 0) read_sunstar_dat_stitches(f, out) def read(f, out, settings=None): if not read_barudan_dat(f, out): f.seek(0, 0) read_sunstar_dat(f, out)
2,366
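The reader functions above only assume that the out argument exposes a small stitch-pattern interface (stitch, move, trim, stop, color_change, end). A minimal sketch of a stub consumer for exercising read() on a .dat file; the PrintPattern class and the file name are illustrative, not part of pyembroidery, and the import path is assumed from the file location.

# Hypothetical stub that logs the commands DatReader emits.
from pyembroidery.DatReader import read  # import path assumed from the file location


class PrintPattern:
    def stitch(self, x, y):
        print('stitch', x, y)

    def move(self, x, y):
        print('move', x, y)

    def trim(self, x=0, y=0):  # barudan calls trim(), sunstar calls trim(x, y)
        print('trim', x, y)

    def stop(self):
        print('stop')

    def color_change(self):
        print('color_change')

    def end(self):
        print('end')


with open('design.dat', 'rb') as f:  # placeholder file name
    read(f, PrintPattern())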
lambda-spacy/lambda_function.py
byrro/serverless-nlp
8
2171377
'''Entry point script for the AWS Lambda function''' from app_handler import request_handler def lambda_handler(event, context): '''Invocation handler for the Lambda function :arg event: (dict) event payload provided by the invoker :arg context: (dict) runtime context provided by AWS Lambda environment ''' response = request_handler( event=event, context=context, environ='local' if not context else 'aws', ) return response if __name__ == '__main__': lambda_handler( event={ 'data_type': 'named-entity', 'text': 'Lambda is an event-driven, serverless computing platform provided by Amazon as a part of the Amazon Web Services. It is a computing service that runs code in response to events and automatically manages the computing resources required by that code. It was introduced in November 2014. Headquartered in Seattle, USA and leaded by <NAME> (CEO), Amazon Web Services made $17.4 billion in revenue and $4.331 billion in profits in the year of 2017. Source: Wikipedia.org.', # NOQA 'model_name': 'en_core_web_sm', }, context=None, )
1,171
alfred-workflow-py3/extras/benchmarks/03-read-envvars/script.py
kw-lee/alfdaumdict
1
2169454
#!/usr/bin/python # encoding: utf-8 # # Copyright (c) 2016 <NAME> <<EMAIL>> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2016-07-9 # """ """ import sys from workflow import Workflow log = None def main(wf): """Do nothing.""" log.debug("datadir=%r", wf.datadir) if __name__ == "__main__": wf = Workflow() log = wf.logger sys.exit(wf.run(main))
398
keyphrase/dataset/million-paper/preprocess.py
memray/seq2seq-keyphrase
341
2170630
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Load the paper metadata from json, do preprocess (cleanup, tokenization for words and sentences) and export to json ''' import json import re import sys import nltk __author__ = "<NAME>" __email__ = "<EMAIL>" sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') def load_file(input_path): record_dict = {} count = 0 no_keyword_abstract = 0 with open(input_path, 'r') as f: for line in f: # clean the string line = re.sub(r'<.*?>|\s+|“', ' ', line.lower()) # load object record = json.loads(line) # store the name of input sentence text = record['abstract'] text = re.sub('[\t\r\n]', ' ', text) text = record['title'] + ' <EOS> ' + ' <EOS> '.join(sent_detector.tokenize(text)) + ' <EOS>' text = filter(lambda w: len(w)>0, re.split('\W',text)) record['tokens'] = text # store the terms of outputs keyphrases = record['keyword'] record['name'] = [filter(lambda w: len(w)>0, re.split('\W',phrase)) for phrase in keyphrases.split(';')] # filter out the duplicate record_dict[record['title']] = record count += 1 if len(record['keyword'])==0 or len(record['abstract'])==0: no_keyword_abstract += 0 record['filename'] = record['title'] print(record['title']) # print(record['abstract']) # print(record['token']) # print(record['keyword']) # print(record['name']) # print('') print('Total paper = %d' % count) print('Remove duplicate = %d' % len(record_dict)) print('No abstract/keyword = %d' % no_keyword_abstract) return record_dict.values() ''' Two ways to preprocess, -d 0 will export one abstract to one phrase, -d 1 will export one abstract to multiple phrases ''' if __name__ == '__main__': sys.argv = 'dataset/keyphrase/million-paper/all_title_abstract_keyword.json dataset/keyphrase/million-paper/processed_all_title_abstract_keyword_one2many.json 1'.split() if len(sys.argv) < 3: print 'Usage <keyword_input_json_file> <output_file> -d 0|1 \n' \ ' -d: format to output, 0 means one abstract to one keyphrase, 1 means one to many keyphrases' sys.exit(-1) input_file = sys.argv[0] output_file = sys.argv[1] records = load_file(input_file) print(len(records)) with open(output_file, 'w') as out: output_list = [] if sys.argv[2]=='0': for record in records: for keyphrase in record['name']: dict = {} dict['tokens'] = record['tokens'] dict['name'] = keyphrase dict['filename'] = record['filename'] output_list.append(dict) if sys.argv[2]=='1': for record in records: dict = {} dict['tokens'] = record['tokens'] dict['name'] = record['name'] dict['filename'] = record['filename'] output_list.append(dict) print(len(output_list)) out.write(json.dumps(output_list))
3,307
demo/instrument_demo/networking.py
Itaybre/py-ios-device
0
2170669
import json import os import sys sys.path.append(os.getcwd()) import time from _ctypes import Structure from ctypes import c_byte, c_uint16, c_uint32 from ios_device.servers.Instrument import InstrumentServer from ios_device.util import logging log = logging.getLogger(__name__) def networking(rpc): headers = { 0: ['InterfaceIndex', "Name"], 1: ['LocalAddress', 'RemoteAddress', 'InterfaceIndex', 'Pid', 'RecvBufferSize', 'RecvBufferUsed', 'SerialNumber', 'Kind'], 2: ['RxPackets', 'RxBytes', 'TxPackets', 'TxBytes', 'RxDups', 'RxOOO', 'TxRetx', 'MinRTT', 'AvgRTT', 'ConnectionSerial'] } msg_type = { 0: "interface-detection", 1: "connection-detected", 2: "connection-update", } def on_callback_message(res): from socket import inet_ntoa, htons, inet_ntop, AF_INET6 class SockAddr4(Structure): _fields_ = [ ('len', c_byte), ('family', c_byte), ('port', c_uint16), ('addr', c_byte * 4), ('zero', c_byte * 8) ] def __str__(self): return f"{inet_ntoa(self.addr)}:{htons(self.port)}" class SockAddr6(Structure): _fields_ = [ ('len', c_byte), ('family', c_byte), ('port', c_uint16), ('flowinfo', c_uint32), ('addr', c_byte * 16), ('scopeid', c_uint32) ] def __str__(self): return f"[{inet_ntop(AF_INET6, self.addr)}]:{htons(self.port)}" data = res.parsed if data[0] == 1: if len(data[1][0]) == 16: data[1][0] = str(SockAddr4.from_buffer_copy(data[1][0])) data[1][1] = str(SockAddr4.from_buffer_copy(data[1][1])) elif len(data[1][0]) == 28: data[1][0] = str(SockAddr6.from_buffer_copy(data[1][0])) data[1][1] = str(SockAddr6.from_buffer_copy(data[1][1])) print(msg_type[data[0]] + json.dumps(dict(zip(headers[data[0]], data[1])))) # print("[data]", res.parsed) rpc.register_channel_callback("com.apple.instruments.server.services.networking", on_callback_message) var = rpc.call("com.apple.instruments.server.services.networking", "replayLastRecordedSession").parsed log.debug(f"replay {var}") var = rpc.call("com.apple.instruments.server.services.networking", "startMonitoring").parsed log.debug(f"start {var}") time.sleep(10) var = rpc.call("com.apple.instruments.server.services.networking", "stopMonitoring").parsed log.debug(f"stopMonitoring {var}") rpc.stop() if __name__ == '__main__': rpc = InstrumentServer().init() networking(rpc) rpc.stop()
2,825
library/inspect_test.py
creativemindplus/skybison
278
2170686
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com) import inspect import math import unittest class InspectModuleTest(unittest.TestCase): def test_signature_with_function_returns_signature(self): def foo(arg0, arg1): pass result = inspect.signature(foo) self.assertEqual(str(result), "(arg0, arg1)") def test_signature_with_classmethod_returns_signature(self): class C: @classmethod def foo(cls, arg0, arg1): pass result = inspect.signature(C.foo) self.assertEqual(str(result), "(arg0, arg1)") def test_signature_with_dunder_call_returns_signature(self): class C: def __call__(self, arg0, arg1): pass instance = C() result = inspect.signature(instance) self.assertEqual(str(result), "(arg0, arg1)") def test_signature_with_c_function_raises_value_error(self): with self.assertRaises(ValueError): inspect.signature(math.log) def test_getmodule_with_frame_returns_module(self): from types import ModuleType frame = inspect.currentframe() module = inspect.getmodule(frame) self.assertIsInstance(module, ModuleType) self.assertIs(module.__name__, __name__) if __name__ == "__main__": unittest.main()
1,401
configuration/models.py
Parveen3300/Reans
0
2171480
""" Configuration Level Models """ from django.core.validators import MaxValueValidator # import django validators from django.core.validators import MinValueValidator from django.db import models from helper.models import AbstractCreatedByUpdatedBy from helper.models import AbstractDate from helper.models import AbstractMetaTag from helper.models import AbstractStatus ADMIN_MODELS = dict( LANGUAGE='Language Configuration', CURRENCY_MASTER='Currency Master', CONTACT_TYPE_REASON='Contact/Inquiry Type Reasons', RATING_PARAMETER='Rating Parameters', CANCELLATION_REASON='Cancellation Reasons', BUSINESS_TYPE='Business Type', PARAMETERS_SETTINGS='Parameter Setting', UNIT=' Unit Of Measurement' ) class Language( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """Language configurations model table with abstract all needfull models """ language_code = models.CharField(max_length=70, unique=True) language_name = models.CharField(max_length=70, verbose_name='Language', unique=True) class Meta: verbose_name_plural = verbose_name = ADMIN_MODELS['LANGUAGE'] db_table = 'language_configuration' def __str__(self): return self.language_code class CurrencyMaster( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """currency master model table with abstract all needfull models """ currency = models.CharField(max_length=30, unique=True, verbose_name='Currency Name') symbol = models.CharField(max_length=30) code_iso = models.CharField( max_length=30, null=True, blank=True, verbose_name='Code ISO') hex_symbol = models.CharField(max_length=30, null=True, blank=True, verbose_name='Hex Code') class Meta: verbose_name = ADMIN_MODELS['CURRENCY_MASTER'] verbose_name_plural = ADMIN_MODELS['CURRENCY_MASTER'] db_table = 'currency_master' def __str__(self): return self.currency class ContactTypesReasons( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """ Contact Types Reasons model table with abstract all needfull models """ contact_reasons = models.CharField(max_length=100, unique=True, verbose_name='contact type Reasons') class Meta: verbose_name = ADMIN_MODELS['CONTACT_TYPE_REASON'] verbose_name_plural = ADMIN_MODELS['CONTACT_TYPE_REASON'] db_table = 'contact_types_reasons' def __str__(self): return str(self.contact_reasons) class RatingParameter( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """ rating parameters for candidate and employer with abstract all needfull models """ rating_parameter = models.CharField( max_length=100, unique=True, verbose_name='Rating Parameter Name' ) rating_parameter_value = models.IntegerField( null=True, unique=True, validators=[ MaxValueValidator(5), MinValueValidator(1) ], verbose_name='Rating Points' ) class Meta: verbose_name = ADMIN_MODELS['RATING_PARAMETER'] verbose_name_plural = ADMIN_MODELS['RATING_PARAMETER'] db_table = "rating_parameter_configuration" def __str__(self): """ :return: rating parameter name """ return str(self.rating_parameter) class CancellationReason( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """ list of all cancellation reasons with abstract all needfull models """ cancel_reason_name = models.CharField(max_length=100, verbose_name='Cancellation Reason') cancel_reason_for = models.CharField(max_length=255) cancel_reason_details = models.CharField(max_length=255, blank=True, null=True, verbose_name='Description') class Meta: verbose_name = ADMIN_MODELS['CANCELLATION_REASON'] verbose_name_plural = 
ADMIN_MODELS['CANCELLATION_REASON'] db_table = 'cancellation_reason_configuration' def __str__(self): return self.cancel_reason_name class BusinessType( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """ BusinessType Model Table """ business_type = models.CharField(max_length=50, unique=True) description = models.CharField(max_length=200, null=True, blank=True) class Meta: verbose_name = ADMIN_MODELS['BUSINESS_TYPE'] verbose_name_plural = ADMIN_MODELS['BUSINESS_TYPE'] ordering = ['business_type'] db_table = 'business_type_manager' def __str__(self): return self.business_type class ParameterSetting( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): prefix = models.CharField(max_length=4, verbose_name='Inquiry Prefix') def __str__(self): return self.prefix class Meta: verbose_name = ADMIN_MODELS['PARAMETERS_SETTINGS'] verbose_name_plural = ADMIN_MODELS['PARAMETERS_SETTINGS'] db_table = 'parameter_settings' class UnitOfMeasurement( AbstractCreatedByUpdatedBy, AbstractDate, AbstractStatus, AbstractMetaTag ): """ UnitOfMeasurement """ unit_measurement = models.CharField(max_length=30, unique=True) short_form = models.CharField(max_length=10) description = models.CharField(max_length=200, blank=True, null=True) class Meta: verbose_name = ADMIN_MODELS['UNIT'] verbose_name_plural = ADMIN_MODELS['UNIT'] db_table = 'unit_of_measurement' def __str__(self): return self.unit_measurement
6,054
examples/submit.py
ltalirz/aiida-gudhi
0
2170803
# -*- coding: utf-8 -*- """Submit a test calculation on localhost. Usage: verdi run submit.py Note: This script assumes you have set up computer and code as in README.md. """ from aiida_gudhi.tests import get_code, TEST_DIR import os code = get_code(entry_point='gudhi.rdm') # set up calculation calc = code.new_calc() calc.label = "compute rips from distance matrix" calc.set_max_wallclock_seconds(1 * 60) calc.set_withmpi(False) calc.set_resources({"num_machines": 1, "num_mpiprocs_per_machine": 1}) # Prepare input parameters from aiida.orm import DataFactory Parameters = DataFactory('gudhi.rdm') parameters = Parameters(dict={'max-edge-length': 4.2}) calc.use_parameters(parameters) SinglefileData = DataFactory('singlefile') distance_matrix = SinglefileData( file=os.path.join(TEST_DIR, 'sample_distance.matrix')) calc.use_distance_matrix(distance_matrix) calc.store_all() calc.submit() #calc.submit_test(folder=gt.get_temp_folder())
951
15_Day_Python_type_errors/type errors.py
diegofregolente/30-Days-Of-Python
0
2171315
# print 'hello world'
# Missing parentheses in call to 'print'. Did you mean print('hello world')? - SyntaxError

try:
    print(a)
except Exception as error:  # a is not defined yet
    print(error, type(error))

numbers = [1, 2, 3]
try:
    print(numbers[3])  # numbers has no index 3; valid indices are 0, 1 and 2
except Exception as error:
    print(error, type(error))

try:
    import maths  # the module maths doesn't exist
except Exception as error:
    print(error, type(error))

try:
    import math
    print(math.PI)  # the right name is math.pi (lower case)
except Exception as error:
    print(error, type(error))

users = {'name': 'Diego', 'age': 24, 'country': 'Brazil'}
try:
    print(users['county'])  # typo: the key is 'country', not 'county'
except Exception as error:
    print(error, type(error))

try:
    3 + '4'  # an int cannot be added or concatenated to a str
except Exception as error:
    print(error, type(error))

try:
    3 + int('4')  # no error now, because '4' is converted to int first
except Exception as error:
    print(error, type(error))

try:
    print(int('Tres'))  # letters cannot be converted to int with base 10
except Exception as error:
    print(error, type(error))

try:
    from math import power  # this fails: the math function is named pow, not power
except Exception as error:
    print(error, type(error))

try:
    1 / 0  # dividing a number by zero is not allowed
except Exception as error:
    print(error, type(error))
1,508
tests/tango.py
hao1032/adbui
136
2171110
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/3/29 3:58 AM
# @Author  : tangonian
# @Site    :
# @File    : tango.py
# @Software: PyCharm
import subprocess
from lxml import etree
from adbui import Device
from adbui import Util
import logging

logging.basicConfig(level=logging.DEBUG)

d = Device()
ui = d.get_uis_by_ocr('')
print(ui)
353
setup.py
flynneva/usgs-topo-tiler
63
2171371
"""Setup for usgs-topo-tiler.""" from setuptools import find_packages, setup with open("README.md") as f: readme = f.read() # Runtime requirements. inst_reqs = ["numpy", "rasterio", "rio-tiler>=2.0a6"] extra_reqs = { "cli": [ "boto3", "click", "cogeo_mosaic", "geopandas", "mercantile", "pandas", "python-dateutil", "requests", "shapely"]} setup( name="usgs-topo-tiler", version="0.2.0", description="rio-tiler plugin to read mercator tiles from USGS Topo Quads", long_description=readme, long_description_content_type="text/markdown", classifiers=[ "Intended Audience :: Information Technology", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: GIS"], keywords="COG cogeo usgs topo raster map tiler gdal rasterio", author="<NAME>", author_email="<EMAIL>", url="https://github.com/kylebarron/usgs-topo-tiler", license="MIT", entry_points={ 'console_scripts': ['usgs-topo-tiler=usgs_topo_tiler.cli:main', ], }, packages=find_packages( exclude=["ez_setup", "scripts", "examples", "tests"]), include_package_data=True, zip_safe=False, install_requires=inst_reqs, extras_require=extra_reqs, )
1,451
CSVReader/csv_reader.py
kelleyjean/SenseHat_ETL
0
2170657
import csv


class CsvReader:
    def __init__(self, filepath):
        self.data = []
        # Read the CSV file and keep every row as a list of strings.
        with open(filepath) as text_data:
            csv_data = csv.reader(text_data, delimiter=',')
            for row in csv_data:
                self.data.append(row)
209
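With the constructor fixed as above, usage is a one-off read into memory; the file name below is a placeholder.

reader = CsvReader('measurements.csv')  # hypothetical file name
for row in reader.data:
    print(row)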
2016/day6/day6.py
naitmare01/Adventofcode
0
2169224
#!/usr/bin/python3 # -*- coding: utf-8 -*- import argparse def arguments(): # Handle command line arguments parser = argparse.ArgumentParser(description='Adventofcode.') parser.add_argument('-f', '--file', required=True) args = parser.parse_args() return args def char_frequency(str1): dict = {} for n in str1: keys = dict.keys() if n in keys: dict[n] += 1 else: dict[n] = 1 return dict def listToString(s): # initialize an empty string str1 = "" # traverse in the string for ele in s: str1 += ele # return string return str1 def main(): args = arguments() with open(args.file) as file: input_file = file.read().strip() input_file = input_file.splitlines() part1 = "" part2 = "" new_result = list(map(list, zip(*input_file))) for row in new_result: most_common = char_frequency(listToString(row)) most_common = {k: v for k, v in sorted(most_common.items(), key=lambda item: item[1], reverse=True)} most_common_part2 = {k: v for k, v in sorted(most_common.items(), key=lambda item: item[1])} dict_pairs = most_common.items() pairs_iterator = iter(dict_pairs) first_pair = next(pairs_iterator) part1 += first_pair[0] dict_pairs_part2 = most_common_part2.items() pairs_iterator_part2 = iter(dict_pairs_part2) first_pair_part2 = next(pairs_iterator_part2) part2 += first_pair_part2[0] print(part1) print(part2) if __name__ == '__main__': main()
1,613
data/test/python/aea407d10a62408c301dd07e94c1180690d80ad5handlers.py
harshp8l/deep-learning-lang-detection
84
2170856
# -*- coding: utf-8 -*- from django.db.models import get_model from django.conf import settings from transifex.resources.signals import post_save_translation from transifex.addons.lotte.signals import lotte_save_translation def save_copyrights(sender, **kwargs): """ Save copyright info for po files. """ resource = kwargs['resource'] language = kwargs['language'] if resource.i18n_type != 'PO': return copyrights = kwargs['copyrights'] CModel = get_model('copyright', 'Copyright') for c in copyrights: owner = c[0] years = c[1] for year in years: CModel.objects.assign( resource=resource, language=language, owner=owner, year=year ) def connect(): post_save_translation.connect(save_copyrights) lotte_save_translation.connect(save_copyrights)
881
example_patch.py
MadPUG/Introduction-to-Testing-in-Python
1
2171374
import requests


class GitHubUser(object):
    url = 'https://api.github.com/users/{0}'

    def __init__(self, username):
        self.username = username
        self.url = self.url.format(username)

    def fetch(self):
        r = requests.get(self.url)
        return r.json()

    def short_info(self):
        user = self.fetch()
        return """------------------
Username: {0}
Name: {1}
Link: {2}
""".format(user['login'], user['name'], user['html_url'])
486
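Given the repository this file comes from, the natural companion is a test that patches out the network call so short_info can be checked without hitting the GitHub API. A minimal sketch using unittest.mock; the canned payload and the assumption that the test sits next to example_patch.py are illustrative.

import unittest
from unittest.mock import patch

from example_patch import GitHubUser  # assumes the test lives next to example_patch.py


class TestGitHubUser(unittest.TestCase):
    @patch.object(GitHubUser, 'fetch')
    def test_short_info_uses_fetched_fields(self, mock_fetch):
        # Canned payload containing only the keys short_info reads.
        mock_fetch.return_value = {
            'login': 'octocat',
            'name': 'The Octocat',
            'html_url': 'https://github.com/octocat',
        }
        info = GitHubUser('octocat').short_info()
        self.assertIn('octocat', info)
        self.assertIn('The Octocat', info)


if __name__ == '__main__':
    unittest.main()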
mqtt-rpc-python/tests/logging_tests/main.py
mastash3ff/MQTT-RPC
5
2171043
import logging
import logging.config

# load my module
import my_module

# load the logging configuration
logging.config.fileConfig('logging.ini')

my_module.foo()

bar = my_module.Bar()
bar.bar()
196
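The script above only runs if my_module and logging.ini sit next to it. A minimal sketch of what my_module might look like, assuming it simply emits records from a module-level logger; the names foo and Bar are taken from the calls above, the log messages are illustrative.

# my_module.py -- hypothetical contents
import logging

logger = logging.getLogger(__name__)


def foo():
    logger.info('foo() was called')


class Bar:
    def bar(self):
        logger.info('Bar.bar() was called')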
ML_model.py
MichelKu/Stress_Hospital_Staff
0
2170888
# After timing the different models, the choice fell on Random Forest.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import sqlite3

sc = StandardScaler()
classifier11 = RandomForestClassifier(n_estimators = 150, random_state = 0)
#classifier12 = RandomForestClassifier(n_estimators = 150, random_state = 0)


def train_col11():
    con = sqlite3.connect("StressDatabase.db")
    dataset = pd.read_sql_query("SELECT * FROM stress", con)
    X = dataset.iloc[:, [0,1,2,4,5,7,8,9]].values  # change columns here to find out the effect.
    y = dataset.iloc[:, 10].values  # put the dependent variable here.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 0)
    X_train = sc.fit_transform(X_train)
    classifier11.fit(X_train, y_train)


def predict_col11(age, gender, Specialization, workHours, patientPerDay, overtimeWorkInterest, overtimeWorkPaid, sector):
    y_pred = classifier11.predict(sc.transform([[age, gender, sector, Specialization, workHours, patientPerDay, overtimeWorkInterest, overtimeWorkPaid]]))
    #print('from inside ML_model col11: ', y_pred)
    y_pred = int(y_pred)
    return { "col11_predict": y_pred }


# def train_col12():
#     con = sqlite3.connect("StressDatabase.db")
#     dataset = pd.read_sql_query("SELECT * FROM stress", con)
#     X = dataset.iloc[:, [0,1,2,4,5,7,8,9]].values  # change columns here to find out the effect.
#     y = dataset.iloc[:, 11].values  # put the dependent variable here.
#     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 0)
#     X_train = sc.fit_transform(X_train)
#     classifier12.fit(X_train, y_train)

# def predict_col12(age, gender, Specialization, workHours, patientPerDay, overtimeWorkInterest, overtimeWorkPaid, sector):
#     y_pred = classifier12.predict(sc.transform([[age, gender, sector, Specialization, workHours, patientPerDay, overtimeWorkInterest, overtimeWorkPaid]]))
#     #print('from inside ML_model col12: ', y_pred)
#     y_pred = int(y_pred)
#     return{"col12-predict": y_pred}

#train_col11()
#train_col12()
#predict_col11(2,1,6,1,1,1)
# predict_col12(2,2,2,2,2,1)
2,268
ui.py
dansjack/airsana
1
2171410
from pathlib import Path from util.profile import * def main_loop(file): """ :param file: path of file containing a list of profiles :return: dict. The profile selected by the user """ while True: print('Commands:\n1. Run script with existing profile\n2. Add/update/' 'delete profile\n3. Run script with test profile') run_existing = input( 'Enter a command by digit: ').lower() if run_existing[0] == '1': # Run with existing profile names = get_profile_names(file) if not names: print('No profiles found. Add a profile to profiles.json ' 'and re-start the program.') break else: print('Enter the name of one of the profiles listed') print_profile_names(names) profile_name = input('profile name: ') if profile_name not in names: print('That profile name does not exist. Returning to ' 'previous menu...') else: return get_profile(profile_name, file) elif run_existing[0] == '2': # Make new profile print(f'\nUNDER CONSTRUCTION - Currently, the fastest way to ' f'edit a profile is to manipulate it directly within' f' /profiles.json\n') # make_profile(file) # print('New profile created') elif run_existing[0] == '3': # Run with example profile print('Running script with test profile...') return get_profile('TEST PROFILE', (Path(__file__).parent / "./profile_example.json").resolve()) elif run_existing[0] == 'q': break else: print("Did not understand command. Please enter 'y', 'n', " "or 'q'...")
1,915
me/storage/postgresql/__init__.py
me-env/me.
2
2171214
from me.logger import MeLogger, DEBUG from me.storage.i_database import IDatabase from me.storage.postgresql.tables import type_to_table, type_to_schema, psql_to_python import os import yaml from sqlalchemy import create_engine class PostgreSQL(IDatabase): def __init__(self): self.log = MeLogger(name=__name__, level=DEBUG) self.engine = self.connect() self.auto_values = ['id', 'timestamp'] # try: TODO self.con = self.engine.raw_connection() self.log.debug('con', self.con) # self.con.cursor().execute("SET SCHEMA '{}'".format('me_banking')) # except: # pass def isConnectionValid(self): return bool(self.engine) and bool(self.con) def connect(self): try: engine = self.__getConnectionFromProfile() self.log.info("Connected to PostgreSQL database!") except IOError as e: self.log.error("Failed to get database connection!", e) return None return engine def addRow(self, data_type, data): """ Add data 'data' in database (should be one row) # TODO make this generic # TODO check if the type are correctly formatted to be push """ self.con.cursor().execute(""" INSERT INTO me_banking.transactions (transaction_id, transaction_name, account_id, account_name, amount, type, tx_date, refunded, double_checked, payment) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (transaction_id, account_id) DO NOTHING; """, data) self.con.commit() def addMultipleRows(self, data_type, data): """ Add data 'data' in database (should be one a list of rows) # TODO make this generic # TODO check if the type are correctly formatted to be push """ self.log.debug("add multiple rows in psql data:", data) data = [list(row.values()) for row in data] self.con.cursor().executemany(""" INSERT INTO me_banking.transactions (transaction_id, transaction_name, account_id, account_name, amount, type, tx_date, refunded, double_checked, payment) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (transaction_id, account_id) DO NOTHING;; """, data) self.con.commit() def getAllRows(self, data_type): """ Add data 'data' in database (should be one a list of rows) # TODO make this generic # TODO check if the type are correctly formatted to be push """ self.log.debug("get All rows in psql", __name__) cur = self.con.cursor() cur.execute(""" SELECT * FROM me_banking.transactions ; """) data = cur.fetchall() self.log.debug("retrieved data", data) return data def reset(self): """ Reset database content """ self.con.cursor().execute(""" TRUNCATE me_banking.transactions """) self.con.commit() def getScheme(self, data_type, include_auto_fields=False): """ Get Scheme to respect for data type 'data_type' :param data_type :param include_auto_fields: if True then id fields and timestamp fields are included in scheme (please note that these values are supposed to be filled automatically by postgres) """ self.log.debug('datatype value', data_type.value) cur = self.con.cursor() cur.execute(f""" select column_name, data_type from information_schema.columns where table_schema = '{type_to_schema[data_type.value]}' AND table_name = '{type_to_table[data_type.value]}' ORDER BY ordinal_position; """) res = cur.fetchall() check = {i[0]: psql_to_python(i[1], 'check') for i in res if i[0] not in self.auto_values or include_auto_fields} convert = {i[0]: psql_to_python(i[1], 'convert') for i in res if i[0] not in self.auto_values or include_auto_fields} self.log.debug('return check', check) self.log.debug('return convert', convert) return check, convert def __getConnectionFromProfile(self, config_file_name="default_profile.yaml"): """ Sets up database 
connection from config file. Input: config_file_name: File containing PGHOST, PGUSER, PGPASSWORD, PGDATABASE, PGPORT, which are the credentials for the PostgreSQL database """ with open(config_file_name, 'r') as f: config = yaml.load(f, Loader=yaml.FullLoader) if not ('PGHOST' in config.keys() and 'PGUSER' in config.keys() and 'PGPASSWORD' in config.keys() and 'PGDATABASE' in config.keys() and 'PGPORT' in config.keys()): raise Exception('Bad config file: ' + config_file_name) return self.__getEngine(config['PGDATABASE'], config['PGUSER'], config['PGHOST'], config['PGPORT'], config['PGPASSWORD']) def __getEngine(self, db, user, host, port, passwd): """ Get SQLalchemy engine using credentials. Input: db: database name user: Username host: Hostname of the database server port: Port number passwd: <PASSWORD> """ url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format( user=user, passwd=passwd, host=host, port=port, db=db) self.log.info('Connect to psql with url', url) engine = create_engine(url, pool_size=50) return engine def addTestRow(self): self.con.cursor().execute(""" INSERT INTO me_banking.transactions (transaction_id, transaction_name, account_id, account_name, amount, type, tx_date, timestamp, refunded, double_checked, payment) VALUES ('0001', %s, '00001', 'acc_test', 1.5, '{"type1", "type2"}', '1996-12-02', '1996-12-02', True, False, True); """, ('test',)) self.con.commit() if __name__ == '__main__': db = PostgreSQL() db.addTestRow()
6,311
parquet-to-csv.py
delftdata/data-preparation-tools
0
2171255
import pandas as pd
import sys

dest_folder = sys.argv[1]
filename = sys.argv[2]

df = pd.read_parquet(filename + '.parquet')
df.to_csv(dest_folder + "/" + filename + '.csv')
175
ACME/utility/istorch.py
mauriziokovacic/ACME
3
2171568
import torch
from .istype import *


def istorch(*obj):
    """
    Returns whether or not the input is a PyTorch tensor

    Parameters
    ----------
    *obj : object...
        a sequence of objects

    Returns
    -------
    bool
        True if the inputs are PyTorch Tensors, False otherwise
    """
    return istype(torch.Tensor, *obj)
348
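A quick usage sketch for the helper above; the import path is assumed from the file location, and the expected outputs follow the docstring (all arguments must be tensors for the result to be True).

import torch
from ACME.utility.istorch import istorch  # import path assumed from the file location

print(istorch(torch.zeros(3)))           # expected: True
print(istorch(torch.ones(2), [1, 2]))    # expected: False, the list is not a torch.Tensor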
psychic/plots.py
breuderink/psychic
3
2171222
import matplotlib.pyplot as plt import numpy as np import scalpplot from scalpplot import plot_scalp from positions import POS_10_5 from scipy import signal def plot_timeseries(frames, time=None, offset=None, color='k', linestyle='-'): frames = np.asarray(frames) if offset == None: offset = np.max(np.std(frames, axis=0)) * 3 if time == None: time = np.arange(frames.shape[0]) plt.plot(time, frames - np.mean(frames, axis=0) + np.arange(frames.shape[1]) * offset, color=color, ls=linestyle) def plot_scalpgrid(scalps, sensors, locs=POS_10_5, width=None, clim=None, cmap=None, titles=None): ''' Plots a grid with scalpplots. Scalps contains the different scalps in the rows, sensors contains the names for the columns of scalps, locs is a dict that maps the sensor-names to locations. Width determines the width of the grid that contains the plots. Cmap selects a colormap, for example plt.cm.RdBu_r is very useful for AUC-ROC plots. Clim is a list containing the minimim and maximum value mapped to a color. Titles is an optional list with titles for each subplot. Returns a list with subplots for further manipulation. ''' scalps = np.asarray(scalps) assert scalps.ndim == 2 nscalps = scalps.shape[0] subplots = [] if not width: width = int(min(8, np.ceil(np.sqrt(nscalps)))) height = int(np.ceil(nscalps/float(width))) if not clim: clim = [np.min(scalps), np.max(scalps)] plt.clf() for i in range(nscalps): subplots.append(plt.subplot(height, width, i + 1)) plot_scalp(scalps[i], sensors, locs, clim=clim, cmap=cmap) if titles: plt.title(titles[i]) # plot colorbar next to last scalp bb = plt.gca().get_position() plt.colorbar(cax=plt.axes([bb.xmax + bb.width/10, bb.ymin, bb.width/10, bb.height]), ticks=np.linspace(clim[0], clim[1], 5).round(2)) return subplots
1,878
fastapi-master-api/app/repository/models/users.py
SionAbes/fullstack-porfolio
1
2171306
from sqlalchemy import Boolean, Column, DateTime, Integer, String from .base import BaseModel, TimesMixin class User(TimesMixin, BaseModel): __tablename__ = "users" id = Column( Integer, primary_key=True, ) last_login_at = Column(DateTime(True)) is_superuser = Column(Boolean, nullable=False) first_name = Column(String(150), nullable=True) last_name = Column(String(150), nullable=True) email = Column(String, index=True, nullable=False) password = Column(String(128), nullable=False)
542
fingan/models/wgan_gp.py
Droyd97/fingan
0
2170866
import os import logging import torch from torch import nn from torch.nn.utils import spectral_norm from torch.utils.data import DataLoader from torch.autograd import grad as torch_grad from torch.autograd import Variable import torch.optim as optim from tqdm import tqdm from fingan.models.model import Model class WGAN_GP(Model): """ WGAN """ def __init__(self, wkdir=None, is_logging=False, workers=2, batch_size=128, num_epochs=200, save_point=50, ngpu=0, n_critic=5, lr=0.0002, beta1=0.5): super().__init__('WGAN_GP', wkdir, is_logging, batch_size, workers, num_epochs, save_point, ngpu) # Set device to run on self.device = torch.device("cuda:0" if (torch.cuda.is_available() and self.ngpu > 0) else "cpu") # Number of critic iterations per iteration of the generator self.n_critic = n_critic # Learning rate for optimizer self.lr = lr # Beta1 hyperparameter for Adam optimizers self.beta1 = 0.5 # Create Generator self.net_g = self.Generator(self.ngpu).to(self.device) # Create Critic self.net_c = self.Critic(self.ngpu).to(self.device) # Noise length self.noiseLength = 50 # Track Losses self.losses = {'g': torch.zeros(num_epochs), 'c': torch.zeros(num_epochs), 'gp': [], 'gradNorm': [], 'iter': 0} # Gradient penalty weight self.gp_weight = 0.001 # optimisers self.optimiserC = optim.Adam(self.net_c.parameters(), lr=self.lr, betas=(self.beta1, 0.999)) self.optimiserG = optim.Adam(self.net_g.parameters(), lr=self.lr, betas=(self.beta1, 0.999)) class Generator(nn.Module): def __init__(self, ngpu): super().__init__() self.ngpu = ngpu self.genArchitecture = nn.Sequential( nn.Linear(50, 100), nn.LeakyReLU(0.2, inplace=True), AddDimension(), spectral_norm(nn.Conv1d(1, 32, 3, padding=1), n_power_iterations=10), nn.Upsample(200), spectral_norm(nn.Conv1d(32, 32, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), nn.Upsample(400), spectral_norm(nn.Conv1d(32, 32, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), nn.Upsample(800), spectral_norm(nn.Conv1d(32, 1, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), SqueezeDimension(), nn.Linear(800, 100) ) def forward(self, input): return self.genArchitecture(input) class Critic(nn.Module): def __init__(self, ngpu): super().__init__() self.ngpu = ngpu self.criticArchitecture = nn.Sequential( AddDimension(), spectral_norm(nn.Conv1d(1, 32, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool1d(2), spectral_norm(nn.Conv1d(32, 32, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), nn.MaxPool1d(2), spectral_norm(nn.Conv1d(32, 32, 3, padding=1), n_power_iterations=10), nn.LeakyReLU(0.2, inplace=True), nn.Flatten(), nn.Linear(800, 50), nn.LeakyReLU(0.2, inplace=True), nn.Linear(50, 15), nn.LeakyReLU(0.2, inplace=True), nn.Linear(15, 1) ) def forward(self, input): return self.criticArchitecture(input) def train(self, dataset, name=None): # Set working directory to current one if not provided if self.wkdir is None: self.wkdir = os.getcwd() # Set file path if name is None: file_path = self.wkdir + self.name + "-save.pt" else: file_path = self.wkdir + name + "-save.pt" start_epoch = 0 if os.path.isfile(file_path): epoch = self.load(file_path) start_epoch = epoch - 1 print("Loaded saved model") dataloader = DataLoader(dataset, self.batch_size) print("Starting Training Loop...") for epoch in tqdm(range(start_epoch, self.num_epochs)): for i, data in enumerate(dataloader, 0): # Critic b_size = data.size()[0] cReal = self.net_c(data) noise = torch.randn(b_size, 
self.noiseLength, device=self.device) cGenerated = self.net_c(self.net_g(noise)) gp = self.gradient_penalty(data, self.net_g(noise)) # Get loss self.optimiserC.zero_grad() d_loss = (cGenerated.mean() - cReal.mean()) + gp d_loss.backward() self.optimiserC.step() if i % self.n_critic == 0: # Generator self.optimiserG.zero_grad() noise = torch.randn(b_size, self.noiseLength, device=self.device) gGenerated = self.net_c(self.net_g(noise)) g_loss = - gGenerated.mean() g_loss.backward() self.optimiserG.step() self.losses['c'][epoch] = d_loss.data.item() self.losses['g'][epoch] = g_loss.data.item() if self.is_logging: logging.info("Critic loss: {}".format(self.losses['c'][epoch])) logging.info("Generator loss: {}".format(self.losses['g'][epoch])) if (epoch + 1) % self.save_point == 0: self.save(epoch + 1, path=file_path) def generate(self, number): noise = torch.randn(number, self.noiseLength, device=self.device) gGenerated = self.net_g(noise) return gGenerated def save(self, epoch, path=None): if path is None: path = self.wkdir torch.save({ 'epoch': epoch, 'critic_state_dict': self.net_c.state_dict(), 'generator_state_dict': self.net_g.state_dict(), 'critic_optimizer_state_dict': self.optimiserC.state_dict(), 'generator_optimizer_state_dict': self.optimiserG.state_dict(), # 'loss': loss, }, path) def load(self, file_path): checkpoint = torch.load(file_path) self.net_c.load_state_dict(checkpoint['critic_state_dict']) self.net_g.load_state_dict(checkpoint['generator_state_dict']) self.optimiserC.load_state_dict(checkpoint['critic_optimizer_state_dict']) self.optimiserG.load_state_dict(checkpoint['generator_optimizer_state_dict']) epoch = checkpoint['epoch'] return epoch # loss = checkpoint['loss'] def summary(self): pass def wasserstein_loss(self): pass def gradient_penalty(self, real_data, generated_data): b_size = real_data.size()[0] alpha = torch.rand(b_size, 1).expand_as(real_data) interpolated = Variable(alpha * real_data.data + (1 - alpha) * generated_data.data, requires_grad=True) prob_interploated = self.net_c(interpolated) gradients = torch_grad(outputs=prob_interploated, inputs=interpolated, grad_outputs=torch.ones(prob_interploated.size()), create_graph=True, retain_graph=True)[0] gradients = gradients.view(b_size, -1) self.losses['gradNorm'].append(gradients.norm(2, dim=1).mean().data.item()) gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12) return self.gp_weight * ((gradients_norm - 1) ** 2).mean() class AddDimension(nn.Module): """ AddDimension """ def forward(self, x): return x.unsqueeze(1) class SqueezeDimension(nn.Module): """ Squeeze Dimension """ def forward(self, x): return x.squeeze(1)
8,120
test/test_alpha_tree.py
tgale96/memory-address-trace-tools
1
2170039
import pytest import numpy as np from lib.AlphaTree import AlphaTree def test_process_access(): """ tests AlphaTree::ProcessAccess function with sequence of memory accesses""" # init default AlphaTree a = AlphaTree() reuseDist = 0 memAddr = 0b111111111 a.ProcessAccess(memAddr, reuseDist) # count should be all 0's assert np.array_equal(a.reuseCount[reuseDist], np.zeros((7, 2), dtype = np.float)) a.ProcessAccess(memAddr, reuseDist) sol = np.zeros((7, 2), dtype = np.float) sol[:, 1] = 1.0 # should have every level reused assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b111110000 a.ProcessAccess(memAddr, reuseDist) # top 5 reused, then 1 non-reuse & 1 nothing sol[2:7, 1] += 1 sol[1, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b000000000 a.ProcessAccess(memAddr, reuseDist) # should change except for first layer non-reuse sol[6, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b000000100 a.ProcessAccess(memAddr, reuseDist) # 6-1 reuse and 0 non-reuse sol[1:7, 1] += 1 sol[0, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b111000000 a.ProcessAccess(memAddr, reuseDist) # 6th non-reuse, 5-4 reuse, 3rd non-reuse, 2-0 nothing sol[6, 0] += 1 sol[4:6, 1] += 1 sol[3, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) def test_get_family(): """ tests node family functions for AlphaTree""" # init AlphaTree a = AlphaTree() # start with root nodeID = 0 # check left child ID assert 1 == a.GetChild(nodeID, 0) # check right child ID assert 2 == a.GetChild(nodeID, 1) nodeID = 1 # check parent is root assert 0 == a.GetParent(nodeID) # check sibling is 2 assert 2 == a.GetSibling(nodeID) # check left child assert 3 == a.GetChild(nodeID, 0) # check right child assert 4 == a.GetChild(nodeID, 1) nodeID = 3 # check left child assert 7 == a.GetChild(nodeID, 0) # check right child assert 8 == a.GetChild(nodeID, 1) # check parent assert 1 == a.GetParent(nodeID) # check sibling is 4 assert 4 == a.GetSibling(nodeID) def test_tree_level(): """ tests AlphaTree::GetTreeLevel function""" # init AlphaTree() a = AlphaTree() block = 512 # check level is top level assert 6 == a.GetTreeLevel(block) block = 8 assert 0 == a.GetTreeLevel(block) block = 32 assert 2 == a.GetTreeLevel(block) def test_larger_root_size(): """ tests AlphaTree::ProcessAccess with rootSize = 1024""" # init AlphaTree a = AlphaTree(1024) reuseDist = 0 memAddr = 0b1111111111 a.ProcessAccess(memAddr, reuseDist) sol = np.zeros((8,2), dtype = np.float) assert np.array_equal(a.reuseCount[reuseDist], sol) a.ProcessAccess(memAddr, reuseDist) # all reused sol[:, 1] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b0000000000 a.ProcessAccess(memAddr, reuseDist) # one non-reuse at top sol[7, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b1111100000 a.ProcessAccess(memAddr, reuseDist) # 7 non-reuse, 3-6 reuse, 2 non-reuse, else nothing sol[7, 0] += 1 sol[3:7, 1] += 1 sol[2, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) def test_smaller_root_size(): """ tests AlphaTree::ProcessAccess with rootSize = 256""" # init AlphaTree a = AlphaTree(256) reuseDist = 0 memAddr = 0b11111111 a.ProcessAccess(memAddr, reuseDist) sol = np.zeros((6, 2), dtype = np.float) assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b00000000 a.ProcessAccess(memAddr, reuseDist) sol[5, 0] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) memAddr = 0b11110000 a.ProcessAccess(memAddr, reuseDist) # 5 non-reuse, 2-4 reuse, 1 non-reuse 
sol[5, 0] += 1 sol[1,0] += 1 sol[2:5, 1] += 1 assert np.array_equal(a.reuseCount[reuseDist], sol) def test_load_alphas(): """ tests AlphaTree::LoadAlphas to ensure values are stored correctly""" # init AlphaTree a = AlphaTree() test_alphas = np.zeros((3, 7, 2), dtype = np.float) test_alphas[:, :, 1] = np.arange(7) / 7.0 test_alphas[:, :, 0] = 1 - (np.arange(7) / 7.0) a.LoadAlphas(test_alphas) assert np.array_equal(test_alphas, a.reuseCount) def test_generate_access(): """ test AlphaTree::GenerateAccess to ensure accesses are tracked correctly and ouput is generated correctly based on the path take when traversing the tree from root to leaf""" # init AlphaTree a = AlphaTree() reuseDist = 0 # load alpha values test_alphas = np.zeros((3, 7, 2), dtype = np.float) test_alphas[:, :, 1] = np.arange(7) / 7.0 test_alphas[:, :, 0] = 1 - (np.arange(7) / 7.0) a.LoadAlphas(test_alphas) # test first access test = a.GenerateAccess(reuseDist) # traverse tree to find theoretical output node = 0 block = 512 results = 0 while a.GetRightChild(node) < len(a.tree): if a.tree[a.GetLeftChild(node)]: node = a.GetLeftChild(node) else: node = a.GetRightChild(node) results = results | (block >> 1) block = block >> 1 # compare results assert test == results # test second access test = a.GenerateAccess(reuseDist) # traverse tree to find theoretical output node = 0 block = 512 results = 0 while a.GetRightChild(node) < len(a.tree): if a.tree[a.GetLeftChild(node)]: node = a.GetLeftChild(node) else: node = a.GetRightChild(node) results = results | (block >> 1) block = block >> 1 # compare results assert test == results # load different distribution test_alphas = np.ones((3, 7, 2), dtype = np.float) test_alphas /= 2.0 a.LoadAlphas(test_alphas) # test 3rd access test = a.GenerateAccess(reuseDist) # traverse tree to find theoretical output node = 0 block = 512 results = 0 while a.GetRightChild(node) < len(a.tree): if a.tree[a.GetLeftChild(node)]: node = a.GetLeftChild(node) else: node = a.GetRightChild(node) results = results | (block >> 1) block = block >> 1 # compare results assert test == results def test_different_reuse_distances(): """ tests the calculation of alpha values using different reuse distances""" # init default AlphaTree a = AlphaTree() reuseDist = 0 memAddr = 0b111111111 a.ProcessAccess(memAddr, reuseDist) sol = np.zeros((3, 7, 2), dtype = np.float) # count should be all 0's assert np.array_equal(a.reuseCount, sol) reuseDist = 1 a.ProcessAccess(memAddr, reuseDist) sol[reuseDist, :, 1] += 1 assert np.array_equal(a.reuseCount, sol) reuseDist = 2 a.ProcessAccess(memAddr, reuseDist) sol[reuseDist, :, 1] += 1 assert np.array_equal(a.reuseCount, sol) reuseDist = 0 memAddr = 0b011111111 a.ProcessAccess(memAddr, reuseDist) # non-reuse for first node, no previous history for rest sol[reuseDist, 6, 0] += 1 assert np.array_equal(a.reuseCount, sol) def test_different_num_bins(): """ tests an alphaTree with a different number of reuse distance bins""" # init AlphaTree a = AlphaTree(bins = 7) reuseDist = 0 memAddr = 0b111111111 a.ProcessAccess(memAddr, reuseDist) sol = np.zeros((7, 7, 2), dtype = np.float) # count should be all 0's assert np.array_equal(a.reuseCount, sol) reuseDist = 20 a.ProcessAccess(memAddr, reuseDist) # all reused for rd >= 6 sol[6, :, 1] += 1 # count should be all 0's assert np.array_equal(a.reuseCount, sol) reuseDist = 4 a.ProcessAccess(memAddr, reuseDist) # all reused for rd >= 6 sol[4, :, 1] += 1 # count should be all 0's assert np.array_equal(a.reuseCount, sol) def test_normalize_reuse_count(): 
""" tests AlphaTree.NormalizeReuseCount to verify correct noralization""" # init alphaTree a = AlphaTree() values = np.ones((3, 7, 2), dtype = np.float) a.reuseCount = values a.NormalizeReuseCount() sol = np.zeros((3, 7, 2), dtype = np.float) sol[:,:,:] = .5 assert np.array_equal(a.reuseCount, sol) values = np.zeros((3, 7, 2), dtype = np.float) a.reuseCount = values a.NormalizeReuseCount() sol = np.zeros((3, 7, 2), dtype = np.float) sol[:, :, 1] = 1.0 assert np.array_equal(a.reuseCount, sol)
9,393
generate_sample.py
Tak-jae-ho/RGBD-GAN-pytorch
0
2171270
import argparse import os import sys from PIL import Image import numpy as np import torch import matplotlib.pyplot as plt import torch.nn as nn from networks import PGGANGenerator from util.save_results import generate_sample_rgbd, convert_batch_images_rgbd, save_batch_sample_rgbd def make_z(batch_size, z_size, device=torch.device('cuda')): z = torch.normal(0, 1, size=(batch_size, z_size, 1, 1)) z /= torch.sqrt(torch.sum(z * z, dim=1, keepdims=True) / z_size + 1e-8) return z.to(device) if __name__ == '__main__': # parse arguments parser = argparse.ArgumentParser() parser.add_argument("--cp_path", required=True, type=str, help="directory contains the generator checkpoint") parser.add_argument("--save_path", type=str, default="./generated_samples", help="directory sample saved") parser.add_argument("--grid_mode", type=bool, default=False, help="if True, make a sample images grid with 4 samples. else, make each images with 1 sample") parser.add_argument("--y_rotate", type=float, default=0.6108, help="y_angle value to rotate samples") args = parser.parse_args() save_path = args.save_path cp_path = args.cp_path grid_mode = args.grid_mode y_rotate = args.y_rotate if not os.path.exists(save_path): os.makedirs(save_path) cp = torch.load(cp_path) latent_size = cp['gen']['current_net.0.conv1.w'].shape[1] - 4 ch = cp['gen']['current_net.0.conv1.w'].shape[0] out_res = 2 ** (cp['depth'] + 1) print("latent size: %4d, channels: %4d, out resolution: %3d" % (latent_size, ch, out_res)) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") generator = PGGANGenerator(latent_size, out_res, ch=ch, rgbd=True).to(device) generator.load_state_dict(cp['gen']) generator.depth = cp['depth'] generator.alpha = cp['alpha'] generator.eval() latent_z = make_z(4, latent_size, device=device) sample_x = generate_sample_rgbd(generator, latent_z, y_rotate, device=device) if grid_mode: img, depth = convert_batch_images_rgbd(sample_x, rows=8) save_batch_sample_rgbd(img, path=os.path.join(save_path, "sample_%.5f.png" % y_rotate), depth=depth, axis=False) else: list_theta = torch.linspace(-y_rotate, y_rotate, 8) img, depth = convert_batch_images_rgbd(sample_x, rows=8, grid_mode=False) for num_sample in range(4): for num_angle in range(8): plt.figure() plt.imshow(img[num_sample * 8 + num_angle].permute(1, 2, 0)) plt.axis("off") plt.savefig( os.path.join(save_path, "image_sample_%d_angle_%d_%.5f.png" % (num_sample, num_angle, list_theta[num_angle].item())), bbox_inches="tight", pad_inches=0 ) plt.close() plt.figure() plt.imshow(depth[num_sample * 8 + num_angle].permute(1, 2, 0), cmap='plasma') plt.axis("off") plt.savefig( os.path.join(save_path, "depth_sample_%d_angle_%d_%.5f.png" % (num_sample, num_angle, list_theta[num_angle].item())), bbox_inches="tight", pad_inches=0, ) plt.close()
3,413
sample/hello.py
mountcedar/dbarchive
0
2170463
#!/usr/bin/env python
import logging
import numpy
from datetime import datetime

from dbarchive import Base

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    class Sample(Base):
        def __init__(self, maxval=10):
            self.base = "hoge"
            self.bin = numpy.arange(maxval)
            self.created = datetime.now()

    print 'dropping past sample collection'
    Sample.drop_collection()

    print 'create sample instance'
    sample01 = Sample(10)
    sample01.save()
    sample02 = Sample(3)
    sample02.save()

    print "query mongodb with custom constructor"
    for sample in Sample.objects.all():
        print 'sample: ', type(sample)
        print '\tbase: ', sample.base, type(sample.base)
        print '\tbin: ', sample.bin, type(sample.bin)
        print '\tcreated: ', sample.created, type(sample.created)

    print 'updating sample object'
    sample01.bin = numpy.arange(20)
    sample01.save()

    print "confirming the variable 'bin' is updated."
    for sample in Sample.objects.all():
        print 'sample: ', type(sample)
        print '\tbase: ', sample.base, type(sample.base)
        print '\tbin: ', sample.bin, type(sample.bin)
        print '\tcreated: ', sample.created, type(sample.created)

    print "query mongodb without custom constructor"
    for sample in Sample.native_objects().all():
        print 'sample: ', type(sample)
        print '\tbase: ', sample.base, type(sample.base)
        print '\tbin: ', sample.bin if 'bin' in sample.__dict__ else 'bin object is not found.'
        print '\tcreated: ', sample.created, type(sample.created)

    print "all task completed"
1,666
bs/demos/01_hello_world/objectives.py
SimplyKnownAsG/build-system
0
2171154
# import compilers and objectives
from bs import compilers, objectives

# the objective here is to create an executable
hello = objectives.Executable('hello_world', 'hello_world.c')

# select the compiler we'd like to use
c_compiler = compilers.get_compiler('c')

# compile the objective using said compiler
c_compiler.compile(hello)
344
api/views.py
choi-jiwoo/capstone-project-django
0
2171469
from django.shortcuts import render from rest_framework.response import Response from rest_framework.decorators import api_view from rest_framework.pagination import PageNumberPagination from api.models import Stay, Cafe, Res, CafeTag, ResTag, CafeKwrd, ResKwrd from api.serializers import StaySerializer, CafeSerializer, ResSerializer from api.serializers import CafeTagSerializer, ResTagSerializer from api.serializers import CafeKwrdSerializer, ResKwrdSerializer # cafe @api_view(['GET']) def get_cafe(request): paginator = PageNumberPagination() paginator.page_size = 10 params = request.GET.getlist('tag') if params: data = CafeTag.objects.filter(tag__in=params).values_list('store_id', flat=True) cafe_list = Cafe.objects.filter(id__in=data).order_by('-review_count')[:30] else: cafe_list = Cafe.objects.all().order_by('-review_count')[:30] result_page = paginator.paginate_queryset(cafe_list, request) serializer = CafeSerializer(result_page, many=True) return paginator.get_paginated_response(serializer.data) # restaurant @api_view(['GET']) def get_restaurant(request): paginator = PageNumberPagination() paginator.page_size = 10 params = request.GET.getlist('tag') if params: data = ResTag.objects.filter(tag__in=params).values_list('store_id', flat=True) res_list = Res.objects.filter(id__in=data).order_by('-review_count')[:30] else: res_list = Res.objects.all().order_by('-review_count')[:30] result_page = paginator.paginate_queryset(res_list, request) serializer = ResSerializer(result_page, many=True) return paginator.get_paginated_response(serializer.data) # keywords @api_view(['GET']) def get_cafe_kwrds(request): data = CafeKwrd.objects.all().order_by('id') serializer = CafeKwrdSerializer(data, many=True) return Response(serializer.data) @api_view(['GET']) def get_res_kwrds(request): data = ResKwrd.objects.all().order_by('id') serializer = ResKwrdSerializer(data, many=True) return Response(serializer.data) # stay @api_view(['GET']) def get_filter_stay(request): paginator = PageNumberPagination() paginator.page_size = 10 params = request.query_params district = params['district'] data = Stay.objects.filter(district=district) result_page = paginator.paginate_queryset(data, request) serializer = StaySerializer(result_page, many=True) return paginator.get_paginated_response(serializer.data)
2,505
evaluate.py
Talendar/neuroevolutionary_investor
2
2171062
""" Auxiliary functions for displaying the evaluation of a population of investors. @Author: <NAME> (Talendar) """ import matplotlib.pyplot as plt import matplotlib.ticker as mticker def static_plot(best_investors, ibov_var): """ Plots the performances of IBOVESPA and the best investors of the population. :param best_investors: performance history of the best investors. :param ibov_var: price history of IBOVESPA. """ interval = range(len(ibov_var)) ax = plt.subplot() plt.plot(interval, ibov_var, "g", linewidth=3, alpha=0.6) plt.plot(interval, best_investors[0][0], "r", linewidth=3, alpha=0.6) for i in best_investors[1:]: plt.plot(interval, i[0], linewidth=2, alpha=0.6) for line in ax.lines: y = line.get_ydata()[-1] ax.annotate('%0.2f%%' % y, xy=(1, y), xytext=(8, 0), color=line.get_color(), xycoords=('axes fraction', 'data'), textcoords='offset points', weight="bold") plt.legend(['IBOV'] + ["Investor %d" % (i + 1) for i in range(len(best_investors))], loc='upper left') plt.xlabel("Time (days)") plt.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f%%')) plt.show() def dynamic_plot(investor_history, ibov_var, print_ops, max_num_ops=7): """ Dynamically plots, over time, the performance of IBOVESPA and on investor. :param investor_history: performance history of the investor. :param ibov_var: price history of IBOVESPA. :param print_ops: if True, the investor's decisions (buy and sell operations) will be plot. :param max_num_ops: max number of operations to display on the screen at a time. """ # init plt.ion() figure, ax = plt.subplots() lines_ibov, = ax.plot([], [], "g", linewidth=3, alpha=0.6) lines_inv, = ax.plot([], [], "r", linewidth=3, alpha=0.6) BASE_PAUSE_TIME = 1 pause_time = BASE_PAUSE_TIME ax.set_autoscaley_on(True) ax.set_xlim(0, len(ibov_var)) ax.grid() plt.legend(['IBOV', "Investor"], loc='upper left') plt.xlabel("Time (days)") plt.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f%%')) figure.subplots_adjust(left=0.25, bottom=0.25) # speed slider spd_slider_ax = figure.add_axes([0.42, 0.07, 0.3, 0.05], facecolor='lightgoldenrodyellow') spd_slider = plt.Slider(spd_slider_ax, 'Speed', 0.2, 20, valinit=BASE_PAUSE_TIME) def spd_slider_on_changed(val): nonlocal pause_time pause_time = BASE_PAUSE_TIME / val spd_slider.on_changed(spd_slider_on_changed) # plot xdata = [] ydata_inv = [] ydata_ibov = [] pc_ann_inv = pc_ann_ibov = None op_points = [] op_annotations = [] for x in range(len(ibov_var)): # set data xdata.append(x) ydata_inv.append(investor_history[0][x]) ydata_ibov.append(ibov_var[x]) lines_inv.set_xdata(xdata) lines_inv.set_ydata(ydata_inv) lines_ibov.set_xdata(xdata) lines_ibov.set_ydata(ydata_ibov) # rescale ax.relim() ax.autoscale_view() # percentage annotation if pc_ann_ibov is not None: pc_ann_ibov.remove() pc_ann_ibov = ax.annotate('%0.2f%%' % ydata_ibov[-1], xy=(1, ydata_ibov[-1]), xytext=(8, 0), color=lines_ibov.get_color(), xycoords=('axes fraction', 'data'), textcoords='offset points', weight="bold") if pc_ann_inv is not None: pc_ann_inv.remove() pc_ann_inv = ax.annotate('%0.2f%%' % ydata_inv[-1], xy=(1, ydata_inv[-1]), xytext=(8, 0), color=lines_inv.get_color(), xycoords=('axes fraction', 'data'), textcoords='offset points', weight="bold") # op annotation if print_ops and investor_history[1][x][1] != 0: color = "g" if investor_history[1][x][0] == "BUY" else "r" p, = ax.plot([xdata[-1]], [ydata_inv[-1]], marker='o', markersize=5, color=color) op_points.append(p) op_annotations.append( ax.annotate("%d" % investor_history[1][x][1], 
xy=(xdata[-1], ydata_inv[-1]), xytext=(xdata[-1] - 0.25, ydata_inv[-1] - 0.25), color=color, weight="bold", fontsize=8, arrowprops={"arrowstyle": "->"})) if len(op_annotations) > max_num_ops: op_points.pop(0).remove() op_annotations.pop(0).remove() # remove the oldest annotation # draw and delay plt.pause(pause_time) plt.ioff() plt.show()
4,654
kpc_connector_utils/common/base64.py
praiwann/kpc-connector-utils
0
2168423
import base64


def base64_decode(value):
    return base64.b64decode(value.encode()).decode()


def base64_encode(value):
    return base64.b64encode(value.encode()).decode()
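# A minimal usage sketch for the two helpers above (assumed example, not part
# of the original file): encode a string and confirm the round trip.
if __name__ == "__main__":
    original = "hello"
    encoded = base64_encode(original)
    # decoding the encoded string should give back the original text
    assert base64_decode(encoded) == original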
176
utils/video_utils.py
Cloudster1/deepStyleKeras
0
2170428
import imageio


class VideoReader:
    def __init__(self, file):
        self.vid = imageio.get_reader(file)

    def get_next(self):
        return self.vid.get_next_data()

    def __len__(self):
        return len(self.vid)

    def iter_data(self, function, *args):
        for num, image in enumerate(self.vid.iter_data()):
            print(f'frame number: {num + 1}')
            function(image, *args)

    def get_framerate(self):
        return self.vid.get_meta_data()['fps']


class VideoWriter:
    def __init__(self, file, framerate):
        # use the requested framerate instead of a hard-coded 30 fps
        self.writer = imageio.get_writer(file, fps=framerate)

    def push(self, img):
        self.writer.append_data(img)
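# A minimal usage sketch for the classes above; 'input.mp4' and 'output.mp4'
# are placeholder paths (assumptions), not files shipped with the project.
if __name__ == "__main__":
    reader = VideoReader('input.mp4')
    writer = VideoWriter('output.mp4', reader.get_framerate())
    # copy every frame of the input video into the output file
    reader.iter_data(writer.push)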
585
1_course/4_week/4_array_inversions/python/main.py
claytonjwong/Algorithms-UCSD
6
2170566
# python3
##
#
# Python3 implementation to count array inversions
#
# (c) Copyright 2019 <NAME> ( http://www.claytonjwong.com )
#
##

from typing import List, Tuple

# a result pairs the (partially sorted) array with its inversion count
Result = Tuple[List[int], int]


class Solution:
    def inversions(self, A: List[int]) -> int:
        A, cnt = self.go(A, 0, len(A))
        return cnt

    def go(self, A: List[int], L: int, R: int) -> Result:
        size = (R - L)
        if size < 2:
            return A[L:R], 0
        mid = L + (size // 2)
        A1, cnt1 = self.go(A, L, mid)
        A2, cnt2 = self.go(A, mid, R)
        A3, cnt3 = self.merge(A1, A2)
        return A3, cnt1 + cnt2 + cnt3

    def merge(self, A: List[int], B: List[int]) -> Result:
        C = []
        i = 0
        j = 0
        cnt = 0
        while i < len(A) and j < len(B):
            if A[i] <= B[j]:
                C.append(A[i])
                i += 1
            else:
                C.append(B[j])
                j += 1
                # every element still remaining in A is inverted with B[j]
                cnt += len(A[i:])
        C.extend(A[i:])
        C.extend(B[j:])
        return C, cnt


if __name__ == '__main__':
    solution = Solution()
    N = input()
    A = list(map(int, input().split()))
    ans = solution.inversions(A)
    print(ans)
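# A small worked example (assumed, not part of the original solution): the
# array [2, 3, 9, 2, 9] contains exactly two inversions, (3, 2) and (9, 2).
assert Solution().inversions([2, 3, 9, 2, 9]) == 2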
1,233
Chapter04/restful_python_2_04_02/Flask01/service/models.py
PacktPublishing/Hands-On-RESTful-Python-Web-Services-Second-Edition
45
2171226
from marshmallow import Schema, fields, pre_load from marshmallow import validate from flask_sqlalchemy import SQLAlchemy from flask_marshmallow import Marshmallow from passlib.apps import custom_app_context as password_context import re orm = SQLAlchemy() ma = Marshmallow() class ResourceAddUpdateDelete(): def add(self, resource): orm.session.add(resource) return orm.session.commit() def update(self): return orm.session.commit() def delete(self, resource): orm.session.delete(resource) return orm.session.commit() class User(orm.Model, ResourceAddUpdateDelete): id = orm.Column(orm.Integer, primary_key=True) name = orm.Column(orm.String(50), unique=True, nullable=False) # I save the hash for the password (I don't persist the actual password) password_hash = orm.Column(orm.String(120), nullable=False) creation_date = orm.Column(orm.TIMESTAMP, server_default=orm.func.current_timestamp(), nullable=False) def verify_password(self, password): return password_context.verify(password, self.password_hash) def check_password_strength_and_hash_if_ok(self, password): if len(password) < 8: return 'The password is too short. Please, specify a password with at least 8 characters.', False if len(password) > 32: return 'The password is too long. Please, specify a password with no more than 32 characters.', False if re.search(r'[A-Z]', password) is None: return 'The password must include at least one uppercase letter.', False if re.search(r'[a-z]', password) is None: return 'The password must include at least one lowercase letter.', False if re.search(r'\d', password) is None: return 'The password must include at least one number.', False if re.search(r"[ !#$%&'()*+,-./[\\\]^_`{|}~"+r'"]', password) is None: return 'The password must include at least one symbol.', False self.password_hash = password_context.hash(password) return '', True def __init__(self, name): self.name = name class Notification(orm.Model, ResourceAddUpdateDelete): id = orm.Column(orm.Integer, primary_key=True) message = orm.Column(orm.String(250), unique=True, nullable=False) ttl = orm.Column(orm.Integer, nullable=False) creation_date = orm.Column(orm.TIMESTAMP, server_default=orm.func.current_timestamp(), nullable=False) notification_category_id = orm.Column(orm.Integer, orm.ForeignKey('notification_category.id', ondelete='CASCADE'), nullable=False) notification_category = orm.relationship('NotificationCategory', backref=orm.backref('notifications', lazy='dynamic' , order_by='Notification.message')) displayed_times = orm.Column(orm.Integer, nullable=False, server_default='0') displayed_once = orm.Column(orm.Boolean, nullable=False, server_default='false') @classmethod def is_message_unique(cls, id, message): existing_notification = cls.query.filter_by(message=message).first() if existing_notification is None: return True else: if existing_notification.id == id: return True else: return False def __init__(self, message, ttl, notification_category): self.message = message self.ttl = ttl self.notification_category = notification_category class NotificationCategory(orm.Model, ResourceAddUpdateDelete): id = orm.Column(orm.Integer, primary_key=True) name = orm.Column(orm.String(150), unique=True, nullable=False) @classmethod def is_name_unique(cls, id, name): existing_notification_category = cls.query.filter_by(name=name).first() if existing_notification_category is None: return True else: if existing_notification_category.id == id: return True else: return False def __init__(self, name): self.name = name class 
NotificationCategorySchema(ma.Schema): id = fields.Integer(dump_only=True) # Minimum length = 3 characters name = fields.String(required=True, validate=validate.Length(3)) url = ma.URLFor('service.notificationcategoryresource', id='<id>', _external=True) notifications = fields.Nested('NotificationSchema', many=True, exclude=('notification_category',)) class NotificationSchema(ma.Schema): id = fields.Integer(dump_only=True) # Minimum length = 5 characters message = fields.String(required=True, validate=validate.Length(5)) ttl = fields.Integer() creation_date = fields.DateTime() notification_category = fields.Nested(NotificationCategorySchema, only=['id', 'url', 'name'], required=True) displayed_times = fields.Integer() displayed_once = fields.Boolean() url = ma.URLFor('service.notificationresource', id='<id>', _external=True) @pre_load def process_notification_category(self, data): notification_category = data.get('notification_category') if notification_category: if isinstance(notification_category, dict): notification_category_name = notification_category.get('name') else: notification_category_name = notification_category notification_category_dict = dict(name=notification_category_name) else: notification_category_dict = {} data['notification_category'] = notification_category_dict return data class UserSchema(ma.Schema): id = fields.Integer(dump_only=True) name = fields.String(required=True, validate=validate.Length(3)) url = ma.URLFor('service.userresource', id='<id>', _external=True)
5,900
src/db/db.py
hgf777-br/space-flight-news-20210823
0
2170452
from fastapi.responses import JSONResponse ESQUEMA = "space" TABELA = "articles" CAMPOS = """ id_space, title, url, imageUrl, newsSite, summary, publishedAt, updateddAt, featured, launch_id, launch_provider, events_id, events_provider """ class Db: def __init__(self, conn): self._db = conn # Rotinas para a inserção inicial dos dados def checar_schema(self, schema: str): sql = f""" SELECT exists( SELECT nspname FROM pg_catalog.pg_namespace pn WHERE nspname='{schema}' ) AS schema_exists """ cur = self._db.cursor() cur.execute(sql) rs = cur.fetchone() cur.close() return rs[0] def criar_schema(self, schema: str): try: sql = f"CREATE SCHEMA {schema}" cur = self._db.cursor() cur.execute(sql) cur.close() self._db.commit() except: return False return True def checar_table(self, table: str, schema: str): sql = f"""SELECT exists( SELECT tablename FROM pg_catalog.pg_tables tb WHERE tablename='{table}' and schemaname='{schema}' ) AS table_exists """ cur = self._db.cursor() cur.execute(sql) rs = cur.fetchone() cur.close() return rs[0] def criar_table(self, table: str, schema: str, campos: str): try: sql = f"CREATE TABLE {schema}.{table}({campos})" cur = self._db.cursor() cur.execute(sql) cur.close() self._db.commit() except: return False return True def inserir_dados(self, table: str, schema: str, campos: str, dados: list): try: sql = f"INSERT INTO {schema}.{table} ({campos}) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" cur = self._db.cursor() for a in dados: cur.execute(sql, a.listar_dados()) cur.close() self._db.commit() except: return False return True # Rotinas para a API def _recuperar_ultimo_article(self): rs = None try: sql = f"SELECT * FROM {ESQUEMA}.{TABELA} ORDER BY id DESC LIMIT 1" cur = self._db.cursor() cur.execute(sql) rs = cur.fetchone() cur.close() except: return None ds = self._criar_dict(rs) return ds def recuperar_article(self, id: int): rs = None try: sql = f"SELECT * FROM {ESQUEMA}.{TABELA} WHERE id={id}" cur = self._db.cursor() cur.execute(sql) rs = cur.fetchone() cur.close() except: return None if rs is None: return JSONResponse(status_code=404, content={"message": "Não encontramos este ID na Base de Dados"}) else: ds = self._criar_dict(rs) return ds def recuperar_article_range(self, offset: int, limit: int): rs = None try: sql = f"SELECT * FROM {ESQUEMA}.{TABELA} WHERE id>={offset} ORDER BY id LIMIT {limit}" cur = self._db.cursor() cur.execute(sql) rs = cur.fetchall() cur.close() except: return None return [self._criar_dict(r) for r in rs] def apagar_article(self, id: int): ds = self.recuperar_article(id) try: sql = f"DELETE FROM {ESQUEMA}.{TABELA} WHERE id={id}" cur = self._db.cursor() cur.execute(sql) cur.close() self._db.commit() except: return None return ds def inserir_article(self, article: dict): ds = list(article.values()) ev = list(ds.pop()[0].values()) la = list(ds.pop()[0].values()) ds += (la + ev) try: sql = f"INSERT INTO {ESQUEMA}.{TABELA} ({CAMPOS}) VALUES (0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" cur = self._db.cursor() cur.execute(sql, ds) cur.close() self._db.commit() except: return None return self._recuperar_ultimo_article() def editar_article(self, id: int, article: dict): ds = list(article.values()) ev = list(ds.pop()[0].values()) la = list(ds.pop()[0].values()) ds += (la + ev) if isinstance(self.recuperar_article(id), dict): sql = f"""UPDATE {ESQUEMA}.{TABELA} SET title = '{ds[0]}', url = '{ds[1]}', imageUrl = '{ds[2]}', newsSite = '{ds[3]}', summary = '{ds[4]}', publishedAt = '{ds[5]}', updateddAt = '{ds[6]}', featured = 
'{ds[7]}', launch_id = '{ds[8]}', launch_provider = '{ds[9]}', events_id = '{ds[10]}', events_provider = '{ds[11]}' WHERE id = {id} """ cur = self._db.cursor() cur.execute(sql, ds) cur.close() self._db.commit() return self.recuperar_article(id) return JSONResponse(status_code=404, content={"message": "Não encontramos este ID na Base de Dados"}) def _criar_dict(self, rs): ds = { "id": rs[0], "title": rs[2], "url": rs[3], "imageUrl": rs[4], "newsSite": rs[5], "summary": rs[6], "publishedAt": rs[7], "updatedAt": rs[8], "featured": rs[9] } ds["launches"] = [{"id": rs[10], "provider": rs[11]}] if rs[10] != "" else [] ds["events"] = [{"id": rs[12], "provider": rs[13]}] if rs[12] != "" else [] return ds def fechar(self): self._db.close()
6,259
leetcode/0-250/127-middleNode.py
palash24/algorithms-and-data-structures
23
2169236
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution:
    def middleNode(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        slow, fast = head, head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
        return slow

    def middleNode2(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        return slow

    def middleNode3(self, head):
        A = [head]
        while A[-1].next:
            A.append(A[-1].next)
        return A[len(A) // 2]
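# A minimal usage sketch (assumed example, not from the original file):
# build the list 1 -> 2 -> 3 -> 4 -> 5 and confirm the middle node is 3.
if __name__ == "__main__":
    head = ListNode(1)
    node = head
    for value in range(2, 6):
        node.next = ListNode(value)
        node = node.next
    assert Solution().middleNode(head).val == 3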
836
tests/conftest.py
jobomix/ariadne-relay
0
2170037
from dataclasses import dataclass from typing import Dict from ariadne import InterfaceType, make_executable_schema from graphql import GraphQLSchema import pytest from ariadne_relay import RelayQueryType, resolve_node_query_sync from ariadne_relay.node import NodeObjectType @dataclass(frozen=True) class Foo: id: int # noqa: A003 @pytest.fixture def type_defs() -> str: return """ type Query { node(id: ID!): Node foos( after: String before: String first: Int last: Int ): FoosConnection! } interface Node { id: ID! } type PageInfo { hasNextPage: Boolean! hasPreviousPage: Boolean! startCursor: String endCursor: String } type Foo implements Node { id: ID! } type FooEdge { cursor: String! node: Foo } type FoosConnection { pageInfo: PageInfo! edges: [FooEdge]! } """ @pytest.fixture def foo_nodes() -> Dict[str, Foo]: return {str(i): Foo(id=i) for i in range(10)} @pytest.fixture def query_type(foo_nodes: Dict[str, Foo]) -> RelayQueryType: query_type = RelayQueryType() query_type.set_field("node", resolve_node_query_sync) query_type.set_connection("foos", lambda *_: list(foo_nodes.values())) return query_type @pytest.fixture def node_interface_type() -> InterfaceType: node_interface_type = InterfaceType("Node") node_interface_type.set_type_resolver(lambda obj, *_: obj.__class__.__name__) return node_interface_type @pytest.fixture def foo_type(foo_nodes: Dict[str, Foo]) -> NodeObjectType: foo_type = NodeObjectType("Foo") foo_type.set_instance_resolver(lambda id, *_: foo_nodes[id]) return foo_type @pytest.fixture def schema( type_defs: str, query_type: RelayQueryType, node_interface_type: InterfaceType, foo_type: NodeObjectType, ) -> GraphQLSchema: return make_executable_schema( type_defs, [query_type, node_interface_type, foo_type], ) @pytest.fixture def node_query() -> str: return "query($id: ID!) { node(id: $id) { __typename, id } }" @pytest.fixture def connection_query() -> str: return """ { foos { edges { cursor node { __typename id } } pageInfo { hasNextPage hasPreviousPage startCursor endCursor } } } """
2,778
djangoreport/urls.py
gabrielferreira/djangoreport
1
2170903
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin

admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djangoreport.views.home', name='home'),
    # url(r'^djangoreport/', include('djangoreport.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    url(r'^admin/', include(admin.site.urls)),
)
487
easy/1029-Two City Scheduling.py
Davidxswang/leetcode
2
2169899
""" https://leetcode.com/problems/two-city-scheduling/ There are 2N people a company is planning to interview. The cost of flying the i-th person to city A is costs[i][0], and the cost of flying the i-th person to city B is costs[i][1]. Return the minimum cost to fly every person to a city such that exactly N people arrive in each city. Example 1: Input: [[10,20],[30,200],[400,50],[30,20]] Output: 110 Explanation: The first person goes to city A for a cost of 10. The second person goes to city A for a cost of 30. The third person goes to city B for a cost of 50. The fourth person goes to city B for a cost of 20. The total minimum cost is 10 + 30 + 50 + 20 = 110 to have half the people interviewing in each city. Note: 1 <= costs.length <= 100 It is guaranteed that costs.length is even. 1 <= costs[i][0], costs[i][1] <= 1000 """ # time complexity: O(nlogn), space complexity: O(n) # this is provided by @logan138 in the discussion area. # assume all the people have been sent to A, we need to calculate the refund of sending them to B for each one of them # refund is: cost[i][0]-cost[i][1] class Solution: def twoCitySchedCost(self, costs: List[List[int]]) -> int: for i in range(len(costs)): costs[i].append(costs[i][0]-costs[i][1]) costs.sort(key=lambda x:x[2]) return sum(cost[0] for cost in costs) - sum(costs[i][2] for i in range(len(costs)//2,len(costs)))
1,422
features/steps/prs_steps.py
gthb/python-pptx
0
2170243
import logging import os from behave import given, when, then from hamcrest import (assert_that, has_item, is_, is_not, equal_to, greater_than) from StringIO import StringIO from pptx import packaging from pptx import Presentation from pptx.util import Inches def absjoin(*paths): return os.path.abspath(os.path.join(*paths)) thisdir = os.path.split(__file__)[0] scratch_dir = absjoin(thisdir, '../_scratch') test_file_dir = absjoin(thisdir, '../../test/test_files') basic_pptx_path = absjoin(test_file_dir, 'test.pptx') saved_pptx_path = absjoin(scratch_dir, 'test_out.pptx') test_image_path = absjoin(test_file_dir, 'python-powered.png') test_text = "python-pptx was here!" # logging.debug("saved_pptx_path is ==> '%s'\n", saved_pptx_path) # given --------------------------------------------------- @given('a clean working directory') def step(context): if os.path.isfile(saved_pptx_path): os.remove(saved_pptx_path) @given('an initialized pptx environment') def step(context): pass @given('I have an empty presentation open') def step(context): context.prs = Presentation() @given('I have a reference to a blank slide') def step(context): context.prs = Presentation() slidelayout = context.prs.slidelayouts[6] context.sld = context.prs.slides.add_slide(slidelayout) @given('I have a reference to a slide') def step(context): context.prs = Presentation() slidelayout = context.prs.slidelayouts[0] context.sld = context.prs.slides.add_slide(slidelayout) # when ---------------------------------------------------- @when('I add a new slide') def step(context): slidelayout = context.prs.slidemasters[0].slidelayouts[0] context.prs.slides.add_slide(slidelayout) @when("I add a picture stream to the slide's shape collection") def step(context): shapes = context.sld.shapes x, y = (Inches(1.25), Inches(1.25)) with open(test_image_path) as f: stream = StringIO(f.read()) shapes.add_picture(stream, x, y) @when("I add a picture to the slide's shape collection") def step(context): shapes = context.sld.shapes x, y = (Inches(1.25), Inches(1.25)) shapes.add_picture(test_image_path, x, y) @when("I add a text box to the slide's shape collection") def step(context): shapes = context.sld.shapes x, y = (Inches(1.00), Inches(2.00)) cx, cy = (Inches(3.00), Inches(1.00)) sp = shapes.add_textbox(x, y, cx, cy) sp.text = test_text @when('I construct a Presentation instance with no path argument') def step(context): context.prs = Presentation() @when('I open a basic PowerPoint presentation') def step(context): context.prs = Presentation(basic_pptx_path) @when('I save the presentation') def step(context): if os.path.isfile(saved_pptx_path): os.remove(saved_pptx_path) context.prs.save(saved_pptx_path) @when("I set the title text of the slide") def step(context): context.sld.shapes.title.text = test_text # then ---------------------------------------------------- @then('I receive a presentation based on the default template') def step(context): prs = context.prs assert_that(prs, is_not(None)) slidemasters = prs.slidemasters assert_that(slidemasters, is_not(None)) assert_that(len(slidemasters), is_(1)) slidelayouts = slidemasters[0].slidelayouts assert_that(slidelayouts, is_not(None)) assert_that(len(slidelayouts), is_(11)) @then('I see the pptx file in the working directory') def step(context): assert_that(os.path.isfile(saved_pptx_path)) minimum = 30000 actual = os.path.getsize(saved_pptx_path) assert_that(actual, is_(greater_than(minimum))) @then('the image is saved in the pptx file') def step(context): pkgng_pkg = 
packaging.Package().open(saved_pptx_path) partnames = [part.partname for part in pkgng_pkg.parts if part.partname.startswith('/ppt/media/')] assert_that(partnames, has_item('/ppt/media/image1.png')) @then('the picture appears in the slide') def step(context): prs = Presentation(saved_pptx_path) sld = prs.slides[0] shapes = sld.shapes classnames = [sp.__class__.__name__ for sp in shapes] assert_that(classnames, has_item('Picture')) @then('the text box appears in the slide') def step(context): prs = Presentation(saved_pptx_path) textbox = prs.slides[0].shapes[0] textbox_text = textbox.textframe.paragraphs[0].runs[0].text assert_that(textbox_text, is_(equal_to(test_text))) @then('the pptx file contains a single slide') def step(context): prs = Presentation(saved_pptx_path) assert_that(len(prs.slides), is_(equal_to(1))) @then('the text appears in the title placeholder') def step(context): prs = Presentation(saved_pptx_path) title_shape = prs.slides[0].shapes.title title_text = title_shape.textframe.paragraphs[0].runs[0].text assert_that(title_text, is_(equal_to(test_text)))
4,975
neighbour_app/models.py
otienonick/Thee_neighbourhood_Django
1
2170928
from django.db import models from django.contrib.auth.models import User from cloudinary.models import CloudinaryField # Create your models here. class Profile(models.Model): username = models.CharField(max_length = 255,blank=True) user = models.OneToOneField(User,on_delete=models.CASCADE) email = models.EmailField() image = CloudinaryField('image') identity = models.IntegerField(null=True ,default='12345678') updated = models.DateTimeField(auto_now=True) created = models.DateTimeField(auto_now_add=True) hood = models.CharField(max_length=255,default='mtaa yako') location = models.CharField(max_length=255,default='areacode') occupants = models.IntegerField(null=True,blank=True,default='5000') def __str__(self): return f'{self.username}-{self.created}' def save_profile(self): self.save() def create_neighbourhood(self): self.save() def delete_neighbourhood(self): self.delete() def update_neighbourhood(self): self.save() def update_occupants(self): self.save() @classmethod def find_neighbourhood(cls,neighbourhood_id = None): neighbourhood = cls.objects.filter(id = neighbourhood_id) return neighbourhood class Business(models.Model): name = models.CharField(max_length=255) user = models.ForeignKey(User,on_delete=models.CASCADE) image = CloudinaryField('image') neighbourhood_id = models.ForeignKey(Profile,on_delete=models.CASCADE) email = models.EmailField(unique=True) phone_number = models.IntegerField(null=True,blank=True) def __str__(self): return f'{self.name}-{self.neighbourhood_id}' def create_business(self): self.save() def delete_business(self): self.delete() def update_business(self): self.save() @classmethod def find_business(cls,business_id = None): business = cls.objects.filter(id = business_id) return business class Post(models.Model): title = models.CharField(max_length=80) content = models.TextField() image = CloudinaryField('image') updated = models.DateTimeField(auto_now=True) created = models.DateTimeField(auto_now_add=True) author = models.ForeignKey(User,on_delete=models.CASCADE,related_name='posts') def __str__(self): return str(self.title[:30]) def create_post(self): self.save() def delete_post(self): self.delete() def update_post(self): self.save() @classmethod def find_post(cls,post_id = None): post = cls.objects.filter(id = post_id) return post @classmethod def search_by_title(cls,search_term): post = cls.objects.filter(title__icontains = search_term) # We filter the model data using the __icontains query filter return post class Meta: ordering = ['-created']
2,991
rule-vetting/tests/test_datasets.py
jeremy-goldwasser/rule-vetting
0
2167313
from os.path import join as oj
import importlib
import os

import rulevetting
import rulevetting.api.util

DATA_PATH = oj(os.path.dirname(os.path.abspath(__file__)), '..', 'data')


def test_datasets(project):
    """Check that each dataset is implemented
    """
    if not project == 'None':
        project_ids = [project]
    else:
        project_ids = rulevetting.api.util.get_project_ids()
    project_module_names = [f'rulevetting.projects.{project_id}.dataset'
                            for project_id in project_ids]

    for project_module_name in project_module_names:
        module = importlib.import_module(project_module_name)
        dset = module.Dataset()

        # pipeline should run
        df_train, df_tune, df_test = dset.get_data(data_path=DATA_PATH, save_csvs=False)
        assert df_tune.shape[0] > 0, 'Tune set must not be empty'
        assert df_train.shape[0] > df_tune.shape[0], 'Train set should be larger than tune set'
        for df in [df_train, df_tune, df_test]:
            assert 'outcome' in df.columns, 'Each df must have the outcome contained in a column named "outcome"'

        # strings should be filled in
        str_funcs = [dset.get_outcome_name, dset.get_dataset_id]
        for str_func in str_funcs:
            s = str_func()
            assert isinstance(s, str), f'Metadata function {s} should return a string'
1,412
Utils/features.py
jose-tapia/Hyper-heuristic-Knapsack
1
2169264
from typing import List

import numpy as np

from Utils.knapsack import Item

# Features considered for the characterization of an instance
featuresCalculation = {
    'NORM_MEAN_WEIGHT':   lambda w, p: np.mean(w) / np.max(w),
    'NORM_MEDIAN_WEIGHT': lambda w, p: np.median(w) / np.max(w),
    'NORM_STD_WEIGHT':    lambda w, p: np.std(w) / np.max(w),
    'NORM_MEAN_PROFIT':   lambda w, p: np.mean(p) / np.max(p),
    'NORM_MEDIAN_PROFIT': lambda w, p: np.median(p) / np.max(p),
    'NORM_STD_PROFIT':    lambda w, p: np.std(p) / np.max(p),
    'NORM_CORRELATION':   lambda w, p: np.corrcoef(w / np.max(w), p / np.max(p))[0, 1] / 2 + 0.5 if len(w) > 1 else 0
}


def getFeature(featureName: str, items: List[Item]):
    # Calculate the feature for the given instance
    w = [item.getWeight() for item in items]
    p = [item.getProfit() for item in items]
    if featureName in featuresCalculation:
        return featuresCalculation[featureName](w, p)
    else:
        print('Invalid feature: ', featureName)
        return None


def getAllFeatures(items: List[Item]):
    # Calculate the characterization of the instance
    w = [item.getWeight() for item in items]
    p = [item.getProfit() for item in items]
    return [featureFunction(w, p) for featureFunction in featuresCalculation.values()]
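# A small illustration of the normalized features above (assumed weights and
# profits, not from the original project). The lambdas are called directly so
# no Item objects are needed: weights [2, 4, 6] and profits [10, 20, 30] give
# a normalized mean weight of mean(w)/max(w) = 4/6.
if __name__ == "__main__":
    w, p = [2, 4, 6], [10, 20, 30]
    print(featuresCalculation['NORM_MEAN_WEIGHT'](w, p))   # ~0.6667
    print(featuresCalculation['NORM_CORRELATION'](w, p))   # 1.0 (perfectly correlated)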
1,285
trishul/assignments1/assignments1.py
JediRhymeTrix/AssignmentSubmission
1
2170446
#!C:\Python34\python import cgi import sqlite3 import os #from http.cookies import* form = cgi.FieldStorage() #path='/var/www/html/canteendb.db' con=sqlite3.connect('canteendb.db') cur=con.cursor() #c=SimpleCookie() curQ = 'Untitled-3.html' print ("Content-type:text/html\r\n\r\n") z = """ <!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <link rel="stylesheet" href="cssmenu/styles.css"> <title>assignment</title> <style> @import url(http://fonts.googleapis.com/css?family=Roboto:400,300,500); </style> <title>Untitled Document</title> <script src="http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js"></script> <script src="jquery-linedtextarea.js"></script> <link href="jquery-linedtextarea.css" type="text/css" rel="stylesheet" /> <style> body { background-color: #F2F2F2 } table { margin: 100; width: 100%; } div#t_area { background-color: #333549; background-color: #; color: #DDDDDD; box-shadow: 2px 2px 5px 1px rgba(0, 0, 0, 0.2); } textarea { background-color: #333549; color: #DDDDDD; width: 100%; box-shadow: 2px 2px 5px 1px rgba(0, 0, 0, 0.2); } iframe { box-shadow: 2px 2px 5px 1px rgba(0, 0, 0, 0.2); } input.submit { box-shadow: 2px 2px 5px 1px rgba(0, 0, 0, 0.2); } input.submit: { box-shadow: 2px 2px 5px 1px rgba(0, 0, 0, 0.2); } </style> </head> <body> <table> <tbody> <tr>Question: <td><iframe name='question' width="600px" height="400px" src=%s></iframe></td> <td></td> <td> <form> <div id="t_area">Your Code Here: <textarea class="lined" rows="25" cols="100" name='answer' id='answer'> JavaScript was originally developed by Brendan Eich of Netscape under the name Mocha, which was later renamed to LiveScript, and finally to JavaScript. The change of name from LiveScript to JavaScript roughly coincided with Netscape adding support for Java technology in its Netscape Navigator web browser. JavaScript was first introduced and deployed in the Netscape browser version 2.0B3 in December 1995. The naming has caused confusion, giving the impression that the language is a spin-off of Java, and it has been characterized by many as a marketing ploy by Netscape to give JavaScript the cachet of what was then the hot new web-programming language. 
</textarea> </div> <input class="submit" type='submit' name='submit' onClick=init()/> </form></td> </tr> </tbody> </table> <script> $(function() { $(".lined").linedtextarea( {selectedLine: 1} ); }); </script> <scrip src='../dist/autosize.js'> </script> <script> autosize(document.querySelectorAll('textarea')); </script> </body> </html> """ #% ( curQ, ) ''' def toFile() : #string = form.getvalue('answer') print( form.getvalue('answer') ) a = open ( 'C:\answer.py' , 'w' ) a.write( string ) a.close() ''' if 'submit' in form : #string = form.getvalue('answer') a = open ( 'answer.py' , 'w' ) a.write( form.getvalue('answer') ) a.close() else : a=open('answer2.py', 'w') print (z) ''' def session_in(): cur.execute("insert into session values(?,?)",(hno,pwd)) con.commit() def register(): cur.execute("insert into studentbase values(?,?,?,?,?)",(Roll,Name,Password,Mobile,Email)) con.commit() os.system('/var/www/html/stud_reg_success.py') def login(): cur.execute("select roll,password from studentbase") temp=cur.fetchall() flag=0 for i in temp: if i[0]==hno and i[1]==pwd: flag=1 c['username']=hno os.system('/var/www/html/stud_main.py') break if flag==0: os.system('/var/www/html/stud_login_fail.py') if form.getvalue('pwd') == form.getvalue('cpwd') : Name=form.getvalue('Name') Roll=form.getvalue('Roll_no') Password=form.getvalue('<PASSWORD>') Mobile=form.getvalue('mobile') Email=form.getvalue('Email') else : os.system('/var/www/html/stud_login_reg_fail.py') if 'register' in form: register() hno=form.getvalue('@username') pwd=form.getvalue('@password') if 'login' in form : login() if Name == None and hno==None : print(z) con.close() print(c.js_output()) '''
4,526
gbmc_v0/tests/test_create_imgs.py
leilakhalili87/gbmc_v0
0
2171242
import numpy as np
import pytest

import gbmc_v0.pad_dump_file as pdf
import gbmc_v0.util_funcs as uf


@pytest.mark.parametrize('filename0, rCut, lat_par, non_p',
                         [("data/dump_1", 8.1, 4.05, 2),
                          ("data/dump_1", 30, 4.05, 2),
                          ("data/dump_2", 8.1, 4.05, 1)])
def test_create_imgs(filename0, rCut, lat_par, non_p):
    data = uf.compute_ovito_data(filename0)
    arr = pdf.p_arr(non_p)
    GbRegion, GbIndex, GbWidth, w_bottom_SC, w_top_SC = pdf.GB_finder(data, lat_par, non_p)
    sim_cell = data.cell[...]
    sim_1vec = np.array(sim_cell[:, arr[0]])
    sim_2vec = np.array(sim_cell[:, arr[1]])
    p1_vec = np.array([sim_1vec[arr[0]], sim_1vec[arr[1]]])
    p2_vec = np.array([sim_2vec[arr[0]], sim_2vec[arr[1]]])
    [n1, n2] = pdf.num_rep_2d(p1_vec, p2_vec, rCut)
    pts1, gb1_inds = pdf.pad_gb_perp(data, GbRegion, GbIndex, rCut, non_p)
    pts_w_imgs, inds_array = pdf.create_imgs(pts1, n1, n2, sim_1vec, sim_2vec, non_p)
    num0 = pts_w_imgs.shape[0] / pts1.shape[0]
    num1 = np.power(n1 + n2 + 1, 2)
    assert np.allclose(num0, num1)
1,120
test/test_procmaps.py
woodruffw/procmaps.py
30
2170727
import os
import unittest

import procmaps


class TestProcmaps(unittest.TestCase):
    def check_map_properties(self, map_):
        self.assertIsInstance(map_.begin_address, int)
        self.assertIsInstance(map_.end_address, int)
        self.assertTrue(map_.begin_address in map_)
        self.assertFalse(map_.end_address in map_)
        self.assertIsInstance(map_.is_readable, bool)
        self.assertIsInstance(map_.is_writable, bool)
        self.assertIsInstance(map_.is_executable, bool)
        self.assertIsInstance(map_.is_shared, bool)
        self.assertIsInstance(map_.is_private, bool)
        self.assertIsInstance(map_.offset, int)
        self.assertIsInstance(map_.device, tuple)
        self.assertIsInstance(map_.device[0], int)
        self.assertIsInstance(map_.device[1], int)
        self.assertIsInstance(map_.inode, int)

        if map_.is_shared:
            self.assertFalse(map_.is_private)
        if map_.is_private:
            self.assertFalse(map_.is_shared)

        self.assertTrue(isinstance(map_.pathname, str) or map_.pathname is None)

    def test_from_pid(self):
        maps = procmaps.from_pid(os.getpid())
        for map_ in maps:
            self.check_map_properties(map_)

    def test_from_path(self):
        maps = procmaps.from_path("/proc/self/maps")
        for map_ in maps:
            self.check_map_properties(map_)

    def test_from_str(self):
        maps = procmaps.from_str("55d5564b4000-55d5564b6000 r--p 00000000 08:11 6553896 /bin/cat")
        self.assertEqual(len(maps), 1)
        self.check_map_properties(maps[0])


if __name__ == "__main__":
    unittest.main()
1,645
digital-curling/named/network/hogehoge.py
km-t/dcpython
0
2171353
import numpy as np

a = np.arange(9).reshape(3, 3)
b = np.zeros((3, 3))

for i in range(3):
    for j in range(3):
        b[i][j] = i + 3 * j

print(a)
print(b)

c = np.array(a[0:3, 0:1] - b[0:3, 0:1])
print(c)
203
ex8/ex8.py
mlyundin/Machine-Learning
13
2171259
import matplotlib.pyplot as plt import numpy as np from numpy.linalg import det, pinv import scipy.io as sio def visualize_data(X, title): x1, x2 = X.T plt.plot(x1, x2, 'bx') plt.xlabel('Latency (ms)') plt.ylabel('Throughput (mb/s)') plt.xlim([0, 30]) plt.ylim([0, 30]) plt.title(title) return plt def visualize_fit(X, mu, sigma2): visualize_data(X, 'Visualizing Gaussian fit.') x = np.arange(0, 35, 0.5) x1, x2 = np.meshgrid(x, x) z = multivariate_gaussian(np.hstack((x1.reshape(-1,1), x2.reshape(-1,1))), mu, sigma2).reshape(x1.shape) plt.contour(x1, x2, z) return plt def estimate_gaussian(X): return np.mean(X, axis=0)[:, np.newaxis], np.var(X, axis=0)[:, np.newaxis] def multivariate_gaussian(X, mu, Sigma2): k = float(len(mu)) X = np.copy(X) if any(s == 1 for s in Sigma2.shape): Sigma2 = np.diag(Sigma2.ravel()) X -= mu.reshape(1, -1) return (2*np.pi)**(-k/2)*det(Sigma2)**(-0.5)*np.exp(-0.5*np.sum(np.dot(X, pinv(Sigma2))*X, axis=1)) def select_threshold(yval, pval): yval = yval.ravel() best_epsilon = 0 best_F1 = 0 stepsize = (np.max(pval) - np.min(pval)) / 1000 for epsilon in np.arange(np.min(pval), np.max(pval), stepsize): cvPredictions = pval < epsilon tp = np.sum((cvPredictions == 1) & (yval == 1), dtype=float) fp = np.sum((cvPredictions == 1) & (yval == 0)) fn = np.sum((cvPredictions == 0) & (yval == 1)) recall = tp/(tp+fn) precision = tp/(tp+fp) F1 = 2*recall*precision/(recall+precision) if F1 > best_F1: best_F1, best_epsilon = F1, epsilon return best_epsilon, best_F1 if __name__ == '__main__': data = sio.loadmat('ex8data1.mat') X = data['X'] Xval = data['Xval'] yval = data['yval'] visualize_data(X, 'Visualizing example dataset for outlier detection').show() mu, sigma2 = estimate_gaussian(X) p = multivariate_gaussian(X, mu, sigma2) visualize_fit(X, mu, sigma2).show() pval = multivariate_gaussian(Xval, mu, sigma2) epsilon, F1 = select_threshold(yval, pval) print('Best epsilon found using cross-validation: %s' % epsilon) print('Best F1 on Cross Validation Set: %s' % F1) print(' (you should see a value epsilon of about 8.99e-05)') visualize_data(X, 'The classified anomalies.') x1, x2 = X[p < epsilon, :].T plt.plot(x1, x2, 'ro') plt.show() data = sio.loadmat('ex8data2.mat') X = data['X'] Xval = data['Xval'] yval = data['yval'] mu, sigma2 = estimate_gaussian(X) p = multivariate_gaussian(X, mu, sigma2) pval = multivariate_gaussian(Xval, mu, sigma2) epsilon, F1 = select_threshold(yval, pval) print('Best epsilon found using cross-validation: %s' % epsilon) print('Best F1 on Cross Validation Set: %s' % F1) print('# Outliers found: %s' % np.sum(p < epsilon)) print(' (you should see a value epsilon of about 1.38e-18)')
3,091
meggie/actions/raw_ica/controller/ica.py
Teekuningas/meggie
4
2170086
""" Contains controlling logic for the ICA. """ import logging from copy import deepcopy import numpy as np import mne from meggie.utilities.compare import compare_raws def compute_ica(raw, n_components, method, max_iter, random_state): """ Computes ICA using MNE implementation. """ ica = mne.preprocessing.ICA( n_components=n_components, method=method, max_iter=max_iter, random_state=random_state) ica.fit(raw) return ica def plot_topographies(ica, n_components): """ Plots topographies from the ICA solution. """ figs = ica.plot_components(title='') for fig in figs: fig.canvas.set_window_title('ICA topographic maps') def update_topography_texts(): """ Change texts in the axes to match names in the dialog """ idx = 0 for fig in figs: for ax in fig.get_axes(): if idx > n_components: return ax.set_title('Component ' + str(idx), fontsize=12) idx += 1 update_topography_texts() def plot_sources(raw, ica): """ Plots sources of the ica solution. """ sources = ica.get_sources(raw) sources.plot(title='ICA time courses') def plot_properties(raw, ica, picks): """ Plots properties for specific ICA components. """ figs = ica.plot_properties( raw, picks) for fig in figs: fig.canvas.set_window_title('ICA properties') # fix the names idx = 0 for fig in figs: for ax_idx, ax in enumerate(fig.get_axes()): if ax_idx == 0: ax.set_title("Component " + str(picks[idx])) idx += 1 break def plot_changes(raw, ica, indices): """ Plot a raw comparison plot for ICA solution. """ raw_removed = raw.copy() ica.apply(raw_removed, exclude=indices) compare_raws(raw, raw_removed)
1,921
algo/test/test_backtracking.py
ssavinash1/Algorithm_stanford
24
2170619
# -*- coding: utf-8 -*- import unittest from src.backtracking import QueenPuzzle, TravelingSalesman, SubsetsOfGivenSum from src.graph import Graph class TestBacktracking(unittest.TestCase): def test_queen_puzzle_3(self): puzzle = QueenPuzzle(3) puzzle.run() expected_solutions = [] self.assertEqual(puzzle.solutions, expected_solutions, 'should not find any solutions for the three problem') def test_queen_puzzle_4(self): puzzle = QueenPuzzle(4) puzzle.run() expected_solutions = [[(0, 1), (1, 3), (2, 0), (3, 2)], [(0, 2), (1, 0), (2, 3), (3, 1)]] self.assertItemsEqual(puzzle.solutions, expected_solutions, 'should not find any solutions for the three problem') def test_queen_puzzle_8(self): puzzle = QueenPuzzle(8) puzzle.run() self.assertEqual(len(puzzle.solutions), 92, 'should not find any solutions for the three problem') def test_traveling_salesman(self): """ Given the following graph: 2 (a)----(b) | \4 / | | \/ |5 1| /\ | | /3 \ | |/ \| (c)----(d) 6 """ g = Graph.build(edges=[ ('a', 'b', 2), ('a', 'd', 4), ('a', 'c', 1), ('b', 'd', 5), ('b', 'c', 3), ('d', 'c', 6) ], directed=False) ts = TravelingSalesman(g) ts.run() expected_min_path = ['a', 'c', 'b', 'd', 'a'] expected_min_cost = 13 self.assertEqual(ts.solution, expected_min_path, 'should have computed the min path') self.assertEqual(ts.min_cost, expected_min_cost, 'should have computed the min cost') def test_subset_of_given_sum(self): S = [1,2,2,3,4,5] N = 5 sogs = SubsetsOfGivenSum(S, N) sogs.run() expected_solutions = [[1,2,2], [1,4], [2,3], [5]] self.assertItemsEqual(expected_solutions, sogs.solutions, 'should produce the correct solution')
2,172
test/test_borehole_ground_water.py
ArnaudCrl/pywellcad
6
2171321
import pathlib import unittest import pywintypes import wellcad.com import random import pywintypes from ._extra_asserts import ExtraAsserts from ._sample_path import SamplePath class TestBoreholeGroundWater(unittest.TestCase, ExtraAsserts, SamplePath): @classmethod def setUpClass(cls): cls.app = wellcad.com.Application() cls.sample_path = cls._find_sample_path() cls.fixture_path = pathlib.Path(__file__).parent / "fixtures" cls.borehole = cls.app.open_borehole(str(cls.fixture_path / "groundwater/groundwater.wcl")) @classmethod def tearDownClass(cls): cls.app.quit(False) def test_water_salinity(self): config = "Temperature=Temperature (C),TemperatureUnit=degC" output_log = self.borehole.water_salinity("Conductivity", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_water_salinity_documentation(self): self.fail("water salinity chm documentation : input restricted to conductivity") def test_water_resistivity(self): config = "Temperature=25,TemperatureUnit=degC,RefTemperature=25,RefTemperatureUnit=degC,Method=0" output_log = self.borehole.water_resistivity("Fluid Resistivity", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_water_resistivity_documentation(self): self.fail("water_resistivity chm documentation : Method is missing") def test_shale_volume(self): config = "Equation=0,ShaleValueType=0,Shale=500,ShaleTopDepth=30,ShaleBotDepth=80,\ SandstoneValueType=2,Sandstone=0,SandstoneTopDepth=160,SandstoneBotDepth=180" output_log = self.borehole.shale_volume("Gamma Ray", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_porosity_sonic(self): config = "Method=0,MatrixSlowness=50,MatrixSlownessUnit=us/m,FluidSlowness=189,\ FluidSlownessUnit=us/m,C=0.67,Compaction=1" output_log = self.borehole.porosity_sonic("P-Slowness", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_porosity_archie(self): config = "Method=0,Rw=Rw,RwUnit=ohm.m,Vsh=0,Rsh=30,RshUnit=ohm.m,CementationFactor=1,\ CementationExponent=2,Cs=1" output_log = self.borehole.porosity_archie("Normal Resistivity", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_porosity_density(self): config = "Method=0,MatrixDensity=2.7,MatrixDensityUnit=g/cc,FluidDensity=1.0,\ FluidDensityUnit=g/cc,ShaleVolume=0,ShaleDensity=1.5,ShaleDensityUnit=g/cc" output_log = self.borehole.porosity_density("Bulk Density", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_porosity_neutron(self): config = "Vsh=Vsh,ShaleNPhi=50" output_log = self.borehole.porosity_neutron("NPhi (Sandstone)", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_permeability(self): config = "CementationFactor=1" output_log = self.borehole.permeability("DPhi (Sandstone)", False, config) self.assertIsInstance(output_log, wellcad.com.Log) def test_hydraulic_conductivity(self): config = str(self.fixture_path / "groundwater/groundwater.ini") output_log = self.borehole.hydraulic_conductivity("Permeability", False, config) self.assertIsInstance(output_log, wellcad.com.Log) if __name__ == '__main__': unittest.main()
3,524
wordbook/services.py
lostsquirrel/words
0
2170933
from wordbook.models import wordbookDAO, Wordbook


def all():
    data = wordbookDAO.find_all()
    return [Wordbook.from_db(*r) for r in data]


def get_wordbook(book_id: int):
    _b = wordbookDAO.find(book_id)
    if _b is not None:
        return Wordbook.from_db(*_b)


def get_wordbook_by_guid(guid: int):
    _b = wordbookDAO.find_by_guid(guid)
    if _b is not None:
        return Wordbook.from_db(*_b)
413
src/models/metrics/sequence_confusion_matrix.py
V1ct0reo/lightning-fast-hydra
0
2171493
from typing import Optional, Any import numpy as np import pandas as pd import torchmetrics.classification from torch import Tensor from sklearn.metrics import confusion_matrix from src.datamodules.datasets.window_makers.sliding_window_maker import MovementDataWindowMaker class BasicSequenceConfusionMatrix(torchmetrics.classification.ConfusionMatrix): def __init__(self, num_classes: int, window_maker: MovementDataWindowMaker, normalize: Optional[str] = None, threshold: float = 0.5, multilabel: bool = False, compute_on_step: bool = False, dist_sync_on_step: bool = False, process_group: Optional[Any] = None, file_name='basic_sequence_confusion_matrix.csv', ): """ """ super().__init__(num_classes=num_classes, normalize=normalize, threshold=threshold, multilabel=multilabel, compute_on_step=compute_on_step, dist_sync_on_step=dist_sync_on_step, process_group=process_group) self.file_name = file_name self.window_maker = window_maker self.seq_id_col = window_maker.sequenz_identifier self.seq_id_list = window_maker.seq_id_list first_frame_idx = self.window_maker.window_size - 1 self.seq_id_preds_targtes = pd.DataFrame( columns=['seq_id', 'predicted', 'target'], index=range(first_frame_idx, self.window_maker.num_entries)) def add_batch(self, preds_batch, target_batch, sample_idxs): # preds_batch (batch_size, n_classes) <- softmax output # target_batch (batch_size) # sample_idxs (batch_size, windoiw_size) <- for each batch, windowsize[-1] would be the Frame, tht got predicted # # the idx for each predicted frame from this batch. Should be used to get the right row from windowmakers data df predicted_frames_idxs = sample_idxs[:, -1] predicted_frames_idxs = predicted_frames_idxs.detach().numpy() predicted_labels = preds_batch.argmax(axis=1) predicted_labels = predicted_labels.detach().numpy() target_batch_labels = target_batch.detach().numpy() self.seq_id_preds_targtes.loc[predicted_frames_idxs] = np.array([ self.seq_id_list[predicted_frames_idxs], # the seq_id for this window predicted_labels, # the prediction for this window target_batch_labels # the target for this window ]).T def compute_and_save_csv(self): preds = []#np.zeros((self.num_classes)) targets = []#np.zeros((self.num_classes)) for seq in sorted(self.seq_id_preds_targtes.seq_id.unique()): if not isinstance(seq, int): continue seq_df = self.seq_id_preds_targtes[self.seq_id_preds_targtes['seq_id'] == seq] majority = seq_df.mode(axis=0, dropna=True) t = majority['target'].values[0] if not isinstance(t, int): t = t[0] p = majority['predicted'].values[0] if not isinstance(p, int): p = p[0] targets.append(t) preds.append(p) conf_mat = confusion_matrix(y_true=targets, y_pred=preds) conf_df = pd.DataFrame(conf_mat, index=np.arange(self.num_classes), columns=np.arange(self.num_classes)) conf_df.to_csv(self.file_name, index=False) # preds_majority_vote = self.seq_id_preds_targtes.groupby(self.seq_id_col).predicted.agg(pd.Series.mode) # pred_counts = preds_majority_vote.value_counts() # seq_preds[pred_counts.index] = pred_counts # targets_majority_vote = self.seq_id_preds_targtes.groupby(self.seq_id_col).target.agg(pd.Series.mode) # targets_counts = targets_majority_vote.value_counts() # seq_targets[targets_counts.index] = targets_counts
3,950
app/treasure/migrations/0001_initial.py
yohan394/adverstisement_backend
0
2171590
# Generated by Django 3.1.7 on 2021-05-24 15:11 from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('user', '0001_initial'), ('commercial', '0001_initial'), ] operations = [ migrations.CreateModel( name='RewardCap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_at', models.DateField(default=django.utils.timezone.now)), ('daily_cap', models.IntegerField()), ], ), migrations.CreateModel( name='TransactionVideo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rewarded', models.IntegerField()), ('date_at', models.DateField(default=django.utils.timezone.now)), ('info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.info')), ('video', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.video')), ], ), migrations.CreateModel( name='TransactionQuiz', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rewarded', models.IntegerField()), ('date_at', models.DateField(default=django.utils.timezone.now)), ('info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.info')), ('quiz', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.quiz')), ('user_choice', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='commercial.quizchoices')), ], ), ]
2,005
code/DataBase.py
danielt17/Triplet-loss-few-shot-learning
2
2170710
# -*- coding: utf-8 -*- """ Created on Fri Jun 4 14:02:13 2021 @author: danie """ # %% Imports from __future__ import print_function import torch.utils.data as data from PIL import Image import os import os.path import errno import torch import codecs import numpy as np import csv # %% # %% New data set defnition class FashionMNIST_t(data.Dataset): ''' Description: This function creates a dataset object of triplet tuples. Inputs: root: path of downloaded datasest n_train_triplets: amount of training set samples n_test_triplets: amount of test set samples train: return train or test set transform: preform transformations and/or augmentations on input data target_tranform: preform transformations and/or augmentations on target data download: download the dataset Returns: Dataset (training or test set) ''' urls = [ 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz', ] raw_folder = 'raw' processed_folder = 'processed' training_file = 'training.pt' test_file = 'test.pt' train_triplet_file = 'train_triplets.txt' test_triplet_file = 'test_triplets.txt' def __init__(self, root, n_train_triplets=50000, n_test_triplets=10000, train=True, transform=None, target_transform=None, download=False): self.root = root self.transform = transform self.train = train # training set or test set if download: self.download() if not self._check_exists(): raise RuntimeError('Dataset not found.' + ' You can use download=True to download it') if self.train: self.train_data, self.train_labels = torch.load( os.path.join(root, self.processed_folder, self.training_file)) self.make_triplet_list(n_train_triplets) triplets = [] for line in open(os.path.join(root, self.processed_folder, self.train_triplet_file)): if len(line) == 1: continue else: triplets.append((int(line.split()[0]), int(line.split()[1]), int(line.split()[2]))) # anchor, close, far self.triplets_train = triplets else: self.test_data, self.test_labels = torch.load(os.path.join(root, self.processed_folder, self.test_file)) self.make_triplet_list(n_test_triplets) triplets = [] for line in open(os.path.join(root, self.processed_folder, self.test_triplet_file)): if len(line) == 1: continue else: triplets.append((int(line.split()[0]), int(line.split()[1]), int(line.split()[2]))) # anchor, close, far self.triplets_test = triplets def __getitem__(self, index): if self.train: idx1, idx2, idx3 = self.triplets_train[index] img1, img2, img3 = self.train_data[idx1], self.train_data[idx2], self.train_data[idx3] else: idx1, idx2, idx3 = self.triplets_test[index] img1, img2, img3 = self.test_data[idx1], self.test_data[idx2], self.test_data[idx3] # doing this so that it is consistent with all other datasets # to return a PIL Image img1 = Image.fromarray(img1.numpy(), mode='L') img2 = Image.fromarray(img2.numpy(), mode='L') img3 = Image.fromarray(img3.numpy(), mode='L') if self.transform is not None: img1 = self.transform(img1) img2 = self.transform(img2) img3 = self.transform(img3) return img1, img2, img3 def __len__(self): if self.train: return len(self.triplets_train) else: return len(self.triplets_test) def _check_exists(self): return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \ os.path.exists(os.path.join(self.root, 
self.processed_folder, self.test_file)) def _check_triplets_exists(self): return os.path.exists(os.path.join(self.root, self.processed_folder, self.train_triplet_file)) and \ os.path.exists(os.path.join(self.root, self.processed_folder, self.test_triplet_file)) def download(self): from six.moves import urllib import gzip if self._check_exists(): return # download files try: os.makedirs(os.path.join(self.root, self.raw_folder)) os.makedirs(os.path.join(self.root, self.processed_folder)) except OSError as e: if e.errno == errno.EEXIST: pass else: raise for url in self.urls: print('Downloading ' + url) data = urllib.request.urlopen(url) filename = url.rpartition('/')[2] file_path = os.path.join(self.root, self.raw_folder, filename) with open(file_path, 'wb') as f: f.write(data.read()) with open(file_path.replace('.gz', ''), 'wb') as out_f, \ gzip.GzipFile(file_path) as zip_f: out_f.write(zip_f.read()) os.unlink(file_path) # process and save as torch files print('Processing...') training_set = ( read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')), read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte')) ) test_set = ( read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')), read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte')) ) with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f: torch.save(training_set, f) with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f: torch.save(test_set, f) print('Done!') def make_triplet_list(self, ntriplets): if self._check_triplets_exists(): return print('Processing Triplet Generation ...') if self.train: np_labels = self.train_labels.numpy() filename = self.train_triplet_file else: np_labels = self.test_labels.numpy() filename = self.test_triplet_file triplets = [] for class_idx in range(10): a = np.random.choice(np.where(np_labels==class_idx)[0], int(ntriplets/10), replace=True) b = np.random.choice(np.where(np_labels==class_idx)[0], int(ntriplets/10), replace=True) while np.any((a-b)==0): np.random.shuffle(b) c = np.random.choice(np.where(np_labels!=class_idx)[0], int(ntriplets/10), replace=True) for i in range(a.shape[0]): triplets.append([int(a[i]), int(c[i]), int(b[i])]) with open(os.path.join(self.root, self.processed_folder, filename), "w") as f: writer = csv.writer(f, delimiter=' ') writer.writerows(triplets) print('Done!') def get_int(b): return int(codecs.encode(b, 'hex'), 16) def parse_byte(b): if isinstance(b, str): return ord(b) return b def read_label_file(path): with open(path, 'rb') as f: data = f.read() assert get_int(data[:4]) == 2049 length = get_int(data[4:8]) labels = [parse_byte(b) for b in data[8:]] assert len(labels) == length return torch.LongTensor(labels) def read_image_file(path): with open(path, 'rb') as f: data = f.read() assert get_int(data[:4]) == 2051 length = get_int(data[4:8]) num_rows = get_int(data[8:12]) num_cols = get_int(data[12:16]) images = [] idx = 16 for l in range(length): img = [] images.append(img) for r in range(num_rows): row = [] img.append(row) for c in range(num_cols): row.append(parse_byte(data[idx])) idx += 1 assert len(images) == length return torch.ByteTensor(images).view(-1, 28, 28)
8,912
preprocess/dataPreprocess.py
zyt4321/bio
1
2171400
import sys sys.path.append("./") import numpy as np from scipy import sparse from scipy.sparse.linalg import spsolve from scipy.signal import savgol_filter import os import config import matplotlib.pyplot as plt import pandas import progressbar def baseline_als(y, lam, p, niter=10): ''' find baseline according to y ''' L = len(y) D = sparse.diags([1,-2,1],[0,-1,-2], shape=(L,L-2)) w = np.ones(L) for i in range(niter): W = sparse.spdiags(w, 0, L, L) Z = W + lam * D.dot(D.transpose()) z = spsolve(Z, w*y) w = p * (y > z) + (1-p) * (y < z) return z def savitzkyGolay(x, window_length=41, polyorder=2): ''' savitzkyGolay smoothing ''' return savgol_filter(x, window_length, polyorder) def align(a_l, a_r, b_l, b_r, z): ''' align by 2 points ''' return (a_l - a_r) * (z - b_l) / (b_l - b_r) + a_l def main(subPosition=2, datapath=config.DATA_PATH, afterpath=config.DATA_PREPROCESS_PATH): totalNum = len(os.listdir(datapath)) bar = progressbar.ProgressBar(max_value=totalNum) cnt = 0 for path in os.listdir(datapath): cnt += 1 bar.update(cnt) sType = path.split('_')[subPosition] # if sType not in ['A6','A8','S2','C2']: # continue testFilePath = os.path.join(datapath, path) df = pandas.read_csv(testFilePath) matrix = df.values x = matrix[:,0] y = matrix[:,1] y = np.maximum(y, 0) y = np.sqrt(y) y = savitzkyGolay(y, window_length=11) z = baseline_als(y, 10**9, 0.001) y = np.maximum(0, y-z).astype(np.float) # plt.plot(x, y ,color='y') # plt.show() bList = [] bList_y = [] interval = 0.5 if sType == 'S2': aList = [103.9136, 108.90324, 164.9097] elif sType == 'C2': aList = [146.01622, 265.91915, 656.02131] elif sType in ['A6','A8']: aList = [196.9738, 393.9404, 590.907] else: aList = [] for a in aList: xSubList = np.logical_and(a - interval <= x, x <= a + interval) ySubList = y[xSubList] firstIndex = np.where(xSubList==True)[0][0] subIndex = np.argmax(ySubList) if subIndex == 0 or subIndex == (len(ySubList) - 1): print(cnt, totalNum) print(path) totalIndex = firstIndex + subIndex bList.append(x[totalIndex]) if len(bList) != 0: x_new = [] for x_item in x: new = x_item if x_item < bList[0] or x_item >= bList[2]: new = align(aList[0], aList[2], bList[0], bList[2], x_item) elif x_item >= bList[0] and x_item < bList[1]: new = align(aList[0], aList[1], bList[0], bList[1], x_item) elif x_item >= bList[1] and x_item < bList[2]: new = align(aList[1], aList[2], bList[1], bList[2], x_item) x_new.append(new) df['mass'] = x_new else: df['mass'] = x df['intensity'] = y df.to_csv(os.path.join(afterpath, path), index=False) if __name__ == "__main__": # The path where the CSV format file is located DATA_PATH='/opt/BioData_Base/data20190708/csv' # The path where the processed file will be saved DATA_PREPROCESS_PATH='/opt/BioData_Base/data20190708/csv-after-py' main(2,DATA_PATH, DATA_PREPROCESS_PATH)
3,562
tools/nightly-unit-tests/tests/test_semver.py
winstondu/dd-sdk-ios
93
2170426
# -----------------------------------------------------------
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-2020 Datadog, Inc.
# -----------------------------------------------------------

import unittest
from src.semver import Version


class VersionTestCase(unittest.TestCase):

    def test_parsing(self):
        self.assertEqual(Version.parse('10.0.3'), Version(major=10, minor=0, patch=3))
        self.assertEqual(Version.parse('11.4'), Version(major=11, minor=4, patch=0))
        self.assertEqual(Version.parse('12'), Version(major=12, minor=0, patch=0))

    def test_comparing(self):
        self.assertTrue(Version.parse('14.0.0').is_newer_than(Version.parse('13.1.2')))
        self.assertTrue(Version.parse('14.1.1').is_newer_than(Version.parse('14.1.0')))
        self.assertTrue(Version.parse('14.2.3').is_newer_than(Version.parse('14.2.2')))
        self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('15.0.2')))
        self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.1.0')))
        self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.0.4')))
        self.assertFalse(Version.parse('14.0.3').is_newer_than(Version.parse('14.0.3')))
        self.assertTrue(Version.parse('14.0.3').is_newer_than_or_equal(Version.parse('14.0.3')))
        self.assertFalse(Version.parse('14.0.2').is_newer_than_or_equal(Version.parse('14.0.3')))
1,582
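The tests above fully pin down the parsing and comparison behaviour. A minimal sketch of a Version class that would satisfy them (an assumption on my part; the repository's actual src/semver.py may differ in details):

from dataclasses import dataclass


@dataclass(frozen=True)
class Version:
    major: int
    minor: int
    patch: int

    @staticmethod
    def parse(text: str) -> 'Version':
        # Missing minor/patch components default to 0, e.g. '11.4' -> 11.4.0.
        parts = [int(p) for p in text.split('.')]
        parts += [0] * (3 - len(parts))
        return Version(*parts[:3])

    def is_newer_than(self, other: 'Version') -> bool:
        return (self.major, self.minor, self.patch) > (other.major, other.minor, other.patch)

    def is_newer_than_or_equal(self, other: 'Version') -> bool:
        return self == other or self.is_newer_than(other)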
leetCode/cheapest_flights_within_k_stops.py
yskang/AlgorithmPracticeWithPython
0
2170071
# Title: Cheapest Flights Within K Stops
# Link: https://leetcode.com/problems/cheapest-flights-within-k-stops/

from collections import defaultdict
from heapq import heappop, heappush
from typing import List

INF = 10**10


class Problem:
    def find_cheapest_price(self, n: int, flights: List[List[int]], src: int, dst: int, k: int) -> int:
        graph = defaultdict(lambda: [])
        for start, end, time in flights:
            graph[start].append((end, time))
        return self.dijkstra(graph, src, dst, k)

    def dijkstra(self, graph: defaultdict, start: int, dst: int, k: int):
        pq = []
        heappush(pq, (0, start, k + 1))
        while pq:
            cost, node, count = heappop(pq)
            if node == dst:
                return cost
            if count > 0:
                for child, time in graph[node]:
                    heappush(pq, (cost + time, child, count - 1))
        return -1


def solution():
    n = 4
    edges = [[0, 1, 1], [0, 2, 5], [1, 2, 1], [2, 3, 1]]
    src = 0
    dst = 3
    k = 1
    problem = Problem()
    return problem.find_cheapest_price(n, edges, src, dst, k)


def main():
    print(solution())


if __name__ == '__main__':
    main()
1,198
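The solution() helper above encodes the sample instance; a quick hand trace (mine, not repository output) confirms what it should return:

# With k = 1 stop allowed, the cheaper route 0 -> 1 -> 2 -> 3 (cost 3) needs two
# intermediate stops and is rejected, so the best admissible route is 0 -> 2 -> 3.
assert Problem().find_cheapest_price(4, [[0, 1, 1], [0, 2, 5], [1, 2, 1], [2, 3, 1]], 0, 3, 1) == 6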
01_Implementation.py
MaxLiu728/Genre-Prediction
0
2170450
"""Personal Challenge_Draft.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1-25-B3CO6yVCH9u2vgbhIjyyFeU3tJ3w """ # Working environment set up import pandas as pd from sklearn.metrics import accuracy_score from sklearn.feature_extraction.text import CountVectorizer import string import nltk from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer import seaborn as sns from nltk.corpus import wordnet import matplotlib.pyplot as plt from matplotlib.legend_handler import HandlerLine2D import numpy as np from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.ensemble import RandomForestClassifier nltk.download('stopwords') nltk.download('punkt') nltk.download('wordnet') nltk.download('averaged_perceptron_tagger') def load_data(): ''' This function will separately return the features and response variable for the input data ''' data = pd.read_csv('data.csv') X = data['Lyric'] y = data['Genre'] return X, y # Use pos_tag to get the type of the world and then map the tag to the format wordnet lemmatizer would accept. def get_wordnet_pos(word): """Map POS tag to first character lemmatize() accepts""" tag = nltk.pos_tag([word])[0][1][0].upper() tag_dict = {"J": wordnet.ADJ, "N": wordnet.NOUN, "V": wordnet.VERB, "R": wordnet.ADV} return tag_dict.get(tag, wordnet.NOUN) def transform_data(): ''' This function will transform the features and will reuturn the countvectorized features. Steps are: 1. Remove punctuations 2. Tokenize 3. Lemmatization 4. Remove stop words 5. CountVectorize ''' X, y = load_data() X = X.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation))) # To remove the punctuations X_Tokenize = X.apply(lambda x: word_tokenize(x)) # To tokenize lemmatizer = WordNetLemmatizer() X_lemmatize = X_Tokenize.apply(lambda x: ' '.join([lemmatizer.lemmatize(w, pos='v') for w in x])) stop_words = set(stopwords.words('english')) stop_words_more = ('10', '100', '20', '2x', '3x', '4x', '50', 'im') # Add more stop words stop_words = stop_words.add(x for x in stop_words_more) CountVect = CountVectorizer(stop_words=stop_words, min_df=300, lowercase=True, ngram_range=(1, 1)) Transformmed_array = CountVect.fit_transform(X_lemmatize) X_vectorized = pd.DataFrame(Transformmed_array.toarray(), columns=CountVect.get_feature_names()) return X_vectorized, y def EDA_visualize(X, y, N): ''' :para X: X is the features to be trained :para y: y is the Gnere classification to be trained :para N: nlargest frequencied words for each type of Genre :return: 1. Barplot to visulize the counts for each type of y 2. Return the n largest frequencies words for each type of y ''' sns.catplot(x='Genre', kind='count', data=pd.DataFrame(y[:50000])) DF_Combine = pd.concat([X, y], axis=1) DF_nlargest = pd.DataFrame(np.ones((3, 1)), columns=['exm'], index=['Hip Hop', 'Pop', 'Rock']) # Initilnize for value in DF_Combine.columns[:-1]: DF_nlargest[value] = pd.DataFrame(DF_Combine.groupby('Genre')[value].sum()) print(DF_nlargest.apply(lambda s, n: s.nlargest(n).index, axis=1, n=N)) # X_temp, y_temp = transform_data() def TuneParameter_visulize(X_train, y_train, X_hold, y_hold): ''' It will return severl plots aims to tune paramters. parameters are: 1. max_depth 2. n_estimators 3. max_features... 
Todo: plotting more parameters ''' # Tune max_depth max_depths = np.linspace(10, 200, 15, endpoint=True) train_results = [] validation_results = [] for depth in max_depths: rf = RandomForestClassifier(max_depth=depth, n_jobs=-1) rf.fit(X_train, y_train) train_results.append(accuracy_score(y_train, rf.predict(X_train))) validation_results.append(accuracy_score(y_hold, rf.predict(X_hold))) line1 = plt.plot(max_depths, train_results, 'b', label='Train accuracy') line2 = plt.plot(max_depths, validation_results, 'r', label='Estimated accuracy') plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('accuracy score') plt.xlabel('Tree depth') plt.show() def main(): ''' It will return: 1. EDA visulization 2. Visulize parameter tuning process 3. Series include Expected accuracy 4. Series include the predicted y_test ''' # Load data X_input, y_input = transform_data() # Train, holdset, test split y_test = pd.DataFrame(y_input[-5000:], columns=['Genre']) y_train = pd.DataFrame(y_input[:50000], columns=['Genre']) X_train = pd.DataFrame(X_input.iloc[:50000, :], columns=X_input.columns) X_test = pd.DataFrame(X_input.iloc[-5000:, :], columns=X_input.columns) X_holdout_set = X_train.sample(5000, random_state=66) y_holdout_set = y_train.iloc[X_holdout_set.index, :] X_train_new = X_train.drop(X_holdout_set.index) y_train_new = y_train.drop(X_holdout_set.index) EDA_visualize(X_train, y_train, 10) # For EDA purpose # Build classifier ''' The RF model will be used. Few reasons below: 1. An ensemble (bootstrap) approach might make stronger predictions, without causing serious overfitting 2. Compared with distance methods, it needs less datapreprocessing (such as scaling data) 3. Non-parametric estimation However, it may have an obvious drawback: 1. May set large max_features 2. Should consider more deeper depth The drawbacks above will directly triggle the large training workload. ''' # TuneParameter_visulize(X_train_new,y_train_new, X_holdout_set, y_holdout_set) # Tune parameters RF_Model = RandomForestClassifier(criterion='entropy', n_estimators=100, max_depth=56, max_features=666) RF_Model.fit(X_train_new, y_train_new) estimated_accuracy = accuracy_score(y_holdout_set, RF_Model.predict(X_holdout_set)) pd.Series(estimated_accuracy).to_csv('ea.csv', index=False, header=False) # Predict testing set test_pred = RF_Model.predict(X_test) pd.Series(test_pred).to_csv('pred.csv', index=False, header=False) if __name__ == '__main__': main()
6,417
Modulo1/saludo.py
EUD-curso-python/control_de_flujo-ipsuarezc
0
2169960
def espanol():
    return 'hola'


def ingles():
    return 'hello'


def aleman():
    return 'hallo'


def hawai():
    return 'aloha'
133
PBO_18117/latihan_4.2.list2.py
daniel8117/PBO
1
2170223
nilai_matakuliah = [70, 80, 90, 13]
rata_rata = sum(nilai_matakuliah) / len(nilai_matakuliah)
print("nilai matakuliah=", nilai_matakuliah)
print("nilai rata-rata=", rata_rata)
172
tests/test_dependency_parallel.py
ariloulaleelay/fastapi
0
2171143
from asyncio import sleep
from time import time

from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
client = TestClient(app)


async def dependency1():
    ts = time()
    await sleep(0.1)
    return ts


async def dependency2():
    ts = time()
    await sleep(0.1)
    return ts


@app.get("/parallel-dependencies")
async def parallel_dependencies(
    ts1=Depends(dependency1),
    ts2=Depends(dependency2),
):
    return abs(ts1 - ts2)


def test_dependencies_run_in_parallel():
    response = client.get('/parallel-dependencies')
    assert 200 == response.status_code, response.text
    assert response.json() < 0.1
664
maverick_api/modules/api/maverick/maverick_shutdown.py
deodates-dev/UAV-maverick-api
4
2169107
import logging import copy from maverick_api.modules import schemaBase from maverick_api.modules.base.util.process_runner import ProcessRunner # graphql imports from graphql import ( GraphQLField, GraphQLObjectType, GraphQLString, GraphQLBoolean, GraphQLInt, ) from graphql.pyutils.simple_pub_sub import SimplePubSubIterator application_log = logging.getLogger("tornado.application") class MaverickShutdownSchema(schemaBase): def __init__(self): super().__init__(self) self.name = "MaverickShutdown" self.shutdown_command_defaults = { "running": False, "uptime": None, "stdout": None, "stderror": None, "returncode": None, } self.shutdown_command = copy.deepcopy(self.shutdown_command_defaults) self.shutdown_proc = None self.shutdown_command_type = GraphQLObjectType( self.name, lambda: { "running": GraphQLField(GraphQLBoolean, description=""), "uptime": GraphQLField( GraphQLInt, description="Number of seconds the process has been running for", ), "stdout": GraphQLField(GraphQLString, description=""), "stderror": GraphQLField(GraphQLString, description=""), "returncode": GraphQLField(GraphQLInt, description=""), }, description="Maverick shutdown interface", ) self.q = { self.name: GraphQLField( self.shutdown_command_type, resolve=self.get_shutdown_command_status ) } self.m = { self.name: GraphQLField( self.shutdown_command_type, args=self.get_mutation_args(self.shutdown_command_type), resolve=self.run_shutdown_command, ) } self.s = { self.name: GraphQLField( self.shutdown_command_type, subscribe=self.sub_shutdown_command_status, resolve=None, ) } async def run_shutdown_command(self, root, info, **kwargs): application_log.debug(f"run_shutdown_command {kwargs}") cmd = "sudo shutdown -a now" if self.shutdown_proc: # already running? if self.shutdown_proc.complete: self.shutdown_proc = None if not self.shutdown_proc: # try to run the command self.shutdown_proc = ProcessRunner( cmd, started_callback=self.process_callback, output_callback=self.process_callback, complete_callback=self.process_callback, ) self.shutdown_proc.start() return self.shutdown_command def process_callback(self, *args, **kwargs): self.shutdown_command["running"] = self.shutdown_proc.running self.shutdown_command["uptime"] = self.shutdown_proc.uptime self.shutdown_command["stdout"] = self.shutdown_proc.stdout self.shutdown_command["stderror"] = self.shutdown_proc.stderror self.shutdown_command["returncode"] = self.shutdown_proc.returncode self.subscriptions.emit( self.subscription_string + self.name, {self.name: self.shutdown_command}, ) return self.shutdown_command def sub_shutdown_command_status(self, root, info): return SimplePubSubIterator( self.subscriptions, self.subscription_string + self.name, ) def get_shutdown_command_status(self, root, info): return self.shutdown_command
3,686
darktorch/utils/data/datasets.py
hpennington/darktorch
2
2171608
import os import sys import pdb import numpy as np import cv2 # import matplotlib # matplotlib.use('Agg') # import matplotlib.pyplot as plt # import visdom from torchvision import transforms from torch.utils.data import Dataset import darktorch class ListDataset(Dataset): def __init__(self, filepath, fp_transform_fn, transform=None, target_transform=None, shuffle_labels=True): self.fp_transform_fn = fp_transform_fn self.transform = transform self.target_transform = target_transform self.shuffle_labels = shuffle_labels with open(filepath, 'r') as f: self.image_paths = f.read().splitlines() def __len__(self): return len(self.image_paths) def __getitem__(self, index): img_path = self.image_paths[index] img = cv2.imread(img_path) label_path = self.fp_transform_fn(img_path) label = np.loadtxt(label_path).reshape((-1, 5)) if self.shuffle_labels == True: np.random.shuffle(label) if self.transform is not None: img = self.transform(img) #darktorch.utils.write_tensor(img, 'sized-{}.bin'.format(index), True, append=False) #plt.imshow(img.numpy().transpose([1, 2, 0])) #self.vis.matplot(plt) # pdb.set_trace() if self.target_transform is not None: label = self.target_transform(label) return img, label
1,507
__main__.py
GRAYgoose124/mushishi
2
2171236
# Mushishi: A smart discord bot using the discord.py[rewrite] API. # Copyright (C) 2018 <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. import os from mushishi import Mushishi def main(): dir_path = os.path.dirname(os.path.realpath(__file__)) config_path = os.path.join(dir_path, 'config.json') bot = Mushishi(config_path) try: bot.run() except KeyboardInterrupt: print("Interrupted by user. Data may not be saved.") return print("---Shutdown complete---\nGoodbye.") if __name__ == '__main__': main()
1,177
app/domain/model/blacklist_token.py
minhtuan221/architecture-collection
3
2169329
import datetime

from sqlalchemy import Column, String, Integer, DateTime

from app.domain.model import Base


class BlacklistToken(Base):
    """ Token Model for storing JWT tokens """
    __tablename__ = 'blacklist_tokens'

    id: int = Column(Integer, primary_key=True, autoincrement=True)
    token: str = Column(String(500), unique=True, nullable=False)
    blacklisted_on: datetime.datetime = Column(DateTime, nullable=False)

    def __repr__(self):
        return '<id: token: {}'.format(self.token)
518
testsuite/string-reg/run.py
LongerVision/OpenShadingLanguage
1,105
2171057
#!/usr/bin/env python # Copyright Contributors to the Open Shading Language project. # SPDX-License-Identifier: BSD-3-Clause # https://github.com/AcademySoftwareFoundation/OpenShadingLanguage #osl_concat command += testshade("-t 1 -g 64 64 str_concat -od uint8 -o res concat_ref.tif -o res_m concat_m_ref.tif") #osl_stoi command += testshade("-t 1 -g 64 64 str_stoi -od uint8 -o res stoi_ref.tif -o res_m stoi_m_ref.tif") #osl_endswith command += testshade("-t 1 -g 64 64 str_endswith -od uint8 -o res_t endswith_t_ref.tif -o res_f endswith_f_ref.tif" " -o res_t_m endswith_t_m_ref.tif -o res_f_m endswith_f_m_ref.tif") #osl_getchar command += testshade("-t 1 -g 64 64 str_getchar -od uint8 str_getchar -o res_t1 getchar_t1_ref.tif -o res_t2 getchar_t2_ref.tif" " -o res_f1 getchar_f1_ref.tif -o res_f2 getchar_f2_ref.tif" " -o res_t1_m getchar_t1_m_ref.tif -o res_t2_m getchar_t2_m_ref.tif" " -o res_f1_m getchar_f1_m_ref.tif -o res_f2_m getchar_f2_m_ref.tif") #osl_hash command += testshade("-t 1 -g 64 64 str_hash -od uint8 -o res hash_ref.tif -o res_m hash_m_ref.tif") #osl_startswith command += testshade("-t 1 -g 64 64 str_startswith -od uint8 -o res_t startswith_t_ref.tif -o res_f startswith_f_ref.tif" " -o res_t_m startswith_t_m_ref.tif -o res_f_m startswith_f_m_ref.tif") #osl_stof command += testshade("-t 1 -g 64 64 str_stof -od uint8 -o res stof_ref.tif -o res_m stof_m_ref.tif") #osl_strlen command += testshade("-t 1 -g 64 64 str_strlen -od uint8 -o res strlen_ref.tif -o res_m strlen_m_ref.tif") #osl_substr command += testshade("-t 1 -g 64 64 str_substr -od uint8 -o res sub_ref.tif -o res1 sub1_ref.tif -o res2 sub2_ref.tif" " -o res_m sub_m_ref.tif -o res1_m sub1_m_ref.tif -o res2_m sub2_m_ref.tif") outputs = [ "concat_ref.tif", "concat_m_ref.tif", "stoi_ref.tif", "stoi_m_ref.tif", "endswith_t_ref.tif", "endswith_f_ref.tif", "endswith_t_m_ref.tif", "endswith_f_m_ref.tif", "getchar_t1_ref.tif", "getchar_t2_ref.tif", "getchar_f1_ref.tif", "getchar_f2_ref.tif", "getchar_t1_m_ref.tif", "getchar_t2_m_ref.tif", "getchar_f1_m_ref.tif", "getchar_f2_m_ref.tif", "hash_ref.tif", "hash_m_ref.tif", "startswith_t_ref.tif", "startswith_f_ref.tif", "startswith_t_m_ref.tif", "startswith_f_m_ref.tif", "stof_ref.tif", "stof_m_ref.tif", "strlen_ref.tif", "strlen_m_ref.tif", "sub_ref.tif", "sub1_ref.tif", "sub2_ref.tif", "sub_m_ref.tif", "sub1_m_ref.tif", "sub2_m_ref.tif", ] # expect a few LSB failures failthresh = 0.008 failpercent = 3
2,776
dae/dae/utils/sql_utils.py
iossifovlab/gpf
0
2170149
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Executable, ClauseElement


class CreateView(Executable, ClauseElement):
    def __init__(self, name, select):
        self.name = name
        self.select = select


@compiles(CreateView)
def visit_create_view(element, compiler, **kw):
    create = "CREATE VIEW {name} AS {sql}".format(
        name=element.name,
        sql=compiler.process(element.select, literal_binds=True)
    )
    return create
487
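Since CreateView is marked Executable, it can be handed straight to a connection once the @compiles hook is registered. A sketch under the assumption of a SQLAlchemy 1.4-style API; the engine, table and view names below are hypothetical:

from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, select

engine = create_engine("sqlite://")           # throwaway in-memory database
metadata = MetaData()
users = Table(
    "users", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
metadata.create_all(engine)

# The custom construct compiles to "CREATE VIEW user_names AS SELECT ..." and
# is executed like any other statement.
view = CreateView("user_names", select(users.c.name))
with engine.begin() as conn:
    conn.execute(view)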
dbms/TravellingMamas/migrations/0001_initial.py
compiletimeterror/Traveling_Mamas
0
2171428
# Generated by Django 2.2.2 on 2019-10-23 15:03 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='airline', fields=[ ('airline_id', models.AutoField(primary_key=True, serialize=False)), ('name', models.CharField(max_length=100)), ('head_quarter_city', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='contact', fields=[ ('user_id', models.AutoField(primary_key=True, serialize=False)), ('contact', models.IntegerField()), ], ), migrations.CreateModel( name='flight', fields=[ ('flight_number', models.AutoField(primary_key=True, serialize=False)), ('origin', models.CharField(max_length=100)), ('place', models.CharField(max_length=100)), ('destination', models.CharField(max_length=100)), ('arrival_time', models.TimeField(blank=True)), ('departure_time', models.TimeField(blank=True)), ], ), migrations.CreateModel( name='hotel', fields=[ ('hotel_id', models.AutoField(primary_key=True, serialize=False)), ('hotel_name', models.CharField(max_length=100)), ('description', models.CharField(max_length=100)), ('location', models.CharField(max_length=100)), ('reviews', models.IntegerField()), ], ), migrations.CreateModel( name='hotel_class', fields=[ ('flight_number', models.AutoField(primary_key=True, serialize=False)), ('type', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='rooms', fields=[ ('room_id', models.AutoField(primary_key=True, serialize=False)), ('accomadation', models.IntegerField()), ], ), migrations.CreateModel( name='tour', fields=[ ('tour_id', models.AutoField(primary_key=True, serialize=False)), ('tour_name', models.CharField(max_length=100)), ('place', models.CharField(max_length=100)), ('location', models.CharField(max_length=100)), ('description', models.CharField(max_length=100)), ('fare', models.IntegerField()), ('reviews', models.IntegerField()), ], ), migrations.CreateModel( name='user', fields=[ ('user_id', models.AutoField(primary_key=True, serialize=False)), ('email', models.CharField(max_length=100)), ('first_name', models.CharField(max_length=100)), ('middle_name', models.CharField(max_length=100)), ('last_name', models.CharField(max_length=100)), ('password', models.CharField(max_length=100)), ('dob', models.DateField(blank=True)), ], options={ 'unique_together': {('user_id', 'email')}, }, ), migrations.CreateModel( name='room_type', fields=[ ('room_id', models.AutoField(primary_key=True, serialize=False)), ('room_number', models.IntegerField()), ('price', models.IntegerField()), ], options={ 'unique_together': {('room_number', 'room_id')}, }, ), migrations.CreateModel( name='has_room', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('room_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.rooms')), ('room_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.room_type')), ], options={ 'unique_together': {('room_id', 'room_number')}, }, ), migrations.CreateModel( name='has_flight', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('flight_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.flight')), ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.hotel_class')), ], options={ 'unique_together': {('flight_number', 
'type')}, }, ), migrations.CreateModel( name='consists', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('hotel_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.hotel')), ('room_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.room_type')), ], options={ 'unique_together': {('hotel_id', 'room_id')}, }, ), migrations.CreateModel( name='book', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('flight_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.flight')), ('hotel_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.hotel')), ('tour_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.tour')), ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.user')), ], options={ 'unique_together': {('user_id', 'flight_number', 'hotel_id', 'tour_id')}, }, ), migrations.CreateModel( name='belong_to', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('airline_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.airline')), ('flight_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravellingMamas.flight')), ], options={ 'unique_together': {('airline_id', 'flight_number')}, }, ), ]
7,004
burner_flame.py
cattech-lab/cantera_examples
4
2171157
""" Burner-stabilized flat flame of the premixed hydrogen-oxygen """ import cantera as ct # Simulation parameters p = 0.05 * ct.one_atm # pressure [Pa] Tin = 373.0 # temperature [K] mdot = 0.06 # mass flux [kg/s/m^2] comp = 'H2:1.5, O2:1, AR:7' # premixed gas composition width = 0.5 # region width [m] # IdealGasMix object used to compute mixture properties gas = ct.Solution('h2o2.yaml') gas.TPX = Tin, p, comp # Set up flame object f = ct.BurnerFlame(gas, width=width) f.burner.mdot = mdot f.set_refine_criteria(ratio=3.0, slope=0.05, curve=0.1) f.show_solution() # Solve f.transport_model = 'Multi' f.solve(loglevel=1, auto=True) f.show_solution() # write the velocity, temperature, density, and mole fractions to a CSV file f.write_csv('burner_flame.csv', quiet=False)
782
src/app/main.py
starlite-api/backend-starlite-postgres
3
2171553
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
from starlite import Starlite
from starlite.plugins.sql_alchemy import SQLAlchemyPlugin

from app import api, cache, db, exceptions, health, openapi
from app.config import app_settings, sentry_settings
from app.logging import log_config

sentry_sdk.init(
    dsn=sentry_settings.DSN,
    environment=app_settings.ENVIRONMENT,
    integrations=[SqlalchemyIntegration()],
    traces_sample_rate=sentry_settings.TRACES_SAMPLE_RATE,
)

app = Starlite(
    after_request=db.session_after_request,
    cache_config=cache.config,
    debug=app_settings.DEBUG,
    exception_handlers={
        HTTP_500_INTERNAL_SERVER_ERROR: exceptions.logging_exception_handler
    },
    middleware=[SentryAsgiMiddleware],
    on_shutdown=[db.on_shutdown, cache.on_shutdown],
    on_startup=[log_config.configure],
    openapi_config=openapi.config,
    plugins=[SQLAlchemyPlugin()],
    route_handlers=[health.check, api.v1_router],
)
1,123
sickbeard/lib/guessit/matcher.py
Branlala/docker-sickbeardfr
0
2170772
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 <NAME> <<EMAIL>> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import PY3, u, base_text_type from guessit.matchtree import MatchTree from guessit.textutils import normalize_unicode import logging log = logging.getLogger(__name__) class IterativeMatcher(object): def __init__(self, filename, filetype='autodetect', opts=None): """An iterative matcher tries to match different patterns that appear in the filename. The 'filetype' argument indicates which type of file you want to match. If it is 'autodetect', the matcher will try to see whether it can guess that the file corresponds to an episode, or otherwise will assume it is a movie. The recognized 'filetype' values are: [ autodetect, subtitle, movie, moviesubtitle, episode, episodesubtitle ] The IterativeMatcher works mainly in 2 steps: First, it splits the filename into a match_tree, which is a tree of groups which have a semantic meaning, such as episode number, movie title, etc... The match_tree created looks like the following: 0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111 0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000 0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000 __________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___ xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc [XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv The first 3 lines indicates the group index in which a char in the filename is located. So for instance, x264 is the group (0, 4, 1), and it corresponds to a video codec, denoted by the letter'v' in the 4th line. (for more info, see guess.matchtree.to_string) Second, it tries to merge all this information into a single object containing all the found properties, and does some (basic) conflict resolution when they arise. """ valid_filetypes = ('autodetect', 'subtitle', 'video', 'movie', 'moviesubtitle', 'episode', 'episodesubtitle') if filetype not in valid_filetypes: raise ValueError("filetype needs to be one of %s" % valid_filetypes) if not PY3 and not isinstance(filename, unicode): log.warning('Given filename to matcher is not unicode...') filename = filename.decode('utf-8') filename = normalize_unicode(filename) if opts is None: opts = [] elif isinstance(opts, base_text_type): opts = opts.split() self.match_tree = MatchTree(filename) mtree = self.match_tree mtree.guess.set('type', filetype, confidence=1.0) def apply_transfo(transfo_name, *args, **kwargs): transfo = __import__('guessit.transfo.' 
+ transfo_name, globals=globals(), locals=locals(), fromlist=['process'], level=0) transfo.process(mtree, *args, **kwargs) # 1- first split our path into dirs + basename + ext apply_transfo('split_path_components') # 2- guess the file type now (will be useful later) apply_transfo('guess_filetype', filetype) if mtree.guess['type'] == 'unknown': return # 3- split each of those into explicit groups (separated by parentheses # or square brackets) apply_transfo('split_explicit_groups') # 4- try to match information for specific patterns # NOTE: order needs to comply to the following: # - website before language (eg: tvu.org.ru vs russian) # - language before episodes_rexps # - properties before language (eg: he-aac vs hebrew) # - release_group before properties (eg: XviD-?? vs xvid) if mtree.guess['type'] in ('episode', 'episodesubtitle'): strategy = [ 'guess_date', 'guess_website', 'guess_release_group', 'guess_properties', 'guess_language', 'guess_video_rexps', 'guess_episodes_rexps', 'guess_weak_episodes_rexps' ] else: strategy = [ 'guess_date', 'guess_website', 'guess_release_group', 'guess_properties', 'guess_language', 'guess_video_rexps' ] if 'nolanguage' in opts: strategy.remove('guess_language') for name in strategy: apply_transfo(name) # more guessers for both movies and episodes for name in ['guess_bonus_features', 'guess_year']: apply_transfo(name) if 'nocountry' not in opts: apply_transfo('guess_country') # split into '-' separated subgroups (with required separator chars # around the dash) apply_transfo('split_on_dash') # 5- try to identify the remaining unknown groups by looking at their # position relative to other known elements if mtree.guess['type'] in ('episode', 'episodesubtitle'): apply_transfo('guess_episode_info_from_position') else: apply_transfo('guess_movie_title_from_position') # 6- perform some post-processing steps apply_transfo('post_process') log.debug('Found match tree:\n%s' % u(mtree)) def matched(self): return self.match_tree.matched()
6,481
entertainment_center.py
1UnboundedSentience/MovieTrailerApp
0
2170875
import media
import fresh_tomatoes

shawshank = media.Movie('Shawshank Redemption',
                        'http://t0.gstatic.com/images?q=tbn:ANd9GcSkmMH-bEDUS2TmK8amBqgIMgrfzN1_mImChPuMrunA1XjNTSKm',
                        'https://www.youtube.com/watch?v=6hB3S9bIaco')

titanic = media.Movie('Titantic',
                      'https://titanicsound.files.wordpress.com/2014/11/titanic_movie-hd-1.jpg',
                      'https://www.youtube.com/watch?v=thrdkT9vE3k')

star_wars_8 = media.Movie('Star Wars 8',
                          'http://static.srcdn.com/wp-content/uploads/2017/01/star-wars-8-last-jedi.jpg',
                          'https://www.youtube.com/watch?v=Yw_rdbY2I1c')

print shawshank

movies_list = [shawshank, titanic, star_wars_8]
fresh_tomatoes.open_movies_page(movies_list)
666
01-algorithm-design-and-techniques/2_algorithmic_warmup/fibonacci_huge.py
hamidgasmi/training.computerscience.algorithms-datastructures
8
2171540
# Problem Introduction:
# In this problem, your goal is to compute F_n modulo m, where n may be really huge: up to 10^18. For such
# values of n, an algorithm looping for n iterations will not fit into one second for sure. Therefore we need to
# avoid such a loop.

import sys
from fibonacci_last_digit import get_fibonacci_last_digit


# O(m)?
def get_fibonacci_huge(n, m):
    if n <= 1:
        return n

    previous = 0
    current = 1

    period = []
    period.append(previous)
    period.append(current)

    tmp = []
    for _ in range(n - 1):
        previous, current = current, (previous + current) % m

        if len(period) > 0 and period[len(tmp)] == current:
            tmp.append(current)
            if len(period) == len(tmp):
                break
        else:
            if len(tmp) > 0:
                period.extend(tmp)  # O(m)?
                tmp.clear()
            period.append(current)

    return period[n % len(period)]


if __name__ == '__main__':
    input = sys.stdin.read()
    n, m = map(int, input.split())
    for i in range(n):
        assert(get_fibonacci_last_digit(i, m) == get_fibonacci_huge(i, m))
    print(get_fibonacci_huge(n, m))
1,156
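The period-detection loop above still iterates up to n - 1 times in the worst case. The textbook shortcut is to find the Pisano period explicitly (the sequence mod m restarts whenever the consecutive pair (0, 1) reappears) and reduce n modulo that period first. A sketch of that approach, not the repository's code:

def fibonacci_huge_pisano(n, m):
    # Find the Pisano period for modulus m: the Fibonacci sequence mod m
    # repeats as soon as the pair (0, 1) shows up again.
    previous, current = 0, 1
    period = 1
    while True:
        previous, current = current, (previous + current) % m
        if (previous, current) == (0, 1):
            break
        period += 1

    # F_n mod m only depends on n mod period, which is small enough to loop over.
    n %= period
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, (a + b) % m
    return b

# e.g. fibonacci_huge_pisano(2015, 3) == 1  (period mod 3 is 8, F_7 = 13, 13 % 3 = 1)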
src/control.py
shrick/shricktris
0
2171230
# control.py

import pygame

MIN_FPS = 1
MAX_FPS = 24


class Control:
    def __init__(self, fps):
        self._clock = pygame.time.Clock()
        self._to_quit = False
        self._keystates = {}
        self._fps = 0
        self.adjust_fps(fps)

    def process_events(self):
        self._clock.tick(self._fps)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self._to_quit = True
            elif event.type == pygame.KEYDOWN:
                self._keystates[event.key] = True
            elif event.type == pygame.KEYUP:
                self._keystates[event.key] = False

    def adjust_fps(self, dfps):
        self._fps = max(min(self._fps + dfps, MAX_FPS), MIN_FPS)
        print("[DEBUG] fps = " + str(self._fps))

    def _is_pressed(self, keys):
        return all(k in self._keystates and self._keystates[k] for k in keys)

    def quit(self):
        return self._to_quit or self._is_pressed([pygame.K_q])

    def pause(self):
        return self._is_pressed([pygame.K_PAUSE])

    def step_left(self):
        return self._is_pressed([pygame.K_LEFT])

    def step_right(self):
        return self._is_pressed([pygame.K_RIGHT])

    def step_down(self):
        return self._is_pressed([pygame.K_DOWN])

    def fall_down(self):
        return self._is_pressed([pygame.K_SPACE])

    def rotate(self):
        return self._is_pressed([pygame.K_UP])

    def speed_up(self):
        return self._is_pressed([pygame.K_PLUS])

    def speed_down(self):
        return self._is_pressed([pygame.K_MINUS])
1,609
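A minimal sketch of the kind of game loop this Control class is written for (the rest of the shricktris code is not shown here, so the import path and the piece-moving calls are assumptions and stubs):

import pygame
from control import Control

pygame.init()
screen = pygame.display.set_mode((320, 480))
control = Control(fps=12)

while not control.quit():
    control.process_events()          # ticks the clock and records key states
    if control.step_left():
        pass                          # move the current piece one column left
    if control.rotate():
        pass                          # rotate the current piece
    pygame.display.flip()

pygame.quit()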
ssd_data/_utils.py
star-baba/res50_sa_ssd
1
2170797
import os, fnmatch import numpy as np def _get_recurrsive_paths(basedir, ext): """ :param basedir: :param ext: :return: list of path of files including basedir and ext(extension) """ matches = [] for root, dirnames, filenames in os.walk(basedir): for filename in fnmatch.filter(filenames, '*.{}'.format(ext)): matches.append(os.path.join(root, filename)) return sorted(matches) def _get_xml_et_value(xml_et, key, rettype=str): """ :param xml_et: Elementtree's element :param key: :param rettype: class, force to convert it from str :return: rettype's value Note that if there is no keys in xml object, return None """ elm = xml_et.find(key) if elm is None: return elm if isinstance(rettype, str): return elm.text else: return rettype(elm.text) def _one_hot_encode(indices, class_num): """ :param indices: list of index :param class_num: :return: ndarray, relu_one-hot vectors """ size = len(indices) one_hot = np.zeros((size, class_num)) one_hot[np.arange(size), indices] = 1 return one_hot def _separate_ignore(target_transform): """ Separate Ignore by target_transform :param target_transform: :return: ignore, target_transform """ if target_transform: from .target_transforms import Ignore, Compose if isinstance(target_transform, Ignore): return target_transform, None if not isinstance(target_transform, Compose): return None, target_transform # search existing target_transforms.Ignore in target_transform new_target_transform = [] ignore = None for t in target_transform.target_transforms: if isinstance(t, Ignore): ignore = t else: new_target_transform += [t] return ignore, Compose(new_target_transform) else: return None, target_transform def _contain_ignore(target_transform): if target_transform: from .target_transforms import Ignore, Compose if isinstance(target_transform, Ignore): raise ValueError('target_transforms.Ignore must be passed to \'ignore\' argument') if isinstance(target_transform, Compose): for t in target_transform.target_transforms: if isinstance(t, Ignore): raise ValueError('target_transforms.Ignore must be passed to \'ignore\' argument') return target_transform def _check_ins(name, val, cls, allow_none=False): if allow_none and val is None: return val if not isinstance(val, cls): raise ValueError('Argument \'{}\' must be {}, but got {}'.format(name, cls.__name__, type(val).__name__)) return val DATA_ROOT = os.path.join(os.path.expanduser('~'), 'data')
2,866
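A couple of concrete calls make the small helpers above easier to read; the import path is assumed from the file location and the values are invented for illustration:

from ssd_data._utils import _check_ins, _one_hot_encode

print(_one_hot_encode([2, 0, 3], class_num=4))
# [[0. 0. 1. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]

# _check_ins validates an argument's type and returns it unchanged.
batch_size = _check_ins('batch_size', 32, int)                    # -> 32
maybe_cfg = _check_ins('maybe_cfg', None, dict, allow_none=True)  # -> None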
src/config/svc-monitor/svc_monitor/services/loadbalancer/drivers/f5/db.py
srajag/contrail-controller
0
2171552
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # """ F5 DB to store Pool and associated objects """ import pycassa from pycassa.system_manager import * import json import time class F5LBDB(object): _KEYSPACE = 'f5_lb_keyspace' _F5_LB_CF = 'pool_table' def __init__(self, args): self._args = args if args.cluster_id: self._keyspace = '%s_%s' % (args.cluster_id, F5LBDB._KEYSPACE) else: self._keyspace = F5LBDB._KEYSPACE def init_database(self): self._cassandra_init() def pool_get(self, pool_id): json_str = self._db_get(self._f5_lb_cf, pool_id) if json_str: return json.loads(json_str['info']) else: return None def pool_insert(self, pool_id, pool_obj): entry = json.dumps(pool_obj) return self._db_insert(self._f5_lb_cf, pool_id, {'info': entry}) def pool_remove(self, pool_id, columns=None): return self._db_remove(self._f5_lb_cf, pool_id, columns) def pool_list(self): ret_list = [] for each_entry in self._db_list(self._f5_lb_cf) or []: obj_dict = json.loads(each_entry['info']) ret_list.append(obj_dict) return ret_list # db CRUD def _db_get(self, table, key): try: entry = table.get(key) except Exception as e: return None return entry def _db_insert(self, table, key, entry): try: table.insert(key, entry) except Exception as e: return False return True def _db_remove(self, table, key, columns=None): try: if columns: table.remove(key, columns=columns) else: table.remove(key) except Exception as e: return False return True def _db_list(self, table): try: entries = list(table.get_range()) except Exception as e: return None return entries # initialize cassandra def _cassandra_init(self): server_idx = 0 num_dbnodes = len(self._args.cassandra_server_list) connected = False while not connected: try: cass_server = self._args.cassandra_server_list[server_idx] sys_mgr = SystemManager(cass_server) connected = True except Exception as e: server_idx = (server_idx + 1) % num_dbnodes time.sleep(3) if self._args.reset_config: try: sys_mgr.drop_keyspace(self._keyspace) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! " + str(e) try: sys_mgr.create_keyspace(self._keyspace, SIMPLE_STRATEGY, {'replication_factor': str(num_dbnodes)}) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! " + str(e) # set up column families column_families = [self._F5_LB_CF] for cf in column_families: try: sys_mgr.create_column_family(self._keyspace, cf) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! " + str(e) conn_pool = pycassa.ConnectionPool(self._keyspace, self._args.cassandra_server_list, max_overflow=10, use_threadlocal=True, prefill=True, pool_size=10, pool_timeout=30, max_retries=-1, timeout=0.5) rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM self._f5_lb_cf = pycassa.ColumnFamily(conn_pool, self._F5_LB_CF, read_consistency_level=rd_consistency, write_consistency_level=wr_consistency)
4,321
core/models.py
thewolfcommander/IDCardGen
2
2171587
from django.db import models
from django.contrib.auth.models import PermissionsMixin, AbstractBaseUser, BaseUserManager
from django.conf import settings

UPLOAD_DIRECTORY_PROFILEPHOTO = 'images_profilephoto'
UPLOAD_DIRECTORY_ID_CARD_PHOTO = 'images_idcardphoto'


class CustomUserManager(BaseUserManager):
    """
    Custom user manager to handle all the operations for the Custom User model
    """
    def create_user(self, user_id, mobile_number, email, password, **extra_fields):
        # The original passed *extra_fields positionally; it must be unpacked as keyword arguments.
        user = self.model(user_id=user_id, mobile_number=mobile_number, email=email, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, user_id, mobile_number, email, password, **extra_fields):
        user = self.create_user(user_id, mobile_number, email, password, **extra_fields)
        user.is_admin = True
        user.is_superuser = True
        user.save(using=self._db)
        return user

    def get_by_natural_key(self, username):
        return self.get(user_id=username)


class User(AbstractBaseUser, PermissionsMixin):
    """
    User that is capable of using the Information System
    """
    GENDER = [
        ('MALE', "MALE"),
        ('FEMALE', "FEMALE"),
        ('TRANSGENDER', "TRANSGENDER"),
        ('PREFER_NOT_TO_SAY', "PREFER_NOT_TO_SAY")
    ]
    upload_directory = 'user_images'

    user_id = models.CharField(max_length=24, null=True, blank=True, unique=True, help_text="User's unique user id that is always used to login.")
    full_name = models.CharField(max_length=255, null=True, blank=True, help_text="User's full name")
    gender = models.CharField(max_length=255, choices=GENDER, null=True, blank=True, help_text="User's Gender")
    email = models.EmailField(max_length=255, blank=True, null=True, default='', help_text="User's Email")
    mobile_number = models.CharField(max_length=10, blank=True, null=True, help_text="User's Mobile number")
    profile_photo = models.ImageField(max_length=255, blank=True, null=True, upload_to=UPLOAD_DIRECTORY_PROFILEPHOTO, help_text="User's Profile photo")

    is_admin = models.BooleanField(default=False)
    is_executive = models.BooleanField(default=False)
    is_photographer = models.BooleanField(default=False)
    is_student = models.BooleanField(default=True)

    objects = CustomUserManager()

    USERNAME_FIELD = 'user_id'
    REQUIRED_FIELDS = ['mobile_number', 'email',]
    UNIQUE_TOGETHER = ['user_id', 'email']

    def __str__(self):
        return '%s - %s' % (self.id, self.full_name)

    @property
    def is_staff(self):
        "Is the user a member of staff?"
        # Simplest possible answer: All admins are staff
        return self.is_admin


class StudentInformation(models.Model):
    """
    Student Information being uploaded by the Executive
    """
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, help_text="User Relationship with this model.")
    full_name = models.CharField(max_length=255, null=True, blank=True, help_text="Student's full name")
    father_name = models.CharField(max_length=255, null=True, blank=True, help_text="Father's name of student.")
    mother_name = models.CharField(max_length=255, null=True, blank=True, help_text="Mother's name of student.")
    studying_class = models.CharField(max_length=10, null=True, blank=True, help_text="Class in which student is currently studying.")
    section = models.CharField(max_length=10, null=True, blank=True, help_text="Section of class in which student is currently studying.")
    id_card_photo = models.ImageField(max_length=255, upload_to=UPLOAD_DIRECTORY_ID_CARD_PHOTO, null=True, blank=True, help_text="Student's ID card photo that has been uploaded by Photographer.")
    school_name = models.CharField(max_length=255, null=True, blank=True, help_text="Student's school name.")
    school_address_full = models.CharField(max_length=524, null=True, blank=True, help_text="Student's School complete address.")
    mobile_number = models.CharField(max_length=20, null=True, blank=True, help_text="Student's mobile number.")
    email = models.CharField(max_length=255, null=True, blank=True, help_text="Student's email id.")

    class Meta:
        unique_together = ['user', 'full_name', 'father_name', 'email']


class SchoolInformation(models.Model):
    """
    School Information Complete
    """
    school_code = models.CharField(max_length=255, null=True, blank=True, help_text="School Code by which it is unique")
    school_name = models.CharField(max_length=255, null=True, blank=True, help_text="School Name full")
    school_address_line1 = models.CharField(max_length=255, null=True, blank=True, help_text="School address line 1")
    school_address_line2 = models.CharField(max_length=255, null=True, blank=True, help_text="School Address line 2")
    school_city = models.CharField(max_length=255, null=True, blank=True, help_text="School City in which it is located.")
    school_state = models.CharField(max_length=255, null=True, blank=True, help_text="School state in which it is located.")
    zipcode = models.CharField(max_length=255, null=True, blank=True, help_text="School Area's pincode.")
    school_principal_name = models.CharField(max_length=255, null=True, blank=True, help_text="School's principal name.")

    def __str__(self):
        return "%s - %s, %s, %s, %s, %s" % (self.school_code, self.school_name, self.school_address_line1, self.school_address_line2, self.school_city, self.school_state)


class Feedback(models.Model):
    email = models.CharField(max_length=255, null=True, blank=True, help_text="Email of the person who is giving feedback on the system.")
    name = models.CharField(max_length=255, null=True, blank=True, help_text="Name of the person who is giving feedback on the system.")
    message = models.CharField(max_length=1055, null=True, blank=True, help_text="Message of the person who is giving feedback on the system.")


class VerificationCard(models.Model):
    user = models.ForeignKey(User, on_delete=models.DO_NOTHING, help_text="User model Relationship")
    student_information = models.ForeignKey(StudentInformation, on_delete=models.DO_NOTHING, help_text="Student Information model Relationship")
    is_verified_by_student = models.BooleanField(default=False)
6,307
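The custom manager above only works once extra_fields is forwarded as keyword arguments (the **extra_fields fix noted in the code). A hypothetical usage sketch, assuming a configured Django project with this app installed; all credentials and values are placeholders:

# Hypothetical usage of CustomUserManager; values below are placeholders.
from core.models import User

student = User.objects.create_user(
    user_id="STU0001",
    mobile_number="9999999999",
    email="student@example.com",
    password="change-me",
    full_name="Example Student",   # forwarded via **extra_fields
)

admin = User.objects.create_superuser(
    user_id="ADM0001",
    mobile_number="8888888888",
    email="admin@example.com",
    password="change-me",
)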
PLC/Methods/SliceListNames.py
dreibh/planetlab-lxc-plcapi
0
2171130
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.Auth import Auth
from PLC.Slices import Slice, Slices
from PLC.Methods.GetSlices import GetSlices
# PLCInvalidArgument is raised below; presumed to live in PLC.Faults (the import was missing in the original).
from PLC.Faults import PLCInvalidArgument


class SliceListNames(GetSlices):
    """
    Deprecated. Can be implemented with GetSlices.

    List the names of registered slices.

    Users may only query slices of which they are members. PIs may query
    any of the slices at their sites. Admins may query any slice. If a
    slice that cannot be queried is specified in slice_filter, details
    about that slice will not be returned.
    """

    status = "deprecated"

    roles = ['admin', 'pi', 'user']

    accepts = [
        Auth(),
        Parameter(str, "Slice prefix", nullok = True)
        ]

    returns = [Slice.fields['name']]

    def call(self, auth, prefix=None):
        slice_filter = None
        if prefix:
            slice_filter = {'name': prefix + '*'}

        slices = GetSlices.call(self, auth, slice_filter)

        if not slices:
            raise PLCInvalidArgument("No such slice")

        slice_names = [slice['name'] for slice in slices]

        return slice_names
1,172
PythonExercicios/ex097.py
Lucas-ns/Python-3-Curso-Em-Video
0
2171603
def escreva(msg):
    print('~' * (len(msg) + 4))
    print(f'  {msg}')
    print('~' * (len(msg) + 4))


escreva('Olá, Mundo!')
escreva('<NAME>')
escreva('CeV')
escreva('Curso em Vídeo de Python 3')
200
backend/src/publisher/aggregation/user/models.py
rutvikpadhiyar000/github-trends
157
2171609
from typing import List, Optional

from pydantic import BaseModel


class LanguageStats(BaseModel):
    lang: str
    loc: int
    percent: float
    color: Optional[str]


class RepoLanguage(BaseModel):
    lang: str
    color: Optional[str]
    loc: int


class RepoStats(BaseModel):
    repo: str
    private: bool
    langs: List[RepoLanguage]
    loc: int
361
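These pydantic models validate plain dicts at construction time and coerce nested dicts into RepoLanguage instances. A small illustrative sketch with invented values:

# Illustrative only; the sample values below are made up.
stats = RepoStats(
    repo="example-org/example-repo",
    private=False,
    loc=1200,
    langs=[
        {"lang": "Python", "color": "#3572A5", "loc": 900},
        {"lang": "TypeScript", "color": "#2b7489", "loc": 300},
    ],
)
print(stats.langs[0].lang)  # nested dicts are coerced into RepoLanguage instances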
Exercicios/Ex_054.py
jotmar/PythonEx
0
2170667
import datetime

ano = int((str(datetime.datetime.now()))[:4])
maior = 0
menor = 0
nas = 0
idade = 0
for c in range(1, 8):
    nas = int(input('Em que ano você nasceu? '))
    if (ano - nas) >= 18:
        maior += 1
    else:
        menor += 1
print(f'Existem {menor} menores de idade e {maior} maiores de idade')
314
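For reference, the current year can be read directly from the datetime object instead of slicing its string form; a one-line alternative sketch:

# Alternative to int(str(datetime.datetime.now())[:4])
ano = datetime.datetime.now().year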
src/utils/draw.py
hoel-bagard/MNIST-TensorFlow
0
2170546
import cv2
import numpy as np


def draw_pred(imgs: np.ndarray, predictions: np.ndarray, labels: np.ndarray):
    """ Draw predictions and labels on the image to help with TensorBoard visualisation.

    Args:
        imgs: Raw images.
        predictions: Predictions of the network, after softmax but before taking argmax
        labels: Labels corresponding to the images

    Returns:
        images with information written on them
    """
    new_imgs = []
    for img, preds, label in zip(imgs, predictions, labels):
        img = np.asarray(img * 255.0, dtype=np.uint8)
        img = cv2.resize(img, (480, 480), interpolation=cv2.INTER_AREA)
        preds = str([round(float(conf), 2) for conf in preds]) + f" ==> {np.argmax(preds)}"
        img = cv2.putText(img, preds, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
        new_imgs.append(cv2.putText(img, f"Label: {label}", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0), 1, cv2.LINE_AA))

    return np.asarray(new_imgs)
1,044
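draw_pred expects images scaled to [0, 1] and per-class probabilities. A hypothetical smoke test with fabricated MNIST-like inputs (shapes and values are assumptions, not taken from the repository):

# Illustrative call with fabricated data, not from the repository.
import numpy as np

dummy_imgs = np.random.rand(4, 28, 28).astype(np.float32)     # pixel values in [0, 1]
dummy_preds = np.random.dirichlet(np.ones(10), size=4)        # rows sum to 1, like softmax output
dummy_labels = np.array([3, 1, 4, 1])

annotated = draw_pred(dummy_imgs, dummy_preds, dummy_labels)
print(annotated.shape)  # (4, 480, 480) for single-channel inputs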
ukz/interface/gradientresolver.py
clauderichard/Ultrakazoid
0
2168240
# Like note resolver, but for gradients.
# TODO: use this properly from song.py

class GradientResolver:

    def __init__(self):
        self.gradients = []

    def addGradients(self, gradients):
        self.gradients.extend(gradients)

    def resolve(self):
        latestGradients = {}
        gradients = self.gradients
        for cur in gradients:
            key = (cur.typ, cur.c)
            prev = latestGradients.get(key, None)
            if prev is None:
                # new typ
                latestGradients[key] = cur
                continue
            if prev.t + prev.d < cur.t:
                # gap found, update hm
                latestGradients[key] = cur
            else:
                # TODO: truncate prev, delete stuff before next.t
                prev.tcut = cur.t
                # raise Exception("Gradient truncation not implemented!")
                if cur.t == prev.t:
                    prev.bend = None
                    latestGradients[key] = cur
                else:
                    latestGradients[key] = cur
        self.gradients = list(filter(lambda prev: prev.bend is not None, gradients))
        self.gradients.sort()
1,233
agents/python/directoryHarvester.py
matt-handy/TheAllCommander
0
2171060
import os
from threading import Thread
import socket


class DirectoryHarvester(Thread):

    def __init__(self, dirname, daemon, target_host, target_port):
        Thread.__init__(self)
        self.dirname = dirname
        self.stayAlive = True
        self.isHarvestComplete = False
        self.daemon = daemon
        self.target_host = target_host
        self.target_port = target_port

    def run(self):
        self.walkDir(self.dirname)

    def kill(self):
        self.stayAlive = False

    def isComplete(self):
        return self.isHarvestComplete

    def walkDir(self, dirname):
        # TODO: Add recovery
        remoteSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        remoteSocket.connect((self.target_host, self.target_port))
        hostname = socket.gethostname()
        hostnameLen = len(hostname)
        # print("Hostname: " + hostname + " " + str(hostnameLen))
        hostnameLenBytes = hostnameLen.to_bytes(4, 'big')
        remoteSocket.send(hostnameLenBytes)
        remoteSocket.send(hostname.encode('ascii'))
        for root, dirs, files in os.walk(dirname):
            for file in files:
                if self.stayAlive:
                    absFilename = os.path.abspath(root + "//" + file)
                    # print("Processing file: " + absFilename)
                    absFilenameLen = len(absFilename)
                    absFilenameBytes = absFilenameLen.to_bytes(4, 'big')
                    remoteSocket.send(absFilenameBytes)
                    remoteSocket.send(absFilename.encode('ascii'))
                    fileSize = os.path.getsize(absFilename)
                    fileSizeBytes = fileSize.to_bytes(8, 'big')
                    remoteSocket.send(fileSizeBytes)
                    with open(absFilename, 'rb') as f:
                        while True and self.stayAlive:
                            buf = f.read(1024)
                            if buf:
                                remoteSocket.send(buf)  # Note, I'm assuming that read(int) will return a partial if EOF is hit
                                # Python docs are stupid and don't list edge case behavior.
                            else:
                                break
        endMsg = "End of transmission"
        endMsgLen = len(endMsg)
        endMsgLenBytes = endMsgLen.to_bytes(4, 'big')
        remoteSocket.send(endMsgLenBytes)
        remoteSocket.send(endMsg.encode('ascii'))
        self.isHarvestComplete = True
        self.daemon.postResponse("Harvest complete: " + dirname)
2,098
koku/api/migrations/0030_auto_20201007_1403.py
cgoodfred/koku
2
2171421
# Generated by Django 3.1.2 on 2020-10-07 14:03
from django.db import migrations
from django.db import models


class Migration(migrations.Migration):

    dependencies = [("api", "0029_auto_20200921_2016")]

    operations = [
        migrations.AlterField(model_name="user", name="is_active", field=models.BooleanField(default=True, null=True))
    ]
353
codecademy_scripts/reversed_pair.py
Faraaz54/python_training_problems
0
2171577
def check_reversed(word, let, length):
    for i in range(0, len(word_list) - 1):
        buff = word_list[i]
        if buff[0] == let:
            if len(buff) == len(word):
                if buff == word:
                    return True


fin = open('words.txt', 'r')
word_list = []
while True:
    line = fin.readline()
    word = line.strip()
    word_list.append(word)
    if not line:
        break

match_list = []
for i in range(0, len(word_list) - 1):
    buff = word_list[i]
    rev = buff[::-1]
    le = rev[0]
    l = len(buff)
    if check_reversed(rev, le, l):
        print buff, rev
        #match_list.append((buff, rev))

print match_list
fin.close()
764
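The scan above re-reads the whole word list for every candidate, which is quadratic in the number of words. A set-based sketch of the same reversed-pair search (an alternative illustration, not the exercise's original approach; it also reports palindromes and both orderings of each pair):

# Alternative sketch: reversed-pair search with a set lookup instead of a nested scan.
def reversed_pairs(words):
    seen = set(words)
    return [(w, w[::-1]) for w in words if w and w[::-1] in seen]

with open('words.txt') as fin:
    words = [line.strip() for line in fin if line.strip()]

for word, rev in reversed_pairs(words):
    print(word, rev)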
train/custom_train_3.py
bjw806/Crypto-Deep-Learning-test1
0
2171267
from keras import models, layers
from keras import Input
from keras.models import Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers, initializers, regularizers, metrics
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import BatchNormalization, Conv2D, Activation, Dense, GlobalAveragePooling2D, MaxPooling2D, \
    ZeroPadding2D, Add
from keras.optimizer_v2.rmsprop import RMSprop
import os
import matplotlib.pyplot as plt
import numpy as np
import math

epochs = 100
train_samples = 7836
validation_samples = 2000
batch_size = 16

train_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)

train_dir = os.path.join('./data/train/')
val_dir = os.path.join('./data/validation/')

train_generator = train_datagen.flow_from_directory(train_dir, batch_size=batch_size, target_size=(356, 295), color_mode='rgb')
val_generator = val_datagen.flow_from_directory(val_dir, batch_size=batch_size, target_size=(356, 295), color_mode='rgb')

# number of classes
K = 2

input_tensor = Input(shape=(356, 295, 3), dtype='float32', name='input')


def conv1_layer(x):
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(64, (7, 7), strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    return x


def conv2_layer(x):
    x = MaxPooling2D((3, 3), 2)(x)
    shortcut = x

    for i in range(3):
        if (i == 0):
            x = Conv2D(64, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            shortcut = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x
        else:
            x = Conv2D(64, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x

    return x


def conv3_layer(x):
    shortcut = x

    for i in range(4):
        if (i == 0):
            x = Conv2D(128, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            shortcut = Conv2D(512, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x
        else:
            x = Conv2D(128, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x

    return x


def conv4_layer(x):
    shortcut = x

    for i in range(6):
        if (i == 0):
            x = Conv2D(256, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(1024, (1, 1), strides=(1, 1), padding='valid')(x)
            shortcut = Conv2D(1024, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x
        else:
            x = Conv2D(256, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(1024, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x

    return x


def conv5_layer(x):
    shortcut = x

    for i in range(3):
        if (i == 0):
            x = Conv2D(512, (1, 1), strides=(2, 2), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(2048, (1, 1), strides=(1, 1), padding='valid')(x)
            shortcut = Conv2D(2048, (1, 1), strides=(2, 2), padding='valid')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x
        else:
            x = Conv2D(512, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)

            x = Conv2D(2048, (1, 1), strides=(1, 1), padding='valid')(x)
            x = BatchNormalization()(x)

            x = Add()([x, shortcut])
            x = Activation('relu')(x)

            shortcut = x

    return x


x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)

x = GlobalAveragePooling2D()(x)
output_tensor = Dense(K, activation='softmax')(x)

resnet50 = Model(input_tensor, output_tensor)
resnet50.summary()

resnet50.compile(optimizer=RMSprop(), loss='categorical_crossentropy', metrics=['acc'])  # SGD  # must not be loaded from elsewhere

metric = 'acc'  # val_acc

target_dir = "./models/weights-improvement/"
if not os.path.exists(target_dir):
    os.mkdir(target_dir)

resnet50.save('./models/model.h5')
resnet50.save_weights('./models/weights.h5')

checkpoint = ModelCheckpoint(filepath=target_dir + 'weights-improvement-{epoch:02d}-{acc:.2f}.hdf5',
                             monitor=metric, verbose=2, save_best_only=True, mode='max')
callbacks_list = [checkpoint]

resnet50.fit(
    train_generator,
    steps_per_epoch=train_samples // batch_size,
    epochs=epochs,
    shuffle=True,
    validation_data=val_generator,
    callbacks=callbacks_list,  # [checkpoint],
    # callbacks=[tensorboard_callback],  # TensorBoard
    validation_steps=validation_samples // batch_size)
7,958
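conv2_layer through conv5_layer above repeat one 1x1-3x3-1x1 bottleneck pattern, with a strided projection shortcut on the first block of each stage. A compact sketch of that recurring block (an illustration under the same Keras imports, not the author's code):

# Sketch of the repeated bottleneck pattern used by conv2_layer..conv5_layer above.
from keras.layers import Conv2D, BatchNormalization, Activation, Add

def bottleneck_block(x, filters, strides=(1, 1), project=False):
    """One 1x1 -> 3x3 -> 1x1 bottleneck; the output width is 4 * filters."""
    shortcut = x

    x = Conv2D(filters, (1, 1), strides=strides, padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(filters, (3, 3), strides=(1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(4 * filters, (1, 1), strides=(1, 1), padding='valid')(x)
    x = BatchNormalization()(x)

    if project:
        # first block of a stage: match the shortcut's width and stride to the main path
        shortcut = Conv2D(4 * filters, (1, 1), strides=strides, padding='valid')(shortcut)
        shortcut = BatchNormalization()(shortcut)

    x = Add()([x, shortcut])
    return Activation('relu')(x)


# e.g. the conv3 stage: one projecting, strided block followed by three identity blocks
def conv3_stage(x):
    x = bottleneck_block(x, 128, strides=(2, 2), project=True)
    for _ in range(3):
        x = bottleneck_block(x, 128)
    return x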