\n \n \"\"\"\n\n url1 = 'http://example.com/'\n httpretty.register_uri(httpretty.GET, url1, body=page_body)\n\n url2 = 'http://www.example.com/'\n httpretty.register_uri(httpretty.GET, url2, body=page_body)\n\n results = {}\n\n config = Config(urls=[url1, url2])\n page_content_checker = page_content.Checker(config=config, previous_results={})\n results['page_content'] = page_content_checker.run()\n\n checker = duplicate_content.Checker(config=page_content_checker.config,\n previous_results=results)\n result = checker.run()\n urls_after = checker.config.urls\n\n self.assertEqual(result, {\n 'http://example.com/ http://www.example.com/': {\n 'exception': None,\n 'similarity': 1.0\n }\n })\n self.assertEqual(urls_after, ['http://example.com/'])\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"avg_line_length":{"kind":"number","value":30.6315789474,"string":"30.631579"},"max_line_length":{"kind":"number","value":92,"string":"92"},"alphanum_fraction":{"kind":"number","value":0.5486827033,"string":"0.548683"}}},{"rowIdx":47236,"cells":{"hexsha":{"kind":"string","value":"1e5477fff3f0d8e4e575997d9206dddf56821f65"},"size":{"kind":"number","value":189,"string":"189"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py"},"max_stars_repo_name":{"kind":"string","value":"shihab4t/Books-Code"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b637b6b2ad42e11faf87d29047311160fe3b2490"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py"},"max_issues_repo_name":{"kind":"string","value":"shihab4t/Books-Code"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b637b6b2ad42e11faf87d29047311160fe3b2490"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py"},"max_forks_repo_name":{"kind":"string","value":"shihab4t/Books-Code"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b637b6b2ad42e11faf87d29047311160fe3b2490"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"def add_numbers(numbers):\n result = 0\n for i in numbers:\n result += i\n #print(\"number =\", i)\n return result\n\n\nresult = add_numbers([1, 2, 30, 4, 5, 
9])\nprint(result)\n"},"avg_line_length":{"kind":"number","value":17.1818181818,"string":"17.181818"},"max_line_length":{"kind":"number","value":41,"string":"41"},"alphanum_fraction":{"kind":"number","value":0.5661375661,"string":"0.566138"}}},{"rowIdx":47237,"cells":{"hexsha":{"kind":"string","value":"1e8ce7e39fa909fe47e567c835da6345fb42a02c"},"size":{"kind":"number","value":485,"string":"485"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"infrastructure/azwrapper.py"},"max_stars_repo_name":{"kind":"string","value":"lizzyTheLizard/homeserver-azure"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"infrastructure/azwrapper.py"},"max_issues_repo_name":{"kind":"string","value":"lizzyTheLizard/homeserver-azure"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"infrastructure/azwrapper.py"},"max_forks_repo_name":{"kind":"string","value":"lizzyTheLizard/homeserver-azure"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from az.cli import az\nimport sys\n\n# General Wrapper for azure cli calls\ndef azSafe(command):\n exit_code, result_dict, logs = az(command)\n if exit_code != 0:\n print(\"az \" + command + \" failed\")\n print(logs)\n sys.exit(-1)\n return result_dict\n\n# General call to check if a resource already exists\ndef resourceExists(group, name):\n resourceExists = \"resource list -g {} -n {}\"\n list = azSafe(resourceExists.format(group, name))\n return len(list) != 0"},"avg_line_length":{"kind":"number","value":28.5294117647,"string":"28.529412"},"max_line_length":{"kind":"number","value":53,"string":"53"},"alphanum_fraction":{"kind":"number","value":0.6597938144,"string":"0.659794"}}},{"rowIdx":47238,"cells":{"hexsha":{"kind":"string","value":"1ea0c610076601a406a8cb69dd2c1e21593a40b9"},"size":{"kind":"number","value":1835,"string":"1,835"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"import demistomock as demisto # noqa: F401\nfrom CommonServerPython import * # noqa: F401\n\n\ndef main():\n args = demisto.args()\n match_target = args['value']\n capture_groups = args.get('groups')\n dict_keys = args.get('keys')\n regex_flags = 0\n for flag in argToList(args.get('flags', '')):\n if flag in ('dotall', 's'):\n regex_flags |= re.DOTALL\n elif flag in ('multiline', 'm'):\n regex_flags |= re.MULTILINE\n elif flag in ('ignorecase', 'i'):\n regex_flags |= re.IGNORECASE\n elif flag in ('unicode', 'u'):\n regex_flags |= re.UNICODE\n else:\n raise ValueError(f'Unknown flag: {flag}')\n regex_pattern = re.compile(r'{}'.format(args['regex']), regex_flags)\n\n if capture_groups:\n capture_groups = capture_groups.split(',')\n # Validating groups input to be integers\n if not all(x.isdigit() for x in capture_groups):\n raise ValueError('Error: groups must be integers')\n\n if dict_keys:\n dict_keys = dict_keys.split(',')\n\n pattern_match = re.search(regex_pattern, match_target)\n matches = []\n if pattern_match:\n for i in pattern_match.groups():\n matches.append(i)\n\n if capture_groups:\n for j in capture_groups:\n if len(matches) - 1 < int(j):\n raise ValueError('Error: Regex group (' + j + ') out of range')\n matches = [matches[int(x)] for x in capture_groups]\n\n if dict_keys:\n if len(dict_keys) != len(matches):\n raise ValueError('Error: Number of keys does not match number of items')\n else:\n dict_matches = dict(zip(dict_keys, matches))\n demisto.results(dict_matches)\n else:\n demisto.results(matches)\n\n\nif __name__ in ('__builtin__', 'builtins'):\n 
main()\n"},"avg_line_length":{"kind":"number","value":32.1929824561,"string":"32.192982"},"max_line_length":{"kind":"number","value":84,"string":"84"},"alphanum_fraction":{"kind":"number","value":0.5950953678,"string":"0.595095"}}},{"rowIdx":47239,"cells":{"hexsha":{"kind":"string","value":"7897cac0ea4d830a3b8371de9199f19f03d22f71"},"size":{"kind":"number","value":2632,"string":"2,632"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py"},"max_stars_repo_name":{"kind":"string","value":"felixdittrich92/DeepLearning-tensorflow-keras"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2880d8ed28ba87f28851affa92b6fa99d2e47be9"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py"},"max_issues_repo_name":{"kind":"string","value":"felixdittrich92/DeepLearning-tensorflow-keras"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2880d8ed28ba87f28851affa92b6fa99d2e47be9"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py"},"max_forks_repo_name":{"kind":"string","value":"felixdittrich92/DeepLearning-tensorflow-keras"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2880d8ed28ba87f28851affa92b6fa99d2e47be9"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import os\n\nimport numpy as np\n\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.activations import *\nfrom tensorflow.keras.models import *\nfrom tensorflow.keras.optimizers import *\nfrom tensorflow.keras.initializers import *\nfrom tensorflow.keras.callbacks import * # für Tensorboard\n\n# Log erstellen/speichern\ndir_path = os.path.abspath(\"../DeepLearning/logs\") # Linux und Windows\n\n# Dataset\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# Cast to np.float32\nx_train = x_train.astype(np.float32)\ny_train = y_train.astype(np.float32)\nx_test = x_test.astype(np.float32)\ny_test = y_test.astype(np.float32)\n\n# Dataset Variablen\ntrain_size = x_train.shape[0]\ntest_size = x_test.shape[0]\nnum_features = 784 # 28x28\nnum_classes = 10\n\n# kategorisieren\ny_train = to_categorical(y_train, num_classes=10)\ny_test = to_categorical(y_test, num_classes=10)\n\n# input Daten reshapen\nx_train = x_train.reshape(train_size, num_features)\nx_test = x_test.reshape(test_size, num_features)\n\n# Modell Parameter\ninit_w = TruncatedNormal(mean=0.0, stddev=0.01)\ninit_b = Constant(value=0.0)\nlr = 0.001\noptimizer = Adam(lr=lr)\nepochs = 20\nbatch_size = 256 # [32, 1024] Werte dazwischen gibt an wieviele Datenpunkte parrallel verwendet werden zum 
trainieren\n\n# Modell definieren\nmodel = Sequential()\n\nmodel.add(Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b, input_shape=(num_features, )))\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Dense(units=300, kernel_initializer=init_w, bias_initializer=init_b))\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Dense(units=100, kernel_initializer=init_w, bias_initializer=init_b))\nmodel.add(Activation(\"relu\"))\n\nmodel.add(Dense(units=num_classes, kernel_initializer=init_w, bias_initializer=init_b))\nmodel.add(Activation(\"softmax\"))\nmodel.summary()\n\n# Modell kompilieren, trainieren und evaluieren\nmodel.compile(\n loss=\"categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"])\n\n# Tensorboard Callback\ntb = TensorBoard(\n log_dir=dir_path,\n histogram_freq=1, # jede Epoche 2 = alle 2 Epochen etc.\n write_graph=True)\n\nmodel.fit(\n x=x_train, \n y=y_train, \n epochs=epochs,\n batch_size=batch_size,\n validation_data=[x_test, y_test],\n callbacks=[tb]) # benötigt für Tensorboard\n\nscore = model.evaluate(\n x_test, \n y_test, \n verbose=0)\nprint(\"Score: \", score)\n\n\n# USE: in Konsole tensorboard --logdir LOGSORDNER\n\n# mehrere Modelle vergleichen -> für jedes Modell in Logs Unterordner erstellen und mit Tensorboard den Oberordner angeben"},"avg_line_length":{"kind":"number","value":28.3010752688,"string":"28.301075"},"max_line_length":{"kind":"number","value":122,"string":"122"},"alphanum_fraction":{"kind":"number","value":0.7659574468,"string":"0.765957"}}},{"rowIdx":47240,"cells":{"hexsha":{"kind":"string","value":"15b74e5fe7101d3d254659d6739b6dff810a68af"},"size":{"kind":"number","value":489,"string":"489"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/decorator/class_based_decorators_with_arguments.py"},"max_stars_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_stars_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"python/decorator/class_based_decorators_with_arguments.py"},"max_issues_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_issues_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/decorator/class_based_decorators_with_arguments.py"},"max_forks_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_forks_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import functools\n\n\nclass ClassDecorator(object):\n def __init__(self, arg1, arg2):\n print(f'Arguments of decorators {arg1}, {arg2}')\n self.arg1 = arg1\n self.arg2 = arg2\n\n def __call__(self, func):\n 
functools.update_wrapper(self, func)\n\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n return wrapper\n\n\n@ClassDecorator(\"arg1\", \"arg2\")\ndef print_args(*args):\n for arg in args:\n print(arg)\n\n\nprint_args(1, 2, 3)"},"avg_line_length":{"kind":"number","value":20.375,"string":"20.375"},"max_line_length":{"kind":"number","value":56,"string":"56"},"alphanum_fraction":{"kind":"number","value":0.6073619632,"string":"0.607362"}}},{"rowIdx":47241,"cells":{"hexsha":{"kind":"string","value":"01be7042a6f0b68cc5fa4de5ee66956770d65fa3"},"size":{"kind":"number","value":794,"string":"794"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"docs/API/Users_Guide/scripts/BAM_Slice.py"},"max_stars_repo_name":{"kind":"string","value":"ZhenyuZ/gdc-docs"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f024d5d4cd86dfa2c9e7d63850eee94d975b7948"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":67,"string":"67"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-06-09T14:11:51.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-16T07:54:44.000Z"},"max_issues_repo_path":{"kind":"string","value":"docs/API/Users_Guide/scripts/BAM_Slice.py"},"max_issues_repo_name":{"kind":"string","value":"ZhenyuZ/gdc-docs"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f024d5d4cd86dfa2c9e7d63850eee94d975b7948"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":19,"string":"19"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-06-21T15:51:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-07T09:22:20.000Z"},"max_forks_repo_path":{"kind":"string","value":"docs/API/Users_Guide/scripts/BAM_Slice.py"},"max_forks_repo_name":{"kind":"string","value":"ZhenyuZ/gdc-docs"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f024d5d4cd86dfa2c9e7d63850eee94d975b7948"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":32,"string":"32"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-07-15T01:24:19.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-03-25T10:42:28.000Z"},"content":{"kind":"string","value":"import requests\nimport json\n\n'''\n This script will not work until $TOKEN_FILE_PATH\n is replaced with an actual path.\n'''\ntoken_file = \"$TOKEN_FILE_PATH\"\n\nfile_id = \"11443f3c-9b8b-4e47-b5b7-529468fec098\"\n\ndata_endpt = \"https://api.gdc.cancer.gov/slicing/view/{}\".format(file_id)\n\nwith open(token_file,\"r\") as token:\n token_string = str(token.read().strip())\n\nparams = {\"gencode\": [\"BRCA1\", \"BRCA2\"]}\n\nresponse = requests.post(data_endpt,\n data = json.dumps(params),\n headers = {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token_string\n })\n\nfile_name = \"brca_slices.bam\"\n\nwith open(file_name, \"wb\") as output_file:\n 
output_file.write(response.content)\n"},"avg_line_length":{"kind":"number","value":26.4666666667,"string":"26.466667"},"max_line_length":{"kind":"number","value":73,"string":"73"},"alphanum_fraction":{"kind":"number","value":0.6007556675,"string":"0.600756"}}},{"rowIdx":47242,"cells":{"hexsha":{"kind":"string","value":"da7a049f44bfb52f1ea5a044260e1bee670f6bac"},"size":{"kind":"number","value":1567,"string":"1,567"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"MockServer/server.py"},"max_stars_repo_name":{"kind":"string","value":"mcteacraft/MovingSpirit"},"max_stars_repo_head_hexsha":{"kind":"string","value":"90fb85809a46f286b55ecc4e1d2adbe9579ca713"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"MockServer/server.py"},"max_issues_repo_name":{"kind":"string","value":"mcteacraft/MovingSpirit"},"max_issues_repo_head_hexsha":{"kind":"string","value":"90fb85809a46f286b55ecc4e1d2adbe9579ca713"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"MockServer/server.py"},"max_forks_repo_name":{"kind":"string","value":"mcteacraft/MovingSpirit"},"max_forks_repo_head_hexsha":{"kind":"string","value":"90fb85809a46f286b55ecc4e1d2adbe9579ca713"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from http.server import BaseHTTPRequestHandler, HTTPServer\n\nhostName = \"localhost\"\nserverPort = 8080\n\nclass MyServer(BaseHTTPRequestHandler):\n state = \"Stopped\"\n \n def __init__(self, request, client_address, server):\n BaseHTTPRequestHandler.__init__(self, request, client_address, server)\n\n def do_GET(self):\n self.send_response(200)\n\n if(self.path == \"/minecraft/start\"):\n if(MyServer.state == \"Stopped\"):\n MyServer.state = \"Starting\"\n elif(self.path == \"/minecraft/stop\"):\n if(MyServer.state == \"Running\"):\n MyServer.state = \"Stopping\"\n else:\n if(MyServer.state == \"Starting\"):\n MyServer.state = \"Running\"\n if(MyServer.state == \"Stopping\"):\n MyServer.state = \"Stopped\"\n\n if (self.path == \"/minecraft/status\"):\n responseJson = '{\"status\" : \"' + MyServer.state + '\"}';\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(bytes(responseJson, \"utf-8\"));\n else:\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(MyServer.state, \"utf-8\"));\n\nif __name__ == \"__main__\": \n webServer = HTTPServer((hostName, serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (hostName, serverPort))\n\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n\n webServer.server_close()\n print(\"Server 
stopped.\")"},"avg_line_length":{"kind":"number","value":33.3404255319,"string":"33.340426"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.589023612,"string":"0.589024"}}},{"rowIdx":47243,"cells":{"hexsha":{"kind":"string","value":"16f23c8bc19c75ec9e1270d93c6f8d77b4b97e77"},"size":{"kind":"number","value":9916,"string":"9,916"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pythonProj/FZPython/pyquant/db_models/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"iHamburg/FZQuant"},"max_stars_repo_head_hexsha":{"kind":"string","value":"86b750ec33d01badfd3f324d6f1599118b9bf8ff"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pythonProj/FZPython/pyquant/db_models/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"iHamburg/FZQuant"},"max_issues_repo_head_hexsha":{"kind":"string","value":"86b750ec33d01badfd3f324d6f1599118b9bf8ff"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pythonProj/FZPython/pyquant/db_models/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"iHamburg/FZQuant"},"max_forks_repo_head_hexsha":{"kind":"string","value":"86b750ec33d01badfd3f324d6f1599118b9bf8ff"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-04-10T10:05:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-11-24T17:17:23.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf8\n\n\nimport json\nimport datetime\nfrom pprint import pprint\nfrom sqlalchemy import Column, String,Integer, Float, DateTime\nfrom sqlalchemy import Table, Text\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import relationship\nimport pandas as pd\nfrom pyquant.libs.mysqllib import session\nfrom pyquant.libs.mysqllib import BaseModel as Base\nimport pyquant.libs.utillib as utillib\nfrom pyquant.libs.cachelib import cache\nfrom pyquant.utils.monitor import listener, Monitor, addCache\n\n\n# Many-Many Relation\nsymbolgroup_symbol = Table('symbolgroup_symbol', Base.metadata,\n Column('symbol_id', ForeignKey('symbol.id'), primary_key=True),\n Column('symbolgroup_id', ForeignKey('symbolgroup.id'), primary_key=True))\n\nstockIndex_symbol = Table('stockIndex_symbol', Base.metadata,\n Column('stockIndex_id', ForeignKey('stockIndex.id'), primary_key=True),\n Column('symbol_id', ForeignKey('symbol.id'), primary_key=True))\n\n\nclass User(Base):\n __tablename__ = 'user'\n\n id = Column(Integer, primary_key=True)\n username = Column(String)\n # fullname = Column(String)\n password = Column(String)\n\n # def __repr__(self):\n # return \"\" % (\n # self.name, self.fullname, self.password)\n\n\n\n\nclass Symbol(Base):\n\n __tablename__ = 'symbol'\n __table_args__ = {\n 'mysql_engine': 'InnoDB',\n 'mysql_charset': 'utf8',\n }\n\n id = Column(Integer, primary_key=True)\n 
exchange_id = Column(String)\n ticker = Column(String)\n instrument = Column(String)\n name = Column(String)\n sector = Column(String)\n symbolgroup = relationship('SymbolGroup',secondary = symbolgroup_symbol,back_populates = 'symbol')\n stockIndex = relationship('StockIndex',secondary = stockIndex_symbol,back_populates = 'symbol')\n\n\n def __repr__(self):\n return \"\" % (\n self.id, self.exchange_id, self.ticker, self.instrument, self.name, self.sector)\n\n @classmethod\n # @listener(Monitor)\n def get_by_ticker(cls, ticker, index=False, lock_mode=None):\n\n cache_key = '%s-%s-%s-%s-%s' % (cls.__name__,'get_stock_by_ticker',ticker, index, lock_mode)\n cache_value = cache.get(cache_key)\n\n if cache_value: #如果有缓存,直接返回缓存\n return cache_value\n\n query = session.query(cls)\n\n if lock_mode:\n query = query.with_lockmode(lock_mode)\n\n query = query.filter(cls.ticker==ticker, cls.instrument==('index' if index else 'stock'))\n\n obj = query.first()\n cache.set(cache_key, obj)\n\n return obj\n\n @staticmethod\n def get_list_by_symbolgroup_id(symbolgroup_id, limit=30, offset=0):\n return session.query(Symbol).filter(Symbol.symbolgroup.any(id=symbolgroup_id)).\\\n limit(limit).offset(offset).all()\n\n @classmethod\n def get_index_list(cls):\n return session.query(cls).filter(Symbol.instrument == 'index').all()\n\n\n @property\n def index(self):\n return True if self.instrument == 'index' else False\n\n\ndef _test_get_all():\n objs = Symbol.get_all(columns='id', limit=None)\n for row in objs:\n print(row)\n\n\ndef _test_index():\n symbol = Symbol.get_by_id(2408)\n print('index', symbol.index)\n\n\nclass DailyPrice(Base):\n \"\"\"\n 每日价格\n\n \"\"\"\n __tablename__ = 'dailyPrice'\n\n id = Column(Integer, primary_key=True)\n symbol_id = Column(Integer, ForeignKey('symbol.id'))\n price_date = Column(DateTime)\n open_price = Column(Float)\n high_price = Column(Float)\n low_price = Column(Float)\n close_price = Column(Float)\n volume = Column(Integer)\n symbol = relationship(\"Symbol\",back_populates=\"daily_price\")\n\n\n def __repr__(self):\n return \"\" % (\n self.symbol_id, self.price_date, self.open_price, self.high_price, self.low_price, self.close_price, self.volume)\n\n def to_dict(self):\n obj = super(DailyPrice, self).to_dict()\n obj['price_date'] = str(obj['price_date'])\n\n return obj\n\n\n @classmethod\n # @listener(Monitor)\n def get_by_symbol_id(cls, symbol_id, fromdate=None, todate=None, output = 'dict', isCache=True):\n \"\"\"\n 根据symbol_id 查daily price\n\n :param symbol_id:\n :param fromdate:\n :param todate:\n :param output:\n :return:\n \"\"\"\n\n if not todate:\n todate = str(datetime.date.today())\n\n cache_key = '%s-%s-%s-%s-%s-%s' % (cls.__name__, 'get_by_symbol_id', symbol_id, fromdate, todate, output)\n # print('cache key', cache_key)\n\n if isCache: #用cache\n\n cache_value = cache.get(cache_key)\n if isinstance(cache_value, pd.DataFrame):\n if not cache_value.empty:\n return cache_value\n\n if cache_value: #如果有缓存,直接返回缓存\n return cache_value\n\n\n where = []\n\n if isinstance(symbol_id, (list, tuple)): #\n where.append(DailyPrice.symbol_id.in_(symbol_id))\n else:\n where.append(DailyPrice.symbol_id == symbol_id)\n\n\n if fromdate:\n where.append(DailyPrice.price_date >= fromdate)\n\n if todate:\n where.append(DailyPrice.price_date < todate)\n\n # print(where)\n\n if output == 'df':\n df = pd.read_sql(session.query(DailyPrice).filter(*where).statement, session.bind)\n del df['id']\n del df['symbol_id']\n #设置index\n df['price_date'] = 
df['price_date'].astype('datetime64[ns]')\n\n df = df.set_index('price_date')\n\n # columns 改名\n df.columns = ['open', 'high', 'low','close', 'volume']\n\n # 更换columns顺序\n cols = ['open', 'high', 'close', 'low', 'volume']\n objs = df.ix[:, cols]\n\n\n elif output == 'dict':\n\n # print('保存缓存', cache_key)\n\n objs = [row.to_dict() for row in session.query(DailyPrice).filter(*where).all()]\n\n else: #models\n objs = session.query(DailyPrice).filter(*where).all()\n\n cache.set(cache_key, objs)\n\n return objs\n\n\n\n\ndef _test_multi_symbol():\n\n print(DailyPrice.get_by_symbol_id([17,18], fromdate='2017-01-01', output='df')[:5])\n\n\n\nclass StockIndex(Base):\n __tablename__ = 'stockIndex'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n symbol_id = Column(Integer, ForeignKey('symbol.id'))\n symbol = relationship('Symbol', secondary=stockIndex_symbol, back_populates='stockIndex')\n\n\ndef _test_get_symbols():\n stockindex = StockIndex.get_by_id(2)\n print(stockindex.symbol)\n\nclass SymbolGroup(Base):\n __tablename__ = 'symbolgroup'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n user_id = Column(Integer, ForeignKey('user.id'))\n user = relationship(\"User\", back_populates=\"symbolgroup\")\n symbol = relationship('Symbol', secondary=symbolgroup_symbol, back_populates='symbolgroup')\n\n\n @staticmethod\n def get_system_groups():\n return session.query(__class__).filter(SymbolGroup.user_id == 0).all()\n\n\n\nclass Strategy(Base):\n __tablename__ = 'strategy'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n user_id = Column(Integer, ForeignKey('user.id'))\n filePath = Column(String)\n desc = Column(String)\n\n\n\n# One-Many Relations\nSymbol.daily_price = relationship(\"DailyPrice\", back_populates=\"symbol\")\nUser.symbolgroup = relationship(\"SymbolGroup\", back_populates=\"user\")\n\n\n\ndef _query_relation():\n\n query = session.query(DailyPrice).filter(DailyPrice.id == '7798525').all()\n pprint.pprint([row.to_dict() for row in query])\n\n\ndef _query_join():\n pprint.pprint(session.query(DailyPrice).join(Symbol).filter(Symbol.id == 2433).limit(10).all())\n\n\ndef _symbol_find_all():\n # print(Symbol.find_all((Symbol.id > 200),10))\n print(Symbol.find_all(limit = 10))\n\ndef _add_user_symbolgroup():\n user = User(username='new user', password='123')\n print(user)\n\n sg = SymbolGroup(name='上证50')\n\n user.symbolgroup = [sg]\n session.add(user)\n session.commit()\n\ndef _test_m_m_relation1():\n # sd = SymbolGroup.get_by_id(3)\n # print(sd.symbol)\n\n si = StockIndex.get_by_id(2)\n print(si.symbol)\n\ndef _test_add_m_m_relation():\n sd = SymbolGroup.get(3)\n sd.symbol.append(Symbol.get(19))\n session.commit()\n\ndef _test_delete_m_m_relation():\n sd = SymbolGroup.get(3)\n sd.symbol.remove(Symbol.get(19))\n session.commit()\n\ndef _test_add_user():\n user = User(username='new user222', password='123')\n session.add(user)\n session.commit()\n\nif __name__ == '__main__':\n \"\"\"\"\"\"\n import pprint\n # _query()\n #\n # _get()\n # print(Symbol.get_by_ticker('000001', True))\n # query_relation()\n # _query_join()\n\n # _symbol_find_all()\n # print(Symbol.query().limit(10).all())\n\n # print(User.get(1).to_dict())\n # print(Symbol.get(17).to_dict())\n\n # query = session.query(Symbol). \\\n # filter(Symbol.symbolgroup.any(id=3)). 
\\\n # all()\n #\n # print(query)\n\n # _test_add_m_m_relation()\n # _test_delete_m_m_relation()\n\n # print(session.query(SymbolGroup).filter(SymbolGroup.user_id == 0).all())\n # _symbol_find_all()\n # print(DailyPrice.get_by_id(100).to_dict())\n # _test_m_m_relation1()\n # print(StockIndex.get_all())\n # _test_m_m_relation1()\n\n # print(Symbol.get_stock_by_ticker('000001', index=True))\n\n # print(Symbol.get_by_id(20))\n\n\n\n # print(DailyPrice.get_by_symbol_id(17, fromdate='2017-01-01', output='df'))\n\n # _test_multi_symbol()\n\n # arr = [12,34]\n # s = 'key:%s' % arr\n # print(s)\n # _test_get_max_date()\n # _test_get_all()\n # _test_index()\n # _test_m_m_relation1()\n # _test_add_user()\n _test_get_symbols()\n"},"avg_line_length":{"kind":"number","value":26.6559139785,"string":"26.655914"},"max_line_length":{"kind":"number","value":125,"string":"125"},"alphanum_fraction":{"kind":"number","value":0.6405808794,"string":"0.640581"}}},{"rowIdx":47244,"cells":{"hexsha":{"kind":"string","value":"c146650f80b6d9b0345e1e6e8e2d34975d1f6326"},"size":{"kind":"number","value":1149,"string":"1,149"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"api/clean/sequence_num.py"},"max_stars_repo_name":{"kind":"string","value":"Latent-Lxx/dazhou-dw"},"max_stars_repo_head_hexsha":{"kind":"string","value":"902b4b625cda4c9e4eb205017b8955b81f37a0b5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"api/clean/sequence_num.py"},"max_issues_repo_name":{"kind":"string","value":"Latent-Lxx/dazhou-dw"},"max_issues_repo_head_hexsha":{"kind":"string","value":"902b4b625cda4c9e4eb205017b8955b81f37a0b5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"api/clean/sequence_num.py"},"max_forks_repo_name":{"kind":"string","value":"Latent-Lxx/dazhou-dw"},"max_forks_repo_head_hexsha":{"kind":"string","value":"902b4b625cda4c9e4eb205017b8955b81f37a0b5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2022-02-11T04:44:37.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-02-11T04:44:37.000Z"},"content":{"kind":"string","value":"# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/7/19 下午7:00\n# @Author : Latent\n# @Email : latentsky@gmail.com\n# @File : sequence_num.py\n# @Software: PyCharm\n# @class : 对于库存的清洗\n\"\"\"\n字段说明:\n1.inventory_id ---->数据库自增\n2.num ---> 当前库存\n3.num_level ---> 库存等级\n\n\"\"\"\n\n\nclass Sequence_Num(object):\n\n # 1. 
库存等级换算 ------> 库存0-50->紧张 50-100 -> 正常 100以上充足\n @classmethod\n def sequence_num_level(cls, data):\n platform = data['platform']\n if platform != 'pdd':\n _func_none = (lambda x: x if type(x) == int else 0)\n item_num = int(_func_none(data['public']['num']))\n if item_num <= 50:\n num_level = '紧张'\n elif 50 < item_num <= 100:\n num_level = '正常'\n else:\n num_level = '充足'\n else:\n item_num = int(data['public']['num'])\n if item_num <= 300:\n num_level = '紧张'\n elif 300 < item_num <= 999:\n num_level = '正常'\n else:\n num_level = '充足'\n\n num_info = {'num': item_num,\n 'num_level': num_level}\n\n return num_info\n"},"avg_line_length":{"kind":"number","value":24.9782608696,"string":"24.978261"},"max_line_length":{"kind":"number","value":63,"string":"63"},"alphanum_fraction":{"kind":"number","value":0.4917319408,"string":"0.491732"}}},{"rowIdx":47245,"cells":{"hexsha":{"kind":"string","value":"c1e4b11d7e74da3041c02009a83ef0d1d92d36f0"},"size":{"kind":"number","value":1936,"string":"1,936"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"main.py"},"max_stars_repo_name":{"kind":"string","value":"alexpod1000/TF_PoseNet"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0329a16275ec974d660e99564949ca95d71389ff"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-04T02:32:07.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-03-04T02:32:07.000Z"},"max_issues_repo_path":{"kind":"string","value":"main.py"},"max_issues_repo_name":{"kind":"string","value":"alexpod1000/TF_PoseNet"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0329a16275ec974d660e99564949ca95d71389ff"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"main.py"},"max_forks_repo_name":{"kind":"string","value":"alexpod1000/TF_PoseNet"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0329a16275ec974d660e99564949ca95d71389ff"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import cv2\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nfrom utils.model_utils import perform_prediction, decode_predictions\n\nparts = [\n \"nose\",\n \"leftEye\",\n \"rightEye\",\n \"leftEar\",\n \"rightEar\",\n \"leftShoulder\",\n \"rightShoulder\",\n \"leftElbow\",\n \"rightElbow\",\n \"leftWrist\",\n \"rightWrist\",\n \"leftHip\",\n \"rightHip\",\n \"leftKnee\",\n \"rightKnee\",\n \"leftAnkle\",\n \"rightAnkle\"\n]\n\nmin_conf_score = 0.2\nmodel_path = 'models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite'\n\n# Resolution = ((InputImageSize - 1) / OutputStride) + 1\n# (513 - 1 / 32) + 1 = 17 (our case), so we are using the \"worst\", accuracy wise\n\ninterpreter = tf.lite.Interpreter(model_path=model_path)\n\nimage = cv2.imread('images/1.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\nheatmaps, offsets, 
displacements_fwd, displacements_bwd, resized_image = perform_prediction(image, interpreter)\nkeypoints = decode_predictions(heatmaps, offsets, output_stride=32)\n\nresize_y_ratio = image.shape[0]/resized_image.shape[0]\nresize_x_ratio = image.shape[1]/resized_image.shape[1]\n\nimage_cpy = np.copy(image)\n\npose_conf = np.array([keypoint[\"confidence\"] for keypoint in keypoints]).mean()\n\nfor keypoint in keypoints:\n scale = 5\n\n # rescale to original (not resized by model) image coordinates\n pos_y = int(keypoint[\"y\"] * resize_y_ratio)\n pos_x = int(keypoint[\"x\"] * resize_x_ratio)\n confidence_score = keypoint[\"confidence\"]\n if confidence_score > min_conf_score:\n cv2.circle(image_cpy, (pos_x, pos_y), scale, (255, 0, 0), thickness=cv2.FILLED)\n cv2.putText(image_cpy, parts[keypoint[\"part_index\"]], (pos_x, pos_y), 0, 0.4, (0, 255, 0))\n\n print(\"Confidence for {}: {}\".format(parts[keypoint[\"part_index\"]], confidence_score))\n\nprint(\"Confidence for pose {}\".format(pose_conf))\n\nplt.imshow(image_cpy)\nplt.show()\n"},"avg_line_length":{"kind":"number","value":28.4705882353,"string":"28.470588"},"max_line_length":{"kind":"number","value":111,"string":"111"},"alphanum_fraction":{"kind":"number","value":0.708677686,"string":"0.708678"}}},{"rowIdx":47246,"cells":{"hexsha":{"kind":"string","value":"a9b755b3d21103a71c140e3ae9bbb69470f88938"},"size":{"kind":"number","value":2157,"string":"2,157"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py"},"max_stars_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py"},"max_issues_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py"},"max_forks_repo_name":{"kind":"string","value":"eengineergz/Lambda"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1fe511f7ef550aed998b75c18a432abf6ab41c5f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nThis is the same test, but with big hash tables that are _unlikely_ to\nhave collisions after the 3 inserts we do.\n\nDoes not collide with DJB2 or FNV-1-64. 
But could collide with other hashes.\n\"\"\"\n\nimport unittest\nfrom hashtable import HashTable\n\nclass TestHashTable(unittest.TestCase):\n\n def test_hash_table_insertion_and_retrieval(self):\n ht = HashTable(0x10000)\n\n ht.put(\"key-0\", \"val-0\")\n ht.put(\"key-1\", \"val-1\")\n ht.put(\"key-2\", \"val-2\")\n\n return_value = ht.get(\"key-0\")\n self.assertTrue(return_value == \"val-0\")\n return_value = ht.get(\"key-1\")\n self.assertTrue(return_value == \"val-1\")\n return_value = ht.get(\"key-2\")\n self.assertTrue(return_value == \"val-2\")\n\n def test_hash_table_pution_overwrites_correctly(self):\n ht = HashTable(0x10000)\n\n ht.put(\"key-0\", \"val-0\")\n ht.put(\"key-1\", \"val-1\")\n ht.put(\"key-2\", \"val-2\")\n\n ht.put(\"key-0\", \"new-val-0\")\n ht.put(\"key-1\", \"new-val-1\")\n ht.put(\"key-2\", \"new-val-2\")\n\n return_value = ht.get(\"key-0\")\n self.assertTrue(return_value == \"new-val-0\")\n return_value = ht.get(\"key-1\")\n self.assertTrue(return_value == \"new-val-1\")\n return_value = ht.get(\"key-2\")\n self.assertTrue(return_value == \"new-val-2\")\n\n def test_hash_table_removes_correctly(self):\n ht = HashTable(0x10000)\n\n ht.put(\"key-0\", \"val-0\")\n ht.put(\"key-1\", \"val-1\")\n ht.put(\"key-2\", \"val-2\")\n\n return_value = ht.get(\"key-0\")\n self.assertTrue(return_value == \"val-0\")\n return_value = ht.get(\"key-1\")\n self.assertTrue(return_value == \"val-1\")\n return_value = ht.get(\"key-2\")\n self.assertTrue(return_value == \"val-2\")\n\n ht.delete(\"key-2\")\n ht.delete(\"key-1\")\n ht.delete(\"key-0\")\n\n return_value = ht.get(\"key-0\")\n self.assertTrue(return_value is None)\n return_value = ht.get(\"key-1\")\n self.assertTrue(return_value is None)\n return_value = ht.get(\"key-2\")\n self.assertTrue(return_value is None)\n\nif __name__ == '__main__':\n unittest.main()\n"},"avg_line_length":{"kind":"number","value":29.9583333333,"string":"29.958333"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.5906351414,"string":"0.590635"}}},{"rowIdx":47247,"cells":{"hexsha":{"kind":"string","value":"8243304ed31524da22fd730f6960d925ef517d1c"},"size":{"kind":"number","value":4232,"string":"4,232"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"vkapp/bot/migrations/0001_initial.py"},"max_stars_repo_name":{"kind":"string","value":"ParuninPavel/lenta4_hack"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6d3340201deadf5757e37ddd7cf5580b928d7bda"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-11-23T13:33:13.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2017-11-23T13:33:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"vkapp/bot/migrations/0001_initial.py"},"max_issues_repo_name":{"kind":"string","value":"ParuninPavel/lenta4_hack"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6d3340201deadf5757e37ddd7cf5580b928d7bda"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"vkapp/bot/migrations/0001_initial.py"},"max_forks_repo_name":{"kind":"string","value":"ParuninPavel/lenta4_hack"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6d3340201deadf5757e37ddd7cf5580b928d7bda"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.8 on 2017-10-21 13:14\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Admin',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='AdminReview',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('rating', models.IntegerField()),\n ('date_time', models.DateTimeField(auto_now_add=True)),\n ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),\n ],\n ),\n migrations.CreateModel(\n name='Blogger',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('balance', models.FloatField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Income',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('type', models.CharField(choices=[('PROP', 'Предложение новости'), ('PUB', 'Опубликование новости')], default='PROP', max_length=4)),\n ('amount', models.FloatField(default=0)),\n ('date_time', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='News',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('link', models.CharField(blank=True, max_length=300, null=True)),\n ('media', models.CharField(blank=True, max_length=3000, null=True)),\n ('date_time', models.DateTimeField(auto_now_add=True)),\n ('blogger', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),\n ],\n ),\n migrations.CreateModel(\n name='Payment',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('mount', models.FloatField(default=0)),\n ('date_time', models.DateTimeField(auto_now_add=True)),\n ('blogger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),\n ],\n ),\n migrations.CreateModel(\n name='Publication',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('date_time', models.DateTimeField(auto_now_add=True)),\n ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),\n ('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News')),\n ],\n ),\n migrations.CreateModel(\n name='VKUser',\n fields=[\n ('vk_id', models.IntegerField(primary_key=True, serialize=False)),\n ],\n ),\n migrations.AddField(\n model_name='payment',\n name='payer',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),\n ),\n migrations.AddField(\n model_name='income',\n name='news',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to='bot.News'),\n ),\n migrations.AddField(\n model_name='blogger',\n name='vk_user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),\n ),\n migrations.AddField(\n model_name='adminreview',\n name='news',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News'),\n ),\n migrations.AddField(\n model_name='admin',\n name='vk_user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),\n ),\n ]\n"},"avg_line_length":{"kind":"number","value":39.1851851852,"string":"39.185185"},"max_line_length":{"kind":"number","value":150,"string":"150"},"alphanum_fraction":{"kind":"number","value":0.5536389414,"string":"0.553639"}}},{"rowIdx":47248,"cells":{"hexsha":{"kind":"string","value":"6b7e59d167fcf735ba0f1030602e287f95102618"},"size":{"kind":"number","value":112,"string":"112"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"comp/microsoft/has_solution/009_longest_semi_alernating_substr.py"},"max_stars_repo_name":{"kind":"string","value":"cc13ny/all-in"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bc0b01e44e121ea68724da16f25f7e24386c53de"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-05-18T06:11:02.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2017-05-18T06:11:02.000Z"},"max_issues_repo_path":{"kind":"string","value":"comp/microsoft/has_solution/009_longest_semi_alernating_substr.py"},"max_issues_repo_name":{"kind":"string","value":"cc13ny/all-in"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bc0b01e44e121ea68724da16f25f7e24386c53de"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-02-09T06:00:07.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2016-02-09T07:20:13.000Z"},"max_forks_repo_path":{"kind":"string","value":"comp/microsoft/has_solution/009_longest_semi_alernating_substr.py"},"max_forks_repo_name":{"kind":"string","value":"cc13ny/all-in"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bc0b01e44e121ea68724da16f25f7e24386c53de"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-06-27T09:07:26.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-07-01T04:40:13.000Z"},"content":{"kind":"string","value":"'''\nsame as 005.\n\n- 002: the substring\n- 005: the length\n'''\n\n\nclass Solution:\n def longest_substr(self, 
s):\n"},"avg_line_length":{"kind":"number","value":10.1818181818,"string":"10.181818"},"max_line_length":{"kind":"number","value":32,"string":"32"},"alphanum_fraction":{"kind":"number","value":0.6339285714,"string":"0.633929"}}},{"rowIdx":47249,"cells":{"hexsha":{"kind":"string","value":"d407e07335c75b4a785c579550380e60429aee7c"},"size":{"kind":"number","value":4724,"string":"4,724"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/Pix2Pix/src/utils/config.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-11-18T08:17:44.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-18T08:17:44.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/Pix2Pix/src/utils/config.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"research/cv/Pix2Pix/src/utils/config.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-01T06:17:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-10-04T08:39:45.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===========================================================================\n\n\"\"\"\n Define the common options that are used in both training and test.\n\"\"\"\n\nimport argparse\nimport ast\n\n\ndef get_args():\n '''\n get args.\n '''\n parser = argparse.ArgumentParser(description='Pix2Pix Model')\n\n # parameters\n parser.add_argument('--device_target', type=str, default='Ascend', choices=('Ascend', 'GPU'),\n help='device where the code will be implemented (default: Ascend)')\n parser.add_argument('--run_distribute', type=int, default=0, help='distributed training, default is 0.')\n parser.add_argument('--device_num', type=int, default=1, help='device num, default is 1.')\n 
--- research/cv/Pix2Pix/src/utils/config.py (leelige/mindspore, Apache-2.0) ---

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================

"""
    Define the common options that are used in both training and test.
"""

import argparse
import ast


def get_args():
    '''
    get args.
    '''
    parser = argparse.ArgumentParser(description='Pix2Pix Model')

    # parameters
    parser.add_argument('--device_target', type=str, default='Ascend', choices=('Ascend', 'GPU'),
                        help='device where the code will be implemented (default: Ascend)')
    parser.add_argument('--run_distribute', type=int, default=0, help='distributed training, default is 0.')
    parser.add_argument('--device_num', type=int, default=1, help='device num, default is 1.')
    parser.add_argument('--device_id', type=int, default=6, help='device id, default is 6.')
    parser.add_argument('--save_graphs', type=ast.literal_eval, default=False,
                        help='whether save graphs, default is False.')
    parser.add_argument('--init_type', type=str, default='normal', help='network initialization, default is normal.')
    parser.add_argument('--init_gain', type=float, default=0.02,
                        help='scaling factor for normal, xavier and orthogonal, default is 0.02.')
    parser.add_argument('--pad_mode', type=str, default='CONSTANT', choices=('CONSTANT', 'REFLECT', 'SYMMETRIC'),
                        help='padding mode, default is CONSTANT.')
    parser.add_argument('--load_size', type=int, default=286, help='scale images to this size, default is 286.')
    parser.add_argument('--batch_size', type=int, default=1, help='batch_size, default is 1.')
    parser.add_argument('--LAMBDA_Dis', type=float, default=0.5, help='weight for Discriminator Loss, default is 0.5.')
    parser.add_argument('--LAMBDA_GAN', type=int, default=1, help='weight for GAN Loss, default is 1.')
    parser.add_argument('--LAMBDA_L1', type=int, default=100, help='weight for L1 Loss, default is 100.')
    parser.add_argument('--beta1', type=float, default=0.5, help='adam beta1, default is 0.5.')
    parser.add_argument('--beta2', type=float, default=0.999, help='adam beta2, default is 0.999.')
    parser.add_argument('--lr', type=float, default=0.0002, help='the initial learning rate, default is 0.0002.')
    parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy, default is linear.')
    parser.add_argument('--epoch_num', type=int, default=200, help='epoch number for training, default is 200.')
    parser.add_argument('--n_epochs', type=int, default=100,
                        help='number of epochs with the initial learning rate, default is 100.')
    parser.add_argument('--n_epochs_decay', type=int, default=100,
                        help='number of epochs with the dynamic learning rate, default is 100.')
    parser.add_argument('--dataset_size', type=int, default=400, choices=(400, 1096),
                        help='for Facade_dataset, the number is 400; for Maps_dataset, the number is 1096.')

    # The location of input and output data
    parser.add_argument('--train_data_dir', type=str, default=None, help='the file path of input data during training.')
    parser.add_argument('--val_data_dir', type=str, default=None, help='the file path of input data during validating.')
    parser.add_argument('--train_fakeimg_dir', type=str, default='./results/fake_img/',
                        help='during training, the file path of stored fake img.')
    parser.add_argument('--loss_show_dir', type=str, default='./results/loss_show',
                        help='during training, the file path of stored loss img.')
    parser.add_argument('--ckpt_dir', type=str, default='./results/ckpt/',
                        help='during training, the file path of stored CKPT.')
    parser.add_argument('--ckpt', type=str, default=None, help='during validating, the file path of the CKPT used.')
    parser.add_argument('--predict_dir', type=str, default='./results/predict/',
                        help='during validating, the file path of Generated image.')
    args = parser.parse_args()
    return args
--- indl/metrics.py (SachsLab/indl, Apache-2.0) ---

from typing import List


__all__ = ['dprime', 'quickplot_history']


def dprime(y_true, y_pred, pmarg: float = 0.01, outputs: List[str] = ['dprime', 'bias', 'accuracy']) -> tuple:
    """
    Calculate D-Prime for binary data.
    70% for both classes is d=1.0488.
    Highest possible is 6.93, but effectively 4.65 for 99%.

    http://www.birmingham.ac.uk/Documents/college-les/psych/vision-laboratory/sdtintro.pdf

    This function is not designed to behave as a valid 'Tensorflow metric'.

    Args:
        y_true (array-like): True labels.
        y_pred (array-like): Predicted labels.
        pmarg: Probability margin used to clip the hit and false-alarm rates away from 0 and 1.
        outputs: list of outputs among 'dprime', 'bias', 'accuracy'

    Returns:
        Tuple with one entry per requested output.
    """

    import numpy as np
    from scipy.stats import norm

    # TODO: Adapt this function for tensorflow
    # y_pred = ops.convert_to_tensor(y_pred)
    # y_true = math_ops.cast(y_true, y_pred.dtype)
    # return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)

    # TODO: Check that true_y only has 2 classes, and test_y is entirely within true_y classes.
    b_true = y_pred == y_true
    b_pos = np.unique(y_true, return_inverse=True)[1].astype(bool)

    true_pos = np.sum(np.logical_and(b_true, b_pos))
    true_neg = np.sum(np.logical_and(b_true, ~b_pos))
    false_pos = np.sum(np.logical_and(~b_true, b_pos))
    false_neg = np.sum(np.logical_and(~b_true, ~b_pos))

    tpr = true_pos / (true_pos + false_neg)
    tpr = max(pmarg, min(tpr, 1 - pmarg))
    fpr = false_pos / (false_pos + true_neg)
    fpr = max(pmarg, min(fpr, 1 - pmarg))
    ztpr = norm.ppf(tpr, loc=0, scale=1)
    zfpr = norm.ppf(fpr, loc=0, scale=1)

    # Other measures of performance:
    # sens = tp ./ (tp+fp)
    # spec = tn ./ (tn+fn)
    # balAcc = (sens+spec)/2
    # informedness = sens+spec-1

    output = tuple()
    for out in outputs:
        if out == 'dprime':
            dprime = ztpr - zfpr
            output += (dprime,)
        elif out == 'bias':
            bias = -(ztpr + zfpr) / 2
            output += (bias,)
        elif out == 'accuracy':
            accuracy = 100 * (true_pos + true_neg) / (true_pos + false_pos + false_neg + true_neg)
            output += (accuracy,)

    return output


def quickplot_history(history) -> None:
    """
    A little helper function to do a quick plot of model fit results.
    Args:
        history (tf.keras History):
    """
    import matplotlib.pyplot as plt
    if hasattr(history, 'history'):
        history = history.history
    hist_metrics = [_ for _ in history.keys() if not _.startswith('val_')]

    for m_ix, m in enumerate(hist_metrics):
        plt.subplot(len(hist_metrics), 1, m_ix + 1)
        plt.plot(history[m], label='Train')
        plt.plot(history['val_' + m], label='Valid.')
        plt.xlabel('Epoch')
        plt.ylabel(m)
        plt.legend()
    plt.tight_layout()
    plt.show()
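As a sanity check of the docstring's claim that 70% per class gives d' of about 1.0488, one can feed `dprime` two balanced classes with roughly 30% of each class flipped:

import numpy as np

rng = np.random.default_rng(0)
y_true = np.repeat([0, 1], 100)
# flip about 30% of the labels so each class is predicted at ~70% accuracy
flips = rng.random(200) < 0.3
y_pred = np.where(flips, 1 - y_true, y_true)

d, bias, acc = dprime(y_true, y_pred)
print(round(d, 2), round(acc, 1))  # roughly 1.05 and 70.0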
--- Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py (Kreijeck/learning, MIT) ---

# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden


import pytest

from ch02_math.solutions.ex03_perfectnumber import is_perfect_number_simple, \
    calc_perfect_numbers, is_perfect_number_based_on_proper_divisors


@pytest.mark.parametrize("n, expected",
                         [(6, True), (28, True),
                          (496, True), (8128, True)])
def test_is_perfect_number_simple(n, expected):
    assert is_perfect_number_simple(n) == expected


@pytest.mark.parametrize("n, expected", [(50, [6, 28]),
                                         (1000, [6, 28, 496]),
                                         (10000, [6, 28, 496, 8128])])
def test_calc_perfect_numbers(n, expected):
    assert calc_perfect_numbers(n) == expected


@pytest.mark.parametrize("n, expected", [
    (6, True), (28, True), (496, True), (8128, True)])
def test_is_perfect_number_based_on_proper_divisors(n, expected):
    assert is_perfect_number_based_on_proper_divisors(n) == expected
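The module under test (`ch02_math.solutions.ex03_perfectnumber`) is not part of this dump. A straightforward sketch these tests would pass against (a number is perfect when it equals the sum of its proper divisors):

def proper_divisors(n):
    return [d for d in range(1, n) if n % d == 0]


def is_perfect_number_simple(n):
    return n == sum(d for d in range(1, n) if n % d == 0)


def is_perfect_number_based_on_proper_divisors(n):
    return n == sum(proper_divisors(n))


def calc_perfect_numbers(limit):
    # O(n^2) overall, fine for the small limits used in the tests
    return [n for n in range(2, limit + 1) if is_perfect_number_simple(n)]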
--- src/torch/npu/random.py (Ascend/pytorch, BSD-3-Clause) ---

# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from . import _lazy_init, _lazy_call, device_count, current_device

__all__ = ['manual_seed', 'manual_seed_all',
           'seed', 'seed_all', 'initial_seed']


def manual_seed(seed):
    r"""Sets the seed for generating random numbers for the current NPU.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.

    .. warning::
        If you are working with a multi-NPU model, this function is insufficient
        to get determinism. To seed all NPUs, use :func:`manual_seed_all`.
    """
    seed = int(seed)

    def cb():
        idx = current_device()
        default_generator = torch.npu.default_generators[idx]
        default_generator.manual_seed(seed)

    _lazy_call(cb)


def manual_seed_all(seed):
    r"""Sets the seed for generating random numbers on all NPUs.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)

    def cb():
        for i in range(device_count()):
            default_generator = torch.npu.default_generators[i]
            default_generator.manual_seed(seed)

    _lazy_call(cb)


def seed():
    r"""Sets the seed for generating random numbers to a random number for the current NPU.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    .. warning::
        If you are working with a multi-NPU model, this function will only initialize
        the seed on one NPU. To initialize all NPUs, use :func:`seed_all`.
    """
    def cb():
        idx = current_device()
        default_generator = torch.npu.default_generators[idx]
        default_generator.seed()

    _lazy_call(cb)


def seed_all():
    r"""Sets the seed for generating random numbers to a random number on all NPUs.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.
    """
    def cb():
        random_seed = 0
        seeded = False
        for i in range(device_count()):
            default_generator = torch.npu.default_generators[i]
            if not seeded:
                default_generator.seed()
                random_seed = default_generator.initial_seed()
                seeded = True
            else:
                default_generator.manual_seed(random_seed)

    _lazy_call(cb)


def initial_seed():
    r"""Returns the current random seed of the current NPU.

    .. warning::
        This function eagerly initializes NPU.
    """
    _lazy_init()
    idx = current_device()
    default_generator = torch.npu.default_generators[idx]
    return default_generator.initial_seed()
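Usage mirrors the familiar `torch.cuda` seeding API; on an Ascend build of PyTorch, the expected pattern is roughly the following (a sketch, not taken from the source):

import torch

torch.npu.manual_seed_all(42)    # queue the seed for every NPU generator
print(torch.npu.initial_seed())  # forces initialization; expected to print 42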
--- Scarky2/builder/views.py (kopringo/Scarky2, MIT) ---

#-*- coding: utf-8 -*-

from django.shortcuts import render
from django.http.response import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.core.urlresolvers import reverse
import uuid
import json


from models import Problem, Language
# Create your views here.


def home(request):
    return HttpResponseRedirect(reverse('builder', args=['new', ]))


def builder(request, pid):

    params = {}
    secret = ''
    problem = None

    if pid == 'new':
        params['new'] = True

        if request.POST:
            user = None
            if request.user.is_authenticated():
                user = request.user
            problem = Problem.create_problem(user)
            problem.name = request.POST.get('name', '')
            problem.content = request.POST.get('content', '')
            problem.save()

            if request.is_ajax():
                return HttpResponse(json.dumps({'pid': problem.code, 'secret': problem.secret}), content_type='application/json')
            else:
                return HttpResponseRedirect('%s?secret=%s' % (reverse('problem', args=[problem.code]), problem.secret))

    else:
        secret = request.GET.get('secret', request.POST.get('secret', ''))
        try:
            problem = Problem.objects.get(code=pid)

            if (not request.user.is_authenticated() and secret != problem.secret) or \
                    (request.user.is_authenticated() and problem.user != request.user):
                raise Exception('access-denied')

        except Problem.DoesNotExist as e:
            return HttpResponseRedirect('/?not-found')

        except Exception as e:
            return HttpResponseRedirect('/?access-denied')

        if request.POST:
            name = request.POST.get('name', '')
            content = request.POST.get('content', '')
            input = request.POST.get('input', '')
            output = request.POST.get('output', '')

            problem.name = name
            problem.content = content
            problem.input = input
            problem.output = output
            problem.save()

            return HttpResponseRedirect('/builder/%s?secret=%s' % (pid, problem.secret))

    languages = Language.objects.all().filter(visible=True)
    if len(languages) == 0:
        Language.sync_languages()
        languages = Language.objects.all().filter(visible=True)
    params['languages'] = languages

    params['problem'] = problem
    params['problem_code'] = pid
    params['problem_secret'] = secret
    return render(request, 'builder/home.html', params)


def builder_upload(request):
    file = 'sdf'
    return HttpResponse(json.dumps({'file': file}), content_type='application/json')


def problem(request, pid):
    params = {}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        return HttpResponseRedirect(reverse('problems'))

    # if problem.secret != request.GET.get('secret', '~!@#$%^#@#$@#!@!...'):
    #     pass

    # if a secret is given: editing and statistics
    params['problem'] = problem
    params['host'] = request.META['HTTP_HOST']
    return render(request, 'builder/problem.html', params)


def widget_js(request, pid):
    params = {pid: pid}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        pass

    params['host'] = request.META['HTTP_HOST']
    params['problem'] = problem
    return render(request, 'builder/widget_js.html', params)


def widget(request, pid):

    params = {}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        pass

    return render(request, 'builder/widget.html', params)


def problems(request):
    params = {}
    return render(request, 'builder/problems.html', params)


def api_1_problems(request):
    pass


def api_1_problem(request, pid):
    pass


def api_1_submissions(request, pid):
    pass


def api_1_submission(request, pid, sid):
    pass

--- paddlenlp/transformers/ppminilm/tokenizer.py (mukaiu/PaddleNLP, Apache-2.0) ---

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
import six
import shutil

from paddle.utils import try_import
from paddlenlp.utils.env import MODEL_HOME

from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer

__all__ = ['PPMiniLMTokenizer']


class PPMiniLMTokenizer(PretrainedTokenizer):
    r"""
    Constructs a PPMiniLM tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.

    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
    which contains most of the main methods. For more information regarding those methods,
    please refer to this superclass.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        do_lower_case (str, optional):
            Whether or not to lowercase the input when tokenizing.
            Defaults to `True`.
        unk_token (str, optional):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "[UNK]".
        sep_token (str, optional):
            A special token separating two different sentences in the same input.
            Defaults to "[SEP]".
        pad_token (str, optional):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "[PAD]".
        cls_token (str, optional):
            A special token used for sequence classification. It is the last token
            of the sequence when built with special tokens. Defaults to "[CLS]".
        mask_token (str, optional):
            A special token representing a masked token. This is the token used
            in the masked language modeling task which the model tries to predict the original unmasked ones.
            Defaults to "[MASK]".

    Examples:
        .. code-block::

            from paddlenlp.transformers import PPMiniLMTokenizer
            tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')

            encoded_inputs = tokenizer('He was a puppeteer')
            # encoded_inputs:
            # { 'input_ids': [1, 4444, 4385, 1545, 6712, 10062, 9568, 9756, 9500, 2],
            #   'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
            # }

    """
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "ppminilm-6l-768h":
            "https://bj.bcebos.com/paddlenlp/models/transformers/ppminilm-6l-768h/vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "ppminilm-6l-768h": {
            "do_lower_case": True
        },
    }

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]",
                 **kwargs):

        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = PPMiniLMTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.do_lower_case = do_lower_case
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
                                                      unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        r"""
        End-to-end tokenization for PPMiniLM models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            List[str]: A list of string representing converted tokens.
        """
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_string(self, tokens):
        r"""
        Converts a sequence of tokens (list of string) into a single string. Since
        the usage of WordPiece introduces `##` to concat subwords, also remove
        `##` when converting.

        Args:
            tokens (List[str]): A list of string representing tokens to be converted.

        Returns:
            str: Converted string from tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import PPMiniLMTokenizer
                tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')

                tokens = tokenizer.tokenize('He was a puppeteer')
                strings = tokenizer.convert_tokens_to_string(tokens)
                #he was a puppeteer

        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        r"""
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient.
            Do not put this inside your training loop.

        Args:
            pair (bool, optional):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False` and the input is a single sequence.

        Returns:
            int: Number of tokens added to sequences
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(
                token_ids_0, token_ids_1 if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        r"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens.

        A sequence has the following format:

        - single sequence:      ``[CLS] X [SEP]``
        - pair of sequences:    ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs.
                Defaults to `None`.

        Returns:
            List[int]: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        r"""
        Build offset map from a pair of offset maps by concatenating and adding offsets of special tokens.

        An offset_mapping has the following format:

        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:    ``(0,0) A (0,0) B (0,0)``

        Args:
            offset_mapping_ids_0 (List[tuple]):
                List of char offsets to which the special tokens will be added.
            offset_mapping_ids_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs.
                Defaults to `None`.

        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        r"""
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        A sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs.
                Defaults to `None`.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]
2020\")\r\nplt.grid()\r\nplt.show()\r\n"},"avg_line_length":{"kind":"number","value":36.6,"string":"36.6"},"max_line_length":{"kind":"number","value":120,"string":"120"},"alphanum_fraction":{"kind":"number","value":0.5482695811,"string":"0.54827"}}},{"rowIdx":47256,"cells":{"hexsha":{"kind":"string","value":"cfe40386467f5a82bc05967bf49df0d025067384"},"size":{"kind":"number","value":10250,"string":"10,250"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/onegov/election_day/upgrade.py"},"max_stars_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/onegov/election_day/upgrade.py"},"max_issues_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/onegov/election_day/upgrade.py"},"max_forks_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\" Contains upgrade tasks that are executed when the application is being\nupgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.\n\n\"\"\"\nfrom onegov.ballot import Election\nfrom onegov.ballot import Vote\nfrom onegov.core.orm.types import JSON\nfrom onegov.core.orm.types import UTCDateTime\nfrom onegov.core.upgrade import upgrade_task\nfrom onegov.election_day.collections import ArchivedResultCollection\nfrom onegov.election_day.models import ArchivedResult\nfrom onegov.election_day.models import Subscriber\nfrom sqlalchemy import Column\nfrom sqlalchemy import Enum\nfrom sqlalchemy import Text\n\n\n@upgrade_task('Create archived results')\ndef create_archived_results(context):\n\n \"\"\" Create an initial archived result entry for all existing votes\n and elections.\n\n Because we don't have a real request here, the generated URL are wrong!\n To fix the links, login after the update and call the 'update-results'\n view.\n\n \"\"\"\n ArchivedResultCollection(context.session).update_all(context.request)\n\n\n@upgrade_task('Add ID to archived results')\ndef add_id_to_archived_results(context):\n\n \"\"\" Add the IDs of the elections/votes as meta information to the results.\n\n Normally, the right election and vote should be found. 
To be sure, you\n call the 'update-results' view to ensure that everything is right.\n \"\"\"\n session = context.session\n\n results = session.query(ArchivedResult)\n results = results.filter(ArchivedResult.schema == context.app.schema)\n\n for result in results:\n if result.type == 'vote':\n vote = session.query(Vote).filter(\n Vote.date == result.date,\n Vote.domain == result.domain,\n Vote.shortcode == result.shortcode,\n Vote.title_translations == result.title_translations\n ).first()\n if vote and vote.id in result.url:\n result.external_id = vote.id\n\n if result.type == 'election':\n election = session.query(Election).filter(\n Election.date == result.date,\n Election.domain == result.domain,\n Election.shortcode == result.shortcode,\n Election.title_translations == result.title_translations,\n Election.counted_entities == result.counted_entities,\n Election.total_entities == result.total_entities,\n ).first()\n if election and election.id in result.url:\n result.external_id = election.id\n\n\n@upgrade_task('Update vote progress')\ndef update_vote_progress(context):\n\n \"\"\" Recalculate the vote progress for the archived results.\n\n \"\"\"\n session = context.session\n\n results = session.query(ArchivedResult)\n results = results.filter(\n ArchivedResult.schema == context.app.schema,\n ArchivedResult.type == 'vote'\n )\n\n for result in results:\n vote = session.query(Vote).filter_by(id=result.external_id)\n vote = vote.first()\n if vote:\n result.counted_entities, result.total_entities = vote.progress\n\n\n@upgrade_task('Add elected candidates to archived results')\ndef add_elected_candidates(context):\n\n \"\"\" Adds the elected candidates to the archived results,\n\n \"\"\"\n session = context.session\n\n results = session.query(ArchivedResult)\n results = results.filter(\n ArchivedResult.schema == context.app.schema,\n ArchivedResult.type == 'election'\n )\n\n for result in results:\n election = session.query(Election).filter_by(id=result.external_id)\n election = election.first()\n if election:\n result.elected_candidates = election.elected_candidates\n\n\n@upgrade_task('Add content columns to archived results')\ndef add_content_columns_to_archived_results(context):\n if not context.has_column('archived_results', 'content'):\n context.operations.add_column(\n 'archived_results', Column('content', JSON)\n )\n\n\n@upgrade_task('Change last change columns')\ndef change_last_change_columns(context):\n if not context.has_column('archived_results', 'last_modified'):\n context.operations.add_column(\n 'archived_results',\n Column('last_modified', UTCDateTime, nullable=True)\n )\n if context.has_column('archived_results', 'last_result_change'):\n context.operations.execute(\n 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(\n 'archived_results', 'last_result_change'\n )\n )\n\n if (\n context.has_column('notifications', 'last_change')\n and not context.has_column('notifications', 'last_modified')\n ):\n context.operations.execute(\n 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(\n 'notifications', 'last_change', 'last_modified'\n )\n )\n\n if context.has_column('notifications', 'last_modified'):\n context.operations.execute(\n 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(\n 'notifications', 'last_modified'\n )\n )\n\n\n@upgrade_task('Make subscriber polymorphic')\ndef make_subscriber_polymorphic(context):\n if not context.has_column('subscribers', 'type'):\n context.operations.add_column(\n 'subscribers',\n Column('type', Text, nullable=True)\n )\n\n if (\n 
context.has_column('subscribers', 'phone_number')\n and not context.has_column('subscribers', 'address')\n ):\n context.operations.execute(\n 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(\n 'subscribers', 'phone_number', 'address'\n )\n )\n\n if context.has_column('subscribers', 'type'):\n susbscribers = context.session.query(Subscriber)\n susbscribers = susbscribers.filter(Subscriber.type.is_(None))\n for subscriber in susbscribers:\n subscriber.type = 'sms'\n\n\n@upgrade_task('Make notifications polymorphic')\ndef make_notifications_polymorphic(context):\n if (\n context.has_column('notifications', 'action')\n and not context.has_column('notifications', 'type')\n ):\n context.operations.execute(\n 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format(\n 'notifications', 'action', 'type'\n )\n )\n context.operations.execute(\n 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format(\n 'notifications', 'type'\n )\n )\n\n\n@upgrade_task(\n 'Apply static data',\n requires='onegov.ballot:Replaces results group with name and district'\n)\ndef apply_static_data(context):\n principal = getattr(context.app, 'principal', None)\n if not principal:\n return\n\n for vote in context.session.query(Vote):\n for ballot in vote.ballots:\n assert vote.date and vote.date.year in principal.entities\n for result in ballot.results:\n assert (\n result.entity_id in principal.entities[vote.date.year]\n or result.entity_id == 0\n )\n result.name = principal.entities.\\\n get(vote.date.year, {}).\\\n get(result.entity_id, {}).\\\n get('name', '')\n result.district = principal.entities.\\\n get(vote.date.year, {}).\\\n get(result.entity_id, {}).\\\n get('district', '')\n\n for election in context.session.query(Election):\n assert election.date and election.date.year in principal.entities\n for result in election.results:\n assert (\n result.entity_id in principal.entities[election.date.year]\n or result.entity_id == 0\n )\n result.name = principal.entities.\\\n get(election.date.year, {}).\\\n get(result.entity_id, {}).\\\n get('name', '')\n result.district = principal.entities.\\\n get(election.date.year, {}).\\\n get(result.entity_id, {}).\\\n get('district', '')\n\n\n@upgrade_task('Add election compound to archive')\ndef add_election_compound_to_archive(context):\n old_type = Enum('election', 'vote', name='type_of_result')\n new_type = Enum(\n 'election', 'election_compound', 'vote', name='type_of_result'\n )\n tmp_type = Enum(\n 'election', 'election_compound', 'vote', name='_type_of_result'\n )\n\n tmp_type.create(context.operations.get_bind(), checkfirst=False)\n context.operations.execute(\n 'ALTER TABLE archived_results ALTER COLUMN type '\n 'TYPE _type_of_result USING type::text::_type_of_result'\n )\n\n old_type.drop(context.operations.get_bind(), checkfirst=False)\n\n new_type.create(context.operations.get_bind(), checkfirst=False)\n context.operations.execute(\n 'ALTER TABLE archived_results ALTER COLUMN type '\n 'TYPE type_of_result USING type::text::type_of_result'\n )\n\n tmp_type.drop(context.operations.get_bind(), checkfirst=False)\n\n\n@upgrade_task('Add contraints to notifications and sources')\ndef add_contraints_to_notifications_and_sources(context):\n # We use SQL (rather than operations.xxx) so that we can drop and add\n # the constraints in one statement\n for ref in ('election', 'vote'):\n for table in ('notifications', 'upload_data_source_item'):\n context.operations.execute(\n f'ALTER TABLE {table} '\n f'DROP CONSTRAINT {table}_{ref}_id_fkey, '\n f'ADD CONSTRAINT {table}_{ref}_id_fkey'\n f' 
FOREIGN KEY ({ref}_id) REFERENCES {ref}s (id)'\n f' ON UPDATE CASCADE'\n )\n\n\n@upgrade_task('Enable expats on votes and elections')\ndef enable_expats(context):\n principal = getattr(context.app, 'principal', None)\n if not principal:\n return\n\n for vote in context.session.query(Vote):\n ballot = vote.ballots.first()\n if ballot:\n if ballot.results.filter_by(entity_id=0).first():\n vote.expats = True\n\n for election in context.session.query(Election):\n if election.results.filter_by(entity_id=0).first():\n election.expats = True\n"},"avg_line_length":{"kind":"number","value":34.8639455782,"string":"34.863946"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.6336585366,"string":"0.633659"}}},{"rowIdx":47257,"cells":{"hexsha":{"kind":"string","value":"cff8c74b564e9aa23283843ea7fd5738bfa7ce69"},"size":{"kind":"number","value":3009,"string":"3,009"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"timetable.py"},"max_stars_repo_name":{"kind":"string","value":"jerluebke/SOWAS"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d606bcd6757503257d01381da56602016261f578"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"timetable.py"},"max_issues_repo_name":{"kind":"string","value":"jerluebke/SOWAS"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d606bcd6757503257d01381da56602016261f578"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"timetable.py"},"max_forks_repo_name":{"kind":"string","value":"jerluebke/SOWAS"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d606bcd6757503257d01381da56602016261f578"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# pylint: disable = C, R\n\nfrom collections import namedtuple\n\nimport matplotlib as mpl\nfrom matplotlib.patches import Patch\nfrom matplotlib.gridspec import GridSpec\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmpl.rcParams[\"font.size\"] = 12\n\n\n# colors\ncmap = plt.get_cmap(\"Set1\")\ncolor_mapping = {\n \"th\" : cmap(0), # theory\n \"ex\" : cmap(1), # experiment\n \"ev\" : cmap(2) # evaluation\n }\n\n\nclass item(namedtuple(\"item\",\n [\"description\", \"start\", \"duration\", \"domain\"])):\n \"\"\"\n item in the timetable containing startpoint (starting with 0), duration,\n domain (theory, experiment, evaluation) and description (as it appears in\n the plot)\n \"\"\"\n __slots__ = ()\n @property\n def color(self):\n return color_mapping[self.domain]\n\n\n#############\n# data #\n#############\n\nitems = [\n # item: description, start, duration, domain\n item(\"Einarbeitung\\n in Python\", 0, 2, \"th\"),\n item(\"Erstellen der\\n Simulation\", 1, 4, \"th\"),\n # item(\"Ergänzung der\\n Simulation\", 5, 2, \"th\"),\n 
item(\"Bau der Mess-\\n vorrichtung\", 0, 2, \"ex\"),\n item(\"Aufnahme der\\n Messreihen\", 2, 4, \"ex\"),\n item(\"Anpassung des\\n Aufbaus\", 6, 3, \"ex\"),\n item(\"Auswertung der\\nDaten\", 7, 3, \"ev\"),\n item(\"Erstellung des\\nPosters\", 8, 11, \"ev\")\n ]\n\ndata_as_array = np.array([[item.start, item.duration] for item in items])\n\ny_values = np.arange(len(items))\nstarting_points = data_as_array[:,0]\ndurations = data_as_array[:,1]\ny_labels = [item.description for item in items]\ncolors = [item.color for item in items]\n\nkwargs = {\n \"height\" : .4,\n \"align\" : \"center\"\n }\n\n\n#################\n# plotting #\n#################\n\nfig = plt.figure(figsize=(8, 6))\n# make two subplots - actual plot and legend\ngs = GridSpec(2, 1, height_ratios=[11, 1])\n\n# make timetable\nax = plt.subplot(gs[0])\nax.barh(y_values, durations, left=starting_points, color=colors, **kwargs)\n\n# adjust yaxis\nax.invert_yaxis()\nax.set_yticks(y_values)\nax.set_yticklabels(y_labels)\n# hide yticks\nax.tick_params(axis='y', length=0)\n\n# xaxis: set ticks, label and limits\nax.set_xticks(np.arange(12))\nax.set_xlabel(\"Wochen (09.04.2018 - 22.06.2018)\")\nax.set_xlim((0, 11))\n# turn grid off to avoid conflicts with local settings\nax.grid(False)\n# place grid below elements in plot\nax.set_axisbelow(True)\nax.grid(axis='x')\n\n# make legend in second subplot\nlegend_ax = plt.subplot(gs[1])\n# remove ticks and boundary box\nlegend_ax.set(xticks=[], yticks=[])\nlegend_ax.set_axis_off()\n# make and map proxy artists to legend\nplt.legend(handles=[Patch(color=c, label=l)\n for c, l in zip(color_mapping.values(),\n (\"Theorie\", \"Experiment\", \"Auswertung\"))],\n loc=\"lower center\", ncol=3)\n\n# allign subplots\nplt.tight_layout()\n\nplt.savefig(\"timetable.png\", format=\"png\", dpi=300)\nplt.savefig(\"timetable.eps\", format=\"eps\", dpi=1000)\n"},"avg_line_length":{"kind":"number","value":26.6283185841,"string":"26.628319"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.630109671,"string":"0.63011"}}},{"rowIdx":47258,"cells":{"hexsha":{"kind":"string","value":"5c8ba138654ebae397537c0aba3ec6dff61d9382"},"size":{"kind":"number","value":771,"string":"771"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"01_class/transfer_parent_class_method.py"},"max_stars_repo_name":{"kind":"string","value":"wuyueCreator/python-test"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6072ac9264a257c89925469238c14fff3bda5630"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-03-25T03:44:54.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-03-25T03:44:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"01_class/transfer_parent_class_method.py"},"max_issues_repo_name":{"kind":"string","value":"wuyueCreator/python-test"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6072ac9264a257c89925469238c14fff3bda5630"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"01_class/transfer_parent_class_method.py"},"max_forks_repo_name":{"kind":"string","value":"wuyueCreator/python-test"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6072ac9264a257c89925469238c14fff3bda5630"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"class A:\n def spam(self):\n print('A.spam')\n\n\nclass B(A):\n def spam(self):\n print('B.spam')\n super().spam() # Call parent spam()\n\n\nclass C:\n def __init__(self):\n self.x = 0\n\n\nclass D(C):\n def __init__(self):\n super().__init__()\n self.y = 1\n\n\n# super() 的另外一个常见用法出现在覆盖Python特殊方法的代码中,比如:\nclass Proxy:\n def __init__(self, obj):\n self._obj = obj\n\n # Delegate attribute lookup to internal obj\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n # Delegate attribute assignment\n def __setattr__(self, name, value):\n if name.startswith('_'):\n super().__setattr__(name, value) # Call original __setattr__\n else:\n setattr(self._obj, name, value)\n\n\n"},"avg_line_length":{"kind":"number","value":19.275,"string":"19.275"},"max_line_length":{"kind":"number","value":73,"string":"73"},"alphanum_fraction":{"kind":"number","value":0.5836575875,"string":"0.583658"}}},{"rowIdx":47259,"cells":{"hexsha":{"kind":"string","value":"5cdbb678909c4b3e3437233198e313bdeb3e63d0"},"size":{"kind":"number","value":4260,"string":"4,260"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"quant/api/kkex.py"},"max_stars_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_stars_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-10-22T15:00:09.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-09-19T11:45:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"quant/api/kkex.py"},"max_issues_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_issues_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-01-19T16:19:40.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-01-19T16:19:40.000Z"},"max_forks_repo_path":{"kind":"string","value":"quant/api/kkex.py"},"max_forks_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_forks_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":5,"string":"5"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-12-11T15:10:29.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-12-21T17:40:58.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nfrom urllib import urlencode\nfrom urlparse import urljoin\n\nimport requests\nfrom hashlib import md5\n\nBASE_URL = 'https://kkex.com/api/v1'\nTIMEOUT = 5\n\n\nclass PublicClient(object):\n def __init__(self):\n super(PublicClient, self).__init__()\n\n @classmethod\n def _build_parameters(cls, parameters):\n # sort the keys so we can test easily in Python 3.3 (dicts are not\n # ordered)\n keys = list(parameters.keys())\n keys.sort()\n\n return '&'.join([\"%s=%s\" % (k, parameters[k]) for k in keys])\n\n def url_for(self, path, path_arg=None, parameters=None):\n\n # build the basic url\n url = \"%s/%s\" % (BASE_URL, path)\n\n # If there is a path_arh, interpolate it into the URL.\n # In this case the path that was provided will need to have string\n # interpolation characters in it, such as PATH_TICKER\n if path_arg:\n url = url % (path_arg)\n\n # Append any parameters to the URL.\n if parameters:\n url = \"%s?%s\" % (url, self._build_parameters(parameters))\n\n return url\n\n @classmethod\n def _get(cls, url, params=None):\n try:\n resp = requests.get(url, timeout=TIMEOUT, params=params)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if resp.status_code == requests.codes.ok:\n return resp.json()\n\n def depth(self, symbol):\n url = self.url_for('depth')\n params = {\n 'symbol': symbol\n }\n return self._get(url, params)\n\n\nclass PrivateClient(PublicClient):\n def __init__(self, api_key, api_secret):\n super(PrivateClient, self).__init__()\n self._key = api_key\n self._secret = api_secret\n self.api_root = 'https://kkex.com'\n\n def _sign(self, params):\n sign = list(sorted(params.items()) + [('secret_key', self._secret)])\n signer = md5()\n signer.update(urlencode(sign).encode('utf-8'))\n return signer.hexdigest().upper()\n\n def _post(self, path, params=None):\n if params is None:\n params = {}\n\n params['api_key'] = self._key\n sign = self._sign(params)\n params['sign'] = sign\n\n url = urljoin(self.api_root, path)\n try:\n resp = requests.post(url, data=params, timeout=5)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if resp.status_code == requests.codes.ok:\n return resp.json()\n\n def profile(self):\n return self._post('/api/v1/profile')\n\n def balance(self):\n return self._post('/api/v1/userinfo')\n\n def buy_limit(self, symbol, amount, price):\n params = {\n 'symbol': symbol,\n 'type': 'buy',\n 'price': price,\n 'amount': amount\n }\n return self._post('/api/v1/trade', params)\n\n def sell_limit(self, symbol, amount, price):\n params = {\n 'symbol': symbol,\n 'type': 'sell',\n 'price': price,\n 'amount': amount\n }\n return self._post('/api/v1/trade', params)\n\n def cancel_order(self, symbol, order_id):\n params = {'symbol': symbol,\n 'order_id': order_id}\n return self._post('/api/v1/cancel_order', params)\n\n def cancel_all_orders(self, symbol):\n params = {\n 'symbol': symbol\n }\n return self._post('/api/v1/cancel_all_orders', params)\n\n def order_info(self, symbol, order_id):\n params = {\n 'symbol': symbol,\n 'order_id': order_id\n }\n return self._post('/api/v1/order_info', params)\n\n def orders_info(self, symbol, order_ids):\n order_id_p = ','.join(order_ids)\n params = {\n 
'symbol': symbol,\n 'order_id': order_id_p\n }\n return self._post('/api/v1/orders_info', params)\n\n def _get_orders_history(self, symbol, status=0, page=1, pagesize=10):\n params = {\n 'symbol': symbol,\n 'status': status,\n 'current_page': page,\n 'page_length': pagesize\n }\n return self._post('/api/v1/order_history', params)\n"},"avg_line_length":{"kind":"number","value":28.5906040268,"string":"28.590604"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.5629107981,"string":"0.562911"}}},{"rowIdx":47260,"cells":{"hexsha":{"kind":"string","value":"a4603d1b25c4c01de3ede09c65761ebeeef0fc49"},"size":{"kind":"number","value":991,"string":"991"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"initial-settings.py"},"max_stars_repo_name":{"kind":"string","value":"gifted-nguvu/darkstar-dts-converter"},"max_stars_repo_head_hexsha":{"kind":"string","value":"aa17a751a9f3361ca9bbb400ee4c9516908d1297"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-18T18:23:27.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-08-02T15:59:16.000Z"},"max_issues_repo_path":{"kind":"string","value":"initial-settings.py"},"max_issues_repo_name":{"kind":"string","value":"gifted-nguvu/darkstar-dts-converter"},"max_issues_repo_head_hexsha":{"kind":"string","value":"aa17a751a9f3361ca9bbb400ee4c9516908d1297"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":5,"string":"5"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-07-07T16:47:47.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-08-10T16:20:00.000Z"},"max_forks_repo_path":{"kind":"string","value":"initial-settings.py"},"max_forks_repo_name":{"kind":"string","value":"gifted-nguvu/darkstar-dts-converter"},"max_forks_repo_head_hexsha":{"kind":"string","value":"aa17a751a9f3361ca9bbb400ee4c9516908d1297"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-03-18T18:23:30.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-03-18T18:23:30.000Z"},"content":{"kind":"string","value":"from conans import ConanFile, CMake, tools\nimport os.path\nimport sys\n\nclass LocalConanFile(ConanFile):\n settings = \"arch_build\"\n generator = []\n\n def requirements(self):\n profile = \"default\"\n\n if \"--profile\" in sys.argv:\n profile = sys.argv[sys.argv.index(\"--profile\") + 1]\n profile = os.path.abspath(profile) if os.path.exists(profile) else profile\n\n print(f\"Configuring CMake arch to {self.settings.arch_build} for {profile} profile. 
Helps cross-compiling.\")\n self.run(f\"conan profile update settings.cmake:arch={self.settings.arch_build} {profile}\")\n\n print(f\"Adding the bincrafters remote @ https://bincrafters.jfrog.io/artifactory/api/conan/public-conan.\")\n self.run(f\"conan remote add bincrafters https://bincrafters.jfrog.io/artifactory/api/conan/public-conan --force\")\n\n print(\"Configuring settings to work correctly with bincrafters.\")\n self.run(f\"conan config set general.revisions_enabled=1\")\n"},"avg_line_length":{"kind":"number","value":41.2916666667,"string":"41.291667"},"max_line_length":{"kind":"number","value":121,"string":"121"},"alphanum_fraction":{"kind":"number","value":0.6962663976,"string":"0.696266"}}},{"rowIdx":47261,"cells":{"hexsha":{"kind":"string","value":"778b53e8126b091296e7e1d7fb2989f969872569"},"size":{"kind":"number","value":1067,"string":"1,067"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/2.prim2d/textshape.py"},"max_stars_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-04-11T14:11:40.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-09-12T19:03:36.000Z"},"max_issues_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/2.prim2d/textshape.py"},"max_issues_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/2.prim2d/textshape.py"},"max_forks_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"\nZenCad API example: textshape\ndate: 04.10.2019\n\"\"\"\n\nimport os\nfrom zencad import *\n\nzencad_example_directory = zencad.moduledir + \"/examples\"\n\ntestfont = os.path.join(zencad_example_directory, \"fonts/testfont.ttf\")\nmandarinc = os.path.join(zencad_example_directory, \"fonts/mandarinc.ttf\")\n\nregister_font(testfont)\nregister_font(mandarinc)\n\nm0 = textshape(text=\"ZenCad\", fontname=\"Ubuntu Mono\", size=100)\nm1 = textshape(text=\"ZenCad\", fontname=\"Mandarinc\", size=100)\n\ndisp(m0, color.white)\ndisp(m0.rotateX(deg(90)).translate(0, 70, 0))\n\ndisp(m1.translate( 0, 200, 0), color.green)\ndisp(m1.rotateX(deg(90)).translate( 0, 270, 0), color.yellow)\n\n#########################Advanced Example########################################\nx = 400\ny = 100\nz = 50\ndeep = 10\n\n#find the geometric center of the textshape\nm1center = m1.center()\nm2 = (\n\tbox(x, y, z) \n\t- 
m1.extrude(deep).up(z-deep).translate(x/2 - m1center.x, y/2 - m1center.y, 0)\n)\n\ndisp(m2.forw(400))\n################################################################################\n\nshow()\n"},"avg_line_length":{"kind":"number","value":24.25,"string":"24.25"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.6157450797,"string":"0.615745"}}},{"rowIdx":47262,"cells":{"hexsha":{"kind":"string","value":"24af93a81c7b2b92ca665acd7e7576d818f94d98"},"size":{"kind":"number","value":2054,"string":"2,054"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"toolsparty-master/information-gathering/ip-extender.py"},"max_stars_repo_name":{"kind":"string","value":"Zusyaku/Termux-And-Lali-Linux-V2"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b1a1b0841d22d4bf2cc7932b72716d55f070871e"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-11-17T03:35:03.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-08T06:00:31.000Z"},"max_issues_repo_path":{"kind":"string","value":"toolsparty-master/information-gathering/ip-extender.py"},"max_issues_repo_name":{"kind":"string","value":"Zusyaku/Termux-And-Lali-Linux-V2"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b1a1b0841d22d4bf2cc7932b72716d55f070871e"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"toolsparty-master/information-gathering/ip-extender.py"},"max_forks_repo_name":{"kind":"string","value":"Zusyaku/Termux-And-Lali-Linux-V2"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b1a1b0841d22d4bf2cc7932b72716d55f070871e"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-11-05T18:07:48.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-02-24T21:25:07.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n# -**- Author: LandGrey -**-\n\nimport os\nimport sys\n\n\ndef ip_extender(ips=None, files=None, switch=3, extend=5, is_format=False):\n results = []\n cidr_groups = []\n none_cidr_groups = []\n cidr_dict = {}\n if not ips and files:\n ips = []\n with open(files, 'r') as f:\n for line in f.readlines():\n if line.strip():\n ips.append(line.strip())\n ips = list(set(ips))\n\n for ip in ips:\n prefix = \".\".join(ip.split(\".\")[:3])\n if prefix not in cidr_dict.keys():\n cidr_dict[prefix] = [1, ip]\n else:\n cidr_dict[prefix][0] += 1\n cidr_dict[prefix].append(ip)\n\n for k, v in cidr_dict.items():\n if v[0] >= switch:\n cidr_groups.append(k)\n else:\n for _ in v[1:]:\n none_cidr_groups.append(_)\n if not is_format:\n for _ in cidr_groups:\n results.extend(extend_ips([_ + \".128\"]))\n results.extend(extend_ips(none_cidr_groups, extend=extend))\n else:\n for _ in cidr_groups:\n results.append(_ + \".0/24\")\n for _ in none_cidr_groups:\n r = extend_ips([_], extend=extend)\n results.append(r[0] + 
\"-\" + r[-1])\n return results\n\n\ndef extend_ips(ips, extend=128):\n results = ips\n var0 = []\n for ip in ips:\n ip_chunk = ip.split(\".\")\n for chunk in range(min(int(ip_chunk[3]) - int(extend), int(ip_chunk[3]) - 1)\n if int(ip_chunk[3]) - int(extend) > 0 else 1, min(int(ip_chunk[3]) + int(extend) + 1, 256)):\n var0.append(\"{0}.{1}.{2}.{3}\".format(ip_chunk[0], ip_chunk[1], ip_chunk[2], str(chunk)))\n results.extend(var0)\n return sorted(list(set(results)), key=lambda x: (len(x), str(x)))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):\n exit(\"[*] Usage: python ip-extender.py single_ip_list.txt\")\n for ip in ip_extender(files=sys.argv[1], is_format=True):\n print(ip)\n"},"avg_line_length":{"kind":"number","value":31.1212121212,"string":"31.121212"},"max_line_length":{"kind":"number","value":119,"string":"119"},"alphanum_fraction":{"kind":"number","value":0.5438169426,"string":"0.543817"}}},{"rowIdx":47263,"cells":{"hexsha":{"kind":"string","value":"24d549d8e10dc17807c702172c3db31e6793dde3"},"size":{"kind":"number","value":31,"string":"31"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"Semicheche/foa_frappe_docker"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a186b65d5e807dd4caf049e8aeb3620a799c1225"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"Semicheche/foa_frappe_docker"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a186b65d5e807dd4caf049e8aeb3620a799c1225"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"Semicheche/foa_frappe_docker"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a186b65d5e807dd4caf049e8aeb3620a799c1225"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from bleach_whitelist import 
*\n"},"avg_line_length":{"kind":"number","value":15.5,"string":"15.5"},"max_line_length":{"kind":"number","value":30,"string":"30"},"alphanum_fraction":{"kind":"number","value":0.8387096774,"string":"0.83871"}}},{"rowIdx":47264,"cells":{"hexsha":{"kind":"string","value":"d9198234423e5d3c84ef6a9aac1201bc80bc1c51"},"size":{"kind":"number","value":1605,"string":"1,605"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"etl/io_config/server_protocol.py"},"max_stars_repo_name":{"kind":"string","value":"cloud-cds/cds-stack"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d68a1654d4f604369a071f784cdb5c42fc855d6e"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-06-27T00:09:55.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-03-07T14:06:53.000Z"},"max_issues_repo_path":{"kind":"string","value":"etl/io_config/server_protocol.py"},"max_issues_repo_name":{"kind":"string","value":"cloud-cds/cds-stack"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d68a1654d4f604369a071f784cdb5c42fc855d6e"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-31T18:37:46.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-01T21:49:41.000Z"},"max_forks_repo_path":{"kind":"string","value":"etl/io_config/server_protocol.py"},"max_forks_repo_name":{"kind":"string","value":"cloud-cds/cds-stack"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d68a1654d4f604369a071f784cdb5c42fc855d6e"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-01-24T16:40:49.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-09-30T02:28:55.000Z"},"content":{"kind":"string","value":"import asyncio\nimport json\nimport logging\nimport socket, errno\nfrom etl.io_config.core import get_environment_var\n\nSRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'\nlogging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)\n\nMAGIC_NUMBER = b'trews_magic_number'\nCONNECTION_CLOSED = 'Connection Closed'\n\nLMC_ALERT_SERVER_IP = get_environment_var('LMC_ALERT_SERVER_IP',\n 'alerts.default.svc.cluster.local')\nLMC_ALERT_SERVER_PORT = 31000\n\nTREWS_ALERT_SERVER_IP = get_environment_var('TREWS_ALERT_SERVER_IP',\n 'trews-alerts.default.svc.cluster.local')\nTREWS_ALERT_SERVER_PORT = 31000\n\nasync def read_message(reader, writer):\n try:\n data = await reader.readuntil(MAGIC_NUMBER)\n except asyncio.streams.IncompleteReadError:\n return CONNECTION_CLOSED\n\n # Decode and return message\n EOM = -1 * len(MAGIC_NUMBER)\n data = data[:EOM]\n logging.debug('Receiving from {}: {}'.format(writer.get_extra_info('peername'), data))\n return json.loads(data.decode())\n\n\n\nasync def write_message(writer, message):\n logging.debug('Sending to {}: {}'.format(writer.get_extra_info('sockname'), message))\n if type(message) != dict:\n raise ValueError('write_message takes a dictionary as the second 
argument')\n try:\n writer.write(json.dumps(message).encode() + MAGIC_NUMBER)\n await writer.drain()\n return True\n except (socket.error, IOError) as e:\n if e.errno == errno.EPIPE:\n logging.error(e)\n else:\n logging.error(\"Other error: {}\".format(e))\n writer.close()\n return False\n"},"avg_line_length":{"kind":"number","value":32.1,"string":"32.1"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.7096573209,"string":"0.709657"}}},{"rowIdx":47265,"cells":{"hexsha":{"kind":"string","value":"d9504bf6bd55f14bbf36d7503f8289596841f780"},"size":{"kind":"number","value":108,"string":"108"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from .pipeline_index_service import PipelineIndexService\nfrom .topic_index_service import TopicIndexService\n"},"avg_line_length":{"kind":"number","value":36,"string":"36"},"max_line_length":{"kind":"number","value":56,"string":"56"},"alphanum_fraction":{"kind":"number","value":0.9074074074,"string":"0.907407"}}},{"rowIdx":47266,"cells":{"hexsha":{"kind":"string","value":"d95f44159ce85d870bd47768845befcafc69f3e9"},"size":{"kind":"number","value":933,"string":"933"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"INBa/2015/Chinkirov_V_V/task_4_29.py"},"max_stars_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"INBa/2015/Chinkirov_V_V/task_4_29.py"},"max_issues_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"INBa/2015/Chinkirov_V_V/task_4_29.py"},"max_forks_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Задача 4. Вариант 28.\n# Напишите программу, которая выводит имя,\n# под которым скрывается Эмиль Эрзог.\n# Дополнительно необходимо вывести область интересов указанной личности,\n# место рождения, годы рождения и смерти (если человек умер),\n# вычислить возраст на данный момент (или момент смерти).\n# Для хранения всех необходимых данных требуется использовать переменные.\n# После вывода информации программа должна дожидаться пока пользователь\n# нажмет Enter для выхода.\n#Чинкиров.В.В.\n# 28.03.2016\nname = \"Эмиль Эрзог\"\ncity = \"Эльбёф,Франция\"\nrod = int (1895)\ndead = int (1934)\nage = int (dead - rod)\ninterest = \"Писатель\"\n\nprint(name+\" наиболее известен как Андреа Моруа - Французский писатель и член Французской академии. 
\")\nprint(\"Место рождения: \"+city)\nprint(\"Год рождения: \"+str(rod))\nprint(\"Год смерти: \"+str(dead))\nprint(\"Возраст смерти: \"+str(age))\nprint(\"Область интересов: \"+interest)\ninput(\"Нажмите Enter для закрытия\")\n"},"avg_line_length":{"kind":"number","value":35.8846153846,"string":"35.884615"},"max_line_length":{"kind":"number","value":102,"string":"102"},"alphanum_fraction":{"kind":"number","value":0.7577706324,"string":"0.757771"}}},{"rowIdx":47267,"cells":{"hexsha":{"kind":"string","value":"79d3fba129da163bfef8d0d4759998082ab4c008"},"size":{"kind":"number","value":1331,"string":"1,331"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"profiles/urls.py"},"max_stars_repo_name":{"kind":"string","value":"Thames1990/BadBatBets"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8dffb69561668b8991bf4103919e4b254d4ca56a"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"profiles/urls.py"},"max_issues_repo_name":{"kind":"string","value":"Thames1990/BadBatBets"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8dffb69561668b8991bf4103919e4b254d4ca56a"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"profiles/urls.py"},"max_forks_repo_name":{"kind":"string","value":"Thames1990/BadBatBets"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8dffb69561668b8991bf4103919e4b254d4ca56a"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls import url\n\nfrom . 
import views\n\napp_name = 'profiles'\n\nurlpatterns = [\n url(\n r'^$',\n views.profile,\n name='profile'\n ),\n\n # Login mechanism\n url(\n r'^login/$',\n views.login_user,\n name='login'\n ),\n\n # Logout mechanism\n url(\n r'^logout/$',\n views.logout_user,\n name='logout'\n ),\n\n # Signup mechanism\n url(\n r'^signup/$',\n views.signup,\n name='signup'\n ),\n\n # Change Password\n url(\n r'^change_password/$',\n views.change_password,\n name='change_password'\n ),\n\n # General terms and conditions\n url(\n r'^general_terms_and_conditions/$',\n views.general_terms_and_conditions_view,\n name='general_terms_and_conditions'\n ),\n\n # Privacy policy\n url(\n r'^privacy_policy/$',\n views.privacy_policy_view,\n name='privacy_policy'\n ),\n\n # Provide feedback\n url(\n r'^feedback/$',\n views.feedback,\n name='feedback'\n ),\n\n # Resolve Feedback\n url(\n r'^feedback/(?P<feedback_id>[0-9]+)/resolve/$',\n views.resolve_feedback,\n name='resolve_feedback'\n ),\n\n # Deposit funds in account\n url(\n r'^payment/$',\n views.payment,\n name='payment'\n ),\n]\n"},"avg_line_length":{"kind":"number","value":17.2857142857,"string":"17.285714"},"max_line_length":{"kind":"number","value":48,"string":"48"},"alphanum_fraction":{"kind":"number","value":0.5251690458,"string":"0.525169"}}},{"rowIdx":47268,"cells":{"hexsha":{"kind":"string","value":"ccde64f635ab8dae261e9151bd5fa024ac46889a"},"size":{"kind":"number","value":6096,"string":"6,096"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pymantic/tests/test_primitives.py"},"max_stars_repo_name":{"kind":"string","value":"dnswd/blazegraph-python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"046a6b47406b0f56d71abc6039f4d7586a1708d2"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":42,"string":"42"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-01-15T14:31:48.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-10T14:32:25.000Z"},"max_issues_repo_path":{"kind":"string","value":"pymantic/tests/test_primitives.py"},"max_issues_repo_name":{"kind":"string","value":"igor-kim/blazegraph-python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7be8d219e00acb51d949bf49aaaed90c2c2344e5"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-10-02T18:36:42.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-09-18T15:48:58.000Z"},"max_forks_repo_path":{"kind":"string","value":"pymantic/tests/test_primitives.py"},"max_forks_repo_name":{"kind":"string","value":"igor-kim/blazegraph-python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7be8d219e00acb51d949bf49aaaed90c2c2344e5"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":11,"string":"11"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-18T09:47:52.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-12-26T06:22:18.000Z"},"content":{"kind":"string","value":"from nose.tools import *\nfrom pymantic.primitives import *\nimport random\n\ndef en(s):\n return Literal(s, \"en\")\n\ndef 
test_to_curie_multi_match():\n \"\"\"Test that the longest match for prefix is used\"\"\"\n namespaces = {'short': \"aa\", 'long': \"aaa\"}\n curie = to_curie(\"aaab\", namespaces)\n print curie\n assert curie == 'long:b' \n\ndef test_simple_add():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n assert t in g\n \ndef test_simple_remove():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n g.remove(t)\n assert t not in g\n \ndef test_match_VVV_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(None, None, None)\n assert t in matches\n\ndef test_match_sVV_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(NamedNode(\"http://example.com\"), None, None)\n assert t in matches\n \ndef test_match_sVo_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(NamedNode(\"http://example.com\"), None, en(\"Never!\"))\n assert t in matches\n \ndef test_match_spV_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"), None)\n assert t in matches\n \ndef test_match_Vpo_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(None, NamedNode(\"http://purl.org/dc/terms/issued\"), en(\"Never!\"))\n assert t in matches\n \ndef test_match_VVo_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(None, None, en(\"Never!\"))\n assert t in matches\n\ndef test_match_VpV_pattern():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),en(\"Never!\"))\n g = Graph()\n g.add(t)\n matches = g.match(None, NamedNode(\"http://purl.org/dc/terms/issued\"), None)\n assert t in matches\n \ndef generate_triples(n=10):\n for i in range(1,n):\n yield Triple(NamedNode(\"http://example/\" + str(random.randint(1,1000))),\n NamedNode(\"http://example/terms/\" + str(random.randint(1,1000))),\n Literal(random.randint(1,1000)))\n\ndef test_10000_triples():\n n = 10000\n g = Graph()\n for t in generate_triples(n):\n g.add(t)\n assert len(g) > n * .9\n matches = g.match(NamedNode(\"http://example.com/42\"), None, None)\n matches = g.match(None, NamedNode(\"http://example/terms/42\"), None)\n matches = g.match(None, None, Literal(42))\n\ndef test_iter_10000_triples():\n n = 10000\n g = Graph()\n triples = set()\n for t in generate_triples(n):\n g.add(t)\n triples.add(t)\n assert len(g) > n * .9\n for t in g:\n triples.remove(t)\n assert len(triples) == 0\n \n# Dataset Tests\n\ndef test_add_quad():\n q = Quad(NamedNode(\"http://example.com/graph\"),NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),Literal(\"Never!\"))\n ds = Dataset()\n ds.add(q)\n assert q in ds\n \ndef test_remove_quad():\n q = 
Quad(NamedNode(\"http://example.com/graph\"),NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),Literal(\"Never!\"))\n ds = Dataset()\n ds.add(q)\n ds.remove(q)\n assert q not in ds\n\ndef test_ds_len():\n n = 10\n ds = Dataset()\n for q in generate_quads(n):\n ds.add(q)\n assert len(ds) == 10\n \ndef test_match_ds_sVV_pattern():\n q = Quad(NamedNode(\"http://example.com\"), \n NamedNode(\"http://purl.org/dc/terms/issued\"),Literal(\"Never!\"),\n NamedNode(\"http://example.com/graph\"))\n ds = Dataset()\n ds.add(q)\n matches = ds.match(subject=NamedNode(\"http://example.com\"))\n assert q in matches\n \ndef test_match_ds_quad_pattern():\n q = Quad(NamedNode(\"http://example.com\"), \n NamedNode(\"http://purl.org/dc/terms/issued\"),Literal(\"Never!\"),\n NamedNode(\"http://example.com/graph\"))\n ds = Dataset()\n ds.add(q)\n matches = ds.match(graph=\"http://example.com/graph\")\n assert q in matches\n \ndef test_add_graph():\n t = Triple(NamedNode(\"http://example.com\"), NamedNode(\"http://purl.org/dc/terms/issued\"),Literal(\"Never!\"))\n g = Graph(\"http://example.com/graph\")\n g.add(t)\n ds = Dataset()\n ds.add_graph(g)\n assert t in ds\n\ndef generate_quads(n):\n for i in range(n):\n yield Quad(NamedNode(\"http://example/\" + str(random.randint(1,1000))),\n NamedNode(\"http://purl.org/dc/terms/\" + str(random.randint(1,100))),\n Literal(random.randint(1,1000)),\n NamedNode(\"http://example/graph/\"+str(random.randint(1,1000))))\n \ndef test_10000_quads():\n n = 10000\n ds = Dataset()\n for q in generate_quads(n):\n ds.add(q)\n assert len(ds) > n * .9\n matches = ds.match(subject=NamedNode(\"http://example.com/42\"), \n graph=NamedNode(\"http://example/graph/42\"))\n\ndef test_iter_10000_quads():\n n = 10000\n ds = Dataset()\n quads = set()\n for q in generate_quads(n):\n ds.add(q)\n quads.add(q)\n assert len(ds) > n * .9\n for quad in ds:\n quads.remove(quad)\n assert len(quads) == 0\n\ndef test_interfaceName():\n assert Literal(\"Bob\", \"en\").interfaceName == \"Literal\"\n assert NamedNode().interfaceName == \"NamedNode\"\n \ndef test_BlankNode_id():\n b1 = BlankNode()\n b2 = BlankNode()\n assert b1.value != b2.value\n"},"avg_line_length":{"kind":"number","value":32.253968254,"string":"32.253968"},"max_line_length":{"kind":"number","value":147,"string":"147"},"alphanum_fraction":{"kind":"number","value":0.6087598425,"string":"0.60876"}}},{"rowIdx":47269,"cells":{"hexsha":{"kind":"string","value":"033cebcbd7d667e2cf8eb5c214611871d49fe72b"},"size":{"kind":"number","value":20012,"string":"20,012"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"wz/ui/gridbase.py"},"max_stars_repo_name":{"kind":"string","value":"gradgrind/WZ"},"max_stars_repo_head_hexsha":{"kind":"string","value":"672d93a3c9d7806194d16d6d5b9175e4046bd068"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"wz/ui/gridbase.py"},"max_issues_repo_name":{"kind":"string","value":"gradgrind/WZ"},"max_issues_repo_head_hexsha":{"kind":"string","value":"672d93a3c9d7806194d16d6d5b9175e4046bd068"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"wz/ui/gridbase.py"},"max_forks_repo_name":{"kind":"string","value":"gradgrind/WZ"},"max_forks_repo_head_hexsha":{"kind":"string","value":"672d93a3c9d7806194d16d6d5b9175e4046bd068"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nui/gridbase.py\n\nLast updated: 2021-10-10\n\nWidget with tiles on grid layout (QGraphicsScene/QGraphicsView).\n\n=+LICENCE=============================\nCopyright 2021 Michael Towers\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n=-LICENCE========================================\n\"\"\"\n\n##### Configuration #####################\nFONT_DEFAULT = 'Droid Sans'\nFONT_SIZE_DEFAULT = 11\nFONT_COLOUR = '442222' # rrggbb\nBORDER_COLOUR = '000088' # rrggbb\nMARK_COLOUR = 'E00000' # rrggbb\n\n# Line width for borders\nUNDERLINE_WIDTH = 3.0\nBORDER_WIDTH = 1.0\n\nSCENE_MARGIN = 10.0 # Margin around content in GraphicsView widgets\n\n#####################\n\n### Messages\n_TILE_OUT_OF_BOUNDS = (\"Kachel außerhalb Tabellenbereich:\\n\"\n \" Zeile {row}, Höhe {rspan}, Spalte {col}, Breite {cspan}\")\n_NOTSTRING = \"In : Zeichenkette erwartet: {val}\"\n\n#####################################################\n\nimport sys, os, copy\n\nfrom PySide6.QtWidgets import QGraphicsView, QGraphicsScene, \\\n QGraphicsRectItem, QGraphicsSimpleTextItem, QGraphicsLineItem\nfrom PySide6.QtGui import (QFont, QPen, QColor, QBrush, QTransform,\n QPainter, QPdfWriter, QPageLayout)\nfrom PySide6.QtCore import Qt, QMarginsF, QRectF, QBuffer, QByteArray\n\nclass GridError(Exception):\n pass\n\n### ---\n\nclass GridView(QGraphicsView):\n \"\"\"This is the \"view\" widget for the grid.\n The actual grid is implemented as a \"scene\".\n \"\"\"\n def __init__(self):\n self._scale = 1.0\n super ().__init__()\n # Change update mode: The default, MinimalViewportUpdate, seems\n # to cause artefacts to be left, i.e. it updates too little.\n # Also BoundingRectViewportUpdate seems not to be 100% effective.\n #self.setViewportUpdateMode(self.BoundingRectViewportUpdate)\n self.setViewportUpdateMode(self.FullViewportUpdate)\n self.ldpi = self.logicalDpiX()\n if self.logicalDpiY() != self.ldpi:\n REPORT('WARNING', \"LOGICAL DPI different for x and y\")\n self.MM2PT = self.ldpi / 25.4\n#\n def set_scene(self, scene):\n \"\"\"Set the QGraphicsScene for this view. 
The size will be fixed\n to that of the initial <scene> (to prevent it from being\n altered by pop-ups).\n <scene> may be <None>, to remove the current scene.\n \"\"\"\n self.setScene(scene)\n if scene:\n self.setSceneRect(scene._sceneRect)\n#\n def mousePressEvent(self, event):\n point = event.pos()\n# print(\"POS:\", point, self.mapToGlobal(point), self.itemAt(point))\n# The Tile may not be the top item.\n items = self.items(point)\n button = event.button()\n if items:\n for item in items:\n # Give all items at this point a chance to react, starting\n # with the topmost. An item can break the chain by\n # returning a false value.\n try:\n if button == Qt.LeftButton:\n if not item.leftclick():\n return\n elif button == Qt.RightButton:\n if not item.rightclick():\n return\n except AttributeError:\n pass\n#\n ### View scaling\n def scaleUp (self):\n self.scale(1)\n#\n def scaleDn (self):\n self.scale(-1)\n#\n def scale(self, delta):\n t = QTransform()\n self._scale += self._scale * delta / 10\n t.scale(self._scale, self._scale)\n self.setTransform(t)\n ### ---------------\n\n###\n\nclass GridViewRescaling(GridView):\n \"\"\"A QGraphicsView that automatically adjusts the scaling of its\n scene to fill the viewing window.\n \"\"\"\n def __init__(self):\n super().__init__()\n # Apparently it is a good idea to disable scrollbars when using\n # this resizing scheme. With this resizing scheme they would not\n # appear anyway, so this doesn't lose any features!\n self.setHorizontalScrollBarPolicy (Qt.ScrollBarAlwaysOff)\n self.setVerticalScrollBarPolicy (Qt.ScrollBarAlwaysOff)\n\n def resizeEvent(self, event):\n self.resize()\n return super().resizeEvent(event)\n\n def resize(self, qrect=None):\n if qrect == None:\n qrect = self.sceneRect()\n self.fitInView(qrect, Qt.KeepAspectRatio)\n\n###\n\nclass GridBase(QGraphicsScene):\n def __init__(self, gview, rowheights, columnwidths):\n \"\"\"Set the grid size.\n <columnwidths>: a list of column widths (mm)\n <rowheights>: a list of row heights (mm)\n Rows and columns are 0-indexed.\n \"\"\"\n super().__init__()\n self._gview = gview\n self._styles = {'*': CellStyle(FONT_DEFAULT, FONT_SIZE_DEFAULT,\n align = 'c', border = 1, mark = MARK_COLOUR)\n }\n self.xmarks = [0.0]\n x = 0.0\n for c in columnwidths:\n x += c * self._gview.MM2PT\n self.xmarks.append(x)\n self.ymarks = [0.0]\n y = 0.0\n for r in rowheights:\n y += r * self._gview.MM2PT\n self.ymarks.append(y)\n # Allow a little margin\n self._sceneRect = QRectF(-SCENE_MARGIN, -SCENE_MARGIN,\n x + 2 * SCENE_MARGIN, y + 2 * SCENE_MARGIN)\n#\n def style(self, name):\n return self._styles[name]\n#\n def new_style(self, name, base = None, **params):\n if base:\n style0 = self._styles[base]\n self._styles[name] = style0.copy(**params)\n else:\n self._styles[name] = CellStyle(params.pop('font', None),\n params.pop('size', None), **params)\n#\n def ncols(self):\n return len(self.xmarks) - 1\n#\n def nrows(self):\n return len(self.ymarks) - 1\n#\n def screen_coordinates(self, x, y):\n \"\"\"Return the screen coordinates of the given scene point.\n \"\"\"\n viewp = self._gview.mapFromScene(x, y)\n return self._gview.mapToGlobal(viewp)\n#\n def basic_tile(self, row, col, tag, text, style, cspan = 1, rspan = 1):\n \"\"\"Add a basic tile to the grid, checking coordinates and\n converting row + col to x + y point-coordinates for the\n <Tile> class.\n \"\"\"\n # Check bounds\n if (row < 0 or col < 0\n or (row + rspan) >= len(self.ymarks)\n or (col + cspan) >= len(self.xmarks)):\n raise GridError(_TILE_OUT_OF_BOUNDS.format(\n row = row, col = col, cspan = cspan, rspan 
= rspan))\n x = self.xmarks[col]\n y = self.ymarks[row]\n w = self.xmarks[col + cspan] - x\n h = self.ymarks[row + rspan] - y\n t = Tile(self, tag, x, y, w, h, text, self._styles[style])\n self.addItem(t)\n return t\n#\n ### pdf output\n def setPdfMargins(self, left = 15, top = 15, right = 15, bottom = 15):\n self._pdfmargins = (left, top, right, bottom)\n return self._pdfmargins\n#\n def pdfMargins(self):\n try:\n return self._pdfmargins\n except AttributeError:\n return self.setPdfMargins()\n#\n def to_pdf(self, filepath):\n \"\"\"Produce and save a pdf of the table.\n The output orientation is selected according to the aspect ratio\n of the table. If the table is too big for the page area, it will\n be shrunk to fit.\n \"\"\"\n if not filepath.endswith('.pdf'):\n filepath += '.pdf'\n printer = QPdfWriter(filepath)\n printer.setPageSize(printer.A4)\n printer.setPageMargins(QMarginsF(*self.pdfMargins()),\n QPageLayout.Millimeter)\n sceneRect = self._sceneRect\n sw = sceneRect.width()\n sh = sceneRect.height()\n if sw > sh:\n printer.setPageOrientation(QPageLayout.Orientation.Landscape)\n painter = QPainter()\n painter.begin(printer)\n scaling = printer.logicalDpiX() / self._gview.ldpi\n # Do drawing with painter\n page_layout = printer.pageLayout()\n pdf_rect = page_layout.paintRect(QPageLayout.Point)\n pdf_w = pdf_rect.width()\n pdf_h = pdf_rect.height()\n if sw > pdf_w or sh > pdf_h:\n # Shrink to fit page\n self.render(painter)\n else:\n # Scale resolution to keep size\n pdf_rect.setWidth(sw * scaling)\n pdf_rect.setHeight(sh * scaling)\n self.render(painter, pdf_rect)\n painter.end()\n return filepath\n#\n# An earlier, alternative implementation of the pdf writer:\n def to_pdf0(self, filepath):\n \"\"\"Produce and save a pdf of the table.\n The output orientation is selected according to the aspect ratio\n of the table. 
If the table is too big for the page area, it will\n be shrunk to fit.\n \"\"\"\n qbytes = QByteArray()\n qbuf = QBuffer(qbytes)\n qbuf.open(qbuf.WriteOnly)\n printer = QPdfWriter(qbuf)\n printer.setPageSize(printer.A4)\n printer.setPageMargins(QMarginsF(*self.pdfMargins()),\n QPageLayout.Millimeter)\n sceneRect = self._sceneRect\n sw = sceneRect.width()\n sh = sceneRect.height()\n if sw > sh:\n printer.setPageOrientation(QPageLayout.Orientation.Landscape)\n pdf_dpmm = printer.resolution() / 25.4 # pdf resolution, dots per mm\n scene_dpmm = self._gview.MM2PT # scene resolution, dots per mm\n natural_scale = pdf_dpmm / scene_dpmm\n page_layout = printer.pageLayout()\n pdf_rect = page_layout.paintRect(QPageLayout.Millimeter)\n swmm = sw / self._gview.MM2PT\n shmm = sh / self._gview.MM2PT\n painter = QPainter(printer)\n pdf_wmm = pdf_rect.width()\n pdf_hmm = pdf_rect.height()\n if swmm > pdf_wmm or shmm > pdf_hmm:\n # Shrink to fit page\n self.render(painter)\n else:\n # Scale resolution to keep size\n pdf_rect.setWidth(sw * natural_scale)\n pdf_rect.setHeight(sh * natural_scale)\n self.render(painter, pdf_rect)\n painter.end()\n qbuf.close()\n # Write resulting file\n if not filepath.endswith('.pdf'):\n filepath += '.pdf'\n with open(filepath, 'wb') as fh:\n fh.write(bytes(qbytes))\n return filepath\n\n###\n\nclass CellStyle:\n \"\"\"Handle various aspects of cell styling.\n Also manage caches for fonts, pens and brushes.\n \"\"\"\n _fonts = {}\n _brushes = {}\n _pens = {}\n#\n @classmethod\n def getFont(cls, fontFamily, fontSize, fontBold, fontItalic):\n ftag = (fontFamily, fontSize, fontBold, fontItalic)\n try:\n return cls._fonts[ftag]\n except:\n pass\n font = QFont()\n if fontFamily:\n font.setFamily(fontFamily)\n if fontSize:\n font.setPointSizeF(fontSize)\n if fontBold:\n font.setBold(True)\n if fontItalic:\n font.setItalic(True)\n cls._fonts[ftag] = font\n return font\n#\n @classmethod\n def getPen(cls, width, colour = None):\n \"\"\"Manage a cache for pens of different width and colour.\n \"\"\"\n if width:\n wc = (width, colour or BORDER_COLOUR)\n try:\n return cls._pens[wc]\n except AttributeError:\n cls._pens = {}\n except KeyError:\n pass\n pen = QPen('#FF' + wc[1])\n pen.setWidthF(wc[0])\n cls._pens[wc] = pen\n return pen\n else:\n try:\n return cls._noPen\n except AttributeError:\n cls._noPen = QPen()\n cls._noPen.setStyle(Qt.NoPen)\n return cls._noPen\n#\n @classmethod\n def getBrush(cls, colour):\n \"\"\"Manage a cache for brushes of different colour.\n <colour> is a colour in the form 'RRGGBB'.\n \"\"\"\n try:\n return cls._brushes[colour or FONT_COLOUR]\n except:\n pass\n brush = QBrush(QColor('#FF' + (colour or FONT_COLOUR)))\n cls._brushes[colour] = brush\n return brush\n#\n def __init__(self, font, size, align = 'c', highlight = None,\n bg = None, border = 1, border_colour = None, mark = None):\n \"\"\"\n <font> is the name of the font (<None> => default, not recommended,\n unless the cell is to contain no text).\n <size> is the size of the font (<None> => default, not recommended,\n unless the cell is to contain no text).\n <align> is the horizontal (l, c or r) OR vertical (b, m, t) alignment.\n Vertical alignment is for rotated text (-90° only).\n <highlight> can set bold, italic and font colour: 'bi:RRGGBB'. 
All bits\n are optional, but the colon must be present if a colour is given.\n <bg> can set the background colour ('RRGGBB').\n <border>: Only three border types are supported here:\n 0: none\n 1: all sides\n 2: (thicker) underline\n <border_colour>: 'RRGGBB', default is <BORDER_COLOUR>.\n <mark> is a colour ('RRGGBB') which can be selected as an\n \"alternative\" font colour.\n \"\"\"\n # Font\n self.setFont(font, size, highlight)\n self.colour_marked = mark\n # Alignment\n self.setAlign(align)\n # Background colour\n self.bgColour = self.getBrush(bg) if bg else None\n # Border\n self.border = border\n self.border_colour = border_colour\n#\n def setFont(self, font, size, highlight):\n self._font, self._size, self._highlight = font, size, highlight\n try:\n emph, clr = highlight.split(':')\n except:\n emph, clr = highlight or '', None\n self.fontColour = self.getBrush(clr)\n self.font = self.getFont(font, size, 'b' in emph, 'i' in emph)\n#\n def setAlign(self, align):\n if align in 'bmt':\n # Vertical\n self.alignment = ('c', align, True)\n else:\n self.alignment = (align, 'm', False)\n#\n def copy(self, font = None, size = None, align = None,\n highlight = None, mark = None, bg = None, border = None):\n \"\"\"Make a copy of this style, but with changes specified by the\n parameters.\n Note that a change to a 'None' parameter value is not possible.\n \"\"\"\n newstyle = copy.copy(self)\n if font or size or highlight:\n newstyle.setFont(font or self._font,\n size or self._size, highlight or self._highlight)\n if mark:\n newstyle.colour_marked = mark\n if align:\n newstyle.setAlign(align)\n if bg:\n newstyle.bgColour = self.getBrush(bg)\n if border != None:\n newstyle.border = border\n return newstyle\n\n###\n\nclass Tile(QGraphicsRectItem):\n \"\"\"The graphical representation of a table cell.\n This cell can span rows and columns.\n It contains a simple text element.\n Both cell and text can be styled to a limited extent (see <CellStyle>).\n \"\"\"\n def __init__(self, grid, tag, x, y, w, h, text, style):\n self._style = style\n self._grid = grid\n self.tag = tag\n self.height0 = h\n self.width0 = w\n super().__init__(0, 0, w, h)\n self.setFlag(self.ItemClipsChildrenToShape, True)\n self.setPos(x, y)\n\n # Background colour\n if style.bgColour != None:\n self.setBrush(style.bgColour)\n\n # Border\n if style.border == 1:\n # Set the pen for the rectangle boundary\n pen0 = CellStyle.getPen(BORDER_WIDTH, style.border_colour)\n else:\n # No border for the rectangle\n pen0 = CellStyle.getPen(None)\n if style.border != 0:\n # Thick underline\n line = QGraphicsLineItem(self)\n line.setPen(CellStyle.getPen(UNDERLINE_WIDTH,\n style.border_colour))\n line.setLine(0, h, w, h)\n self.setPen(pen0)\n\n # Alignment and rotation\n self.halign, self.valign, self.rotation = style.alignment\n # Text\n self.textItem = QGraphicsSimpleTextItem(self)\n self.textItem.setFont(style.font)\n self.textItem.setBrush(style.fontColour)\n self.setText(text or '')\n#\n def mark(self):\n if self._style.colour_marked:\n self.textItem.setBrush(self._style.getBrush(self._style.colour_marked))\n#\n def unmark(self):\n self.textItem.setBrush(self._style.fontColour)\n#\n def margin(self):\n return 0.4 * self._grid._gview.MM2PT\n#\n def value(self):\n return self._text\n#\n def setText(self, text):\n if type(text) != str:\n raise GridError(_NOTSTRING.format(val = repr(text)))\n self._text = text\n self.textItem.setText(text)\n self.textItem.setScale(1)\n w = self.textItem.boundingRect().width()\n h = self.textItem.boundingRect().height()\n if text:\n scale = 1\n maxw = self.width0 - 
            maxh = self.height0 - self.margin() * 2\n            if self.rotation:\n                maxh -= self.margin() * 4\n                if w > maxh:\n                    scale = maxh / w\n                if h > maxw:\n                    _scale = maxw / h\n                    if _scale < scale:\n                        scale = _scale\n                if scale < 0.6:\n                    self.textItem.setText('###')\n                    scale = (maxh /\n                            self.textItem.boundingRect().width())\n                if scale < 1:\n                    self.textItem.setScale(scale)\n                trf = QTransform().rotate(-90)\n                self.textItem.setTransform(trf)\n            else:\n                maxw -= self.margin() * 4\n                if w > maxw:\n                    scale = maxw / w\n                if h > maxh:\n                    _scale = maxh / h\n                    if _scale < scale:\n                        scale = _scale\n                if scale < 0.6:\n                    self.textItem.setText('###')\n                    scale = (maxw /\n                            self.textItem.boundingRect().width())\n                if scale < 1:\n                    self.textItem.setScale(scale)\n# This print line can help find box size problems:\n#            print(\"BOX-SCALE: %5.3f (%s) *** w: %6.2f / %6.2f *** h: %6.2f / %6.2f\"\n#                    % (scale, text, w, maxw, h, maxh))\n        bdrect = self.textItem.mapRectToParent(\n                self.textItem.boundingRect())\n        yshift = - bdrect.top() if self.rotation else 0.0\n        w = bdrect.width()\n        h = bdrect.height()\n        xshift = 0.0\n        if self.halign == 'l':\n            xshift += self.margin()\n        elif self.halign == 'r':\n            xshift += self.width0 - self.margin() - w\n        else:\n            xshift += (self.width0 - w) / 2\n        if self.valign == 't':\n            yshift += self.margin()\n        elif self.valign == 'b':\n            yshift += self.height0 - self.margin() - h\n        else:\n            yshift += (self.height0 - h) / 2\n        self.textItem.setPos(xshift, yshift)\n#\n    def leftclick(self):\n        return self._grid.tile_left_clicked(self)\n#\n    def rightclick(self):\n        return self._grid.tile_right_clicked(self)\n\n\n#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#\n#TODO ...\n"},"avg_line_length":{"kind":"number","value":34.8034782609,"string":"34.803478"},"max_line_length":{"kind":"number","value":84,"string":"84"},"alphanum_fraction":{"kind":"number","value":0.5601639017,"string":"0.560164"}}},{"rowIdx":47270,"cells":{"hexsha":{"kind":"string","value":"064ba62a570f85c226bd1d351ff184276e78c21f"},"size":{"kind":"number","value":10283,"string":"10,283"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py"},"max_stars_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-11-11T07:57:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-28T08:26:53.000Z"},"max_issues_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py"},"max_issues_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-09-05T21:47:07.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-09-17T18:10:45.000Z"},"max_forks_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py"},"max_forks_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":11,"string":"11"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-07-20T00:16:32.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-01-11T14:17:48.000Z"},"content":{"kind":"string","value":"# Copyright (C) Microsoft Corporation. All rights reserved.\n\n# This program is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom ipmicmd_library import * \n\ndef get_server_bios_config(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\",completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface,completion_code.failure)\n \n ipmi_cmd = 'ocsoem biosconfig' # IPMI command to get server bios config details\n cmdinterface = interface + ' ' + ipmi_cmd\n \n bios_config = parse_get_bios_config(cmdinterface, \"getserverbiosconfig\")\n \n if bios_config is None or not bios_config: # Check empty or none\n #return set_failure_dict(\"Empty data for biosconfig\", \"-1\")\n return set_failure_dict(\"Empty data for biosconfig\",completion_code.failure)\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"getbiosconfig Exception: \", e),completion_code.failure)\n\n return bios_config \n\ndef set_server_bios_config(serverid, majorconfig, minorconfig):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\",completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface,completion_code.failure)\n \n ipmi_cmd = 'ocsoem setbiosconfig' + ' ' + str(majorconfig) + ' ' + str(minorconfig) # IPMI command to set server bios config details\n cmdinterface = interface + ' ' + ipmi_cmd\n \n bios_config = parse_set_bios_config(cmdinterface, \"setserverbiosconfig\")\n \n if bios_config is None or not bios_config: # Check empty or none\n return set_failure_dict(\"Empty data for setbiosconfig\",completion_code.failure)\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"setbiosconfig Exception: \", e),completion_code.failure)\n\n return bios_config \n\ndef get_bios_code(serverid, version):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\",completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n\n if \"Failed:\" in interface:\n return set_failure_dict(interface,completion_code.failure)\n \n ipmi_cmd = 
'ocsoem bioscode' + ' ' + version # IPMI command to get server bios code details\n \n cmdinterface = interface + ' ' + ipmi_cmd\n \n bios_code = parse_bioscode(cmdinterface, \"getserverbioscode\")\n \n if bios_code is None or not bios_code: # Check empty or none\n return set_failure_dict(\"Empty data for getserverbioscode\",completion_code.failure)\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"getbioscode Exception: \", e),completion_code.failure)\n\n return bios_code \n \n\ndef parse_get_bios_config(interface, command): \n try: \n completionstate = True\n output = call_ipmi(interface, command) \n \n if \"ErrorCode\" in output:\n return output\n \n biosconfigrsp = {}\n biosconfigrsp[\"AvailableConfigurations\"] = {}\n \n if(output['status_code'] == 0):\n biosdata = output['stdout'].split('\\n\\n') \n #Gets current and chosen config details from output\n current_config_details = biosdata.pop(0)\n currentconfig = current_config_details.split('\\n')\n \n for cfgval in currentconfig:\n if \"Current BIOS Configuration\" in cfgval:\n biosconfigrsp[\"Current BIOS Configuration\"] = cfgval.split(\":\")[-1]\n elif \"Chosen BIOS Configuration\" in cfgval:\n biosconfigrsp[\"Chosen BIOS Configuration\"] = cfgval.split(\":\")[-1]\n elif \"Available Configuration Name\" in cfgval:\n biosconfigrsp[\"AvailableConfigName\"] = cfgval.split(\":\")[-1]\n \n # Gets all available configuration details\n for availablecfg in biosdata:\n configdata = availablecfg.split('\\n') \n \n config_value= filter(None, configdata)\n # Skipping empty lists if any\n if len(config_value) == 0:\n break\n else: \n if config_value[0].lower().strip('-').strip() == \"Available Configurations\".lower(): \n available_config_data = availablecfg.split('*') \n available_config_value= filter(None, available_config_data) \n config_info = get_config_data(available_config_value) \n \n if completion_code.cc_key in config_info.keys():\n completionstate &= False\n biosconfigrsp[\"AvailableConfigurations\"] = None\n else:\n biosconfigrsp[\"AvailableConfigurations\"] = config_info\n \n \n if(completionstate):\n biosconfigrsp[completion_code.cc_key] = completion_code.success\n else:\n biosconfigrsp[completion_code.cc_key] = completion_code.failure\n \n return biosconfigrsp\n \n else:\n error_data = output['stderr'].split('\\n') \n biosconfigrsp[completion_code.cc_key] = completion_code.failure \n \n for data in error_data:\n if \"Error\" in data:\n biosconfigrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n biosconfigrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n return biosconfigrsp\n \n except Exception,e:\n #log.exception(\"GetserverBiosConfig: Exception error:\" ,e) \n return set_failure_dict((\"parse_get_bios_config() Exception: \",e),completion_code.failure)\n \ndef get_config_data(configdata):\n try:\n config_rsp = {}\n config_id = 1 \n \n for value in configdata:\n config_data = value.split('\\n') \n config_info = filter(None, config_data) # Removes empty strings \n # Skipping empty lists if any \n if len(config_info) == 0:\n break \n \n config_rsp[config_id] = {} \n for value in config_info: \n if \"ConfigName\" in value:\n config_rsp[config_id][\"Config Name\"] = value.split(\":\")[-1].strip()\n elif \"ConfigValue\" in value:\n config_rsp[config_id][\"Config Value\"] = value.split(\":\")[-1].strip()\n config_id = config_id + 1\n \n except Exception,e:\n config_rsp[completion_code.cc_key] = completion_code.failure\n 
config_rsp[completion_code.desc] = \"Get available config data, Exception: \", e\n \n return config_rsp \n \ndef parse_set_bios_config(interface, command):\n try: \n output = call_ipmi(interface, command) \n \n if \"ErrorCode\" in output:\n return output\n \n setbiosconfigrsp = {}\n \n if(output['status_code'] == 0):\n sdata = output['stdout'].split('\\n') \n completionstate = sdata.pop(0)\n if \"Completion Status\" in completionstate:\n setbiosconfigrsp[completion_code.cc_key] = completionstate.split(\":\")[-1] \n return setbiosconfigrsp \n \n else:\n error_data = output['stderr'].split('\\n') \n setbiosconfigrsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n setbiosconfigrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n setbiosconfigrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n return setbiosconfigrsp \n \n except Exception, e:\n #log.exception(\"Exception error is: \",e)\n return set_failure_dict((\"parse_set_bios_config() Exception \",e),completion_code.failure)\n \ndef parse_bioscode(interface, command): \n try: \n output = call_ipmi(interface, command) \n \n if \"ErrorCode\" in output:\n return output\n\n biosrsp = {}\n \n if(output['status_code'] == 0):\n sdata = output['stdout'].split('\\n') \n biosrsp[\"Bios Code\"] = str(sdata[0]) \n biosrsp[completion_code.cc_key] = completion_code.success\n \n return biosrsp\n else:\n error_data = output['stderr'].split('\\n') \n biosrsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n biosrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n biosrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n return biosrsp \n \n except Exception, e:\n #log.exception(\"Exception error is: %s \" %e)\n #print \"Exception: \", e\n return set_failure_dict((\"ParseGetBiosCodeResult() Exception: \",e),completion_code.failure)\n \n"},"avg_line_length":{"kind":"number","value":42.143442623,"string":"42.143443"},"max_line_length":{"kind":"number","value":142,"string":"142"},"alphanum_fraction":{"kind":"number","value":0.5617037829,"string":"0.561704"}}},{"rowIdx":47271,"cells":{"hexsha":{"kind":"string","value":"d1cb1afdc983b8fa4778f32f8458e3c566262013"},"size":{"kind":"number","value":6272,"string":"6,272"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"official/cv/unet/src/unet_nested/unet_model.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"official/cv/unet/src/unet_nested/unet_model.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"official/cv/unet/src/unet_nested/unet_model.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n# Model of UnetPlusPlus\n\nimport mindspore.nn as nn\nimport mindspore.ops as P\nfrom .unet_parts import UnetConv2d, UnetUp\n\n\nclass NestedUNet(nn.Cell):\n \"\"\"\n Nested unet\n \"\"\"\n def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True, use_ds=True):\n super(NestedUNet, self).__init__()\n self.in_channel = in_channel\n self.n_class = n_class\n self.feature_scale = feature_scale\n self.use_deconv = use_deconv\n self.use_bn = use_bn\n self.use_ds = use_ds\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # Down Sample\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode=\"same\")\n self.conv00 = UnetConv2d(self.in_channel, filters[0], self.use_bn)\n self.conv10 = UnetConv2d(filters[0], filters[1], self.use_bn)\n self.conv20 = UnetConv2d(filters[1], filters[2], self.use_bn)\n self.conv30 = UnetConv2d(filters[2], filters[3], self.use_bn)\n self.conv40 = UnetConv2d(filters[3], filters[4], self.use_bn)\n\n # Up Sample\n self.up_concat01 = UnetUp(filters[1], filters[0], self.use_deconv, 2)\n self.up_concat11 = UnetUp(filters[2], filters[1], self.use_deconv, 2)\n self.up_concat21 = UnetUp(filters[3], filters[2], self.use_deconv, 2)\n self.up_concat31 = UnetUp(filters[4], filters[3], self.use_deconv, 2)\n\n self.up_concat02 = UnetUp(filters[1], filters[0], self.use_deconv, 3)\n self.up_concat12 = UnetUp(filters[2], filters[1], self.use_deconv, 3)\n self.up_concat22 = UnetUp(filters[3], filters[2], self.use_deconv, 3)\n\n self.up_concat03 = UnetUp(filters[1], filters[0], self.use_deconv, 4)\n self.up_concat13 = UnetUp(filters[2], filters[1], self.use_deconv, 4)\n\n self.up_concat04 = UnetUp(filters[1], filters[0], self.use_deconv, 5)\n\n # Finale Convolution\n self.final1 = nn.Conv2d(filters[0], n_class, 1)\n self.final2 = nn.Conv2d(filters[0], n_class, 1)\n self.final3 = nn.Conv2d(filters[0], n_class, 1)\n 
self.final4 = nn.Conv2d(filters[0], n_class, 1)\n self.stack = P.Stack(axis=0)\n\n def construct(self, inputs):\n x00 = self.conv00(inputs) # channel = filters[0]\n x10 = self.conv10(self.maxpool(x00)) # channel = filters[1]\n x20 = self.conv20(self.maxpool(x10)) # channel = filters[2]\n x30 = self.conv30(self.maxpool(x20)) # channel = filters[3]\n x40 = self.conv40(self.maxpool(x30)) # channel = filters[4]\n\n x01 = self.up_concat01(x10, x00) # channel = filters[0]\n x11 = self.up_concat11(x20, x10) # channel = filters[1]\n x21 = self.up_concat21(x30, x20) # channel = filters[2]\n x31 = self.up_concat31(x40, x30) # channel = filters[3]\n\n x02 = self.up_concat02(x11, x00, x01) # channel = filters[0]\n x12 = self.up_concat12(x21, x10, x11) # channel = filters[1]\n x22 = self.up_concat22(x31, x20, x21) # channel = filters[2]\n\n x03 = self.up_concat03(x12, x00, x01, x02) # channel = filters[0]\n x13 = self.up_concat13(x22, x10, x11, x12) # channel = filters[1]\n\n x04 = self.up_concat04(x13, x00, x01, x02, x03) # channel = filters[0]\n\n final1 = self.final1(x01)\n final2 = self.final2(x02)\n final3 = self.final3(x03)\n final4 = self.final4(x04)\n\n if self.use_ds:\n final = self.stack((final1, final2, final3, final4))\n return final\n return final4\n\n\nclass UNet(nn.Cell):\n \"\"\"\n Simple UNet with skip connection\n \"\"\"\n def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True):\n super(UNet, self).__init__()\n self.in_channel = in_channel\n self.n_class = n_class\n self.feature_scale = feature_scale\n self.use_deconv = use_deconv\n self.use_bn = use_bn\n\n filters = [64, 128, 256, 512, 1024]\n filters = [int(x / self.feature_scale) for x in filters]\n\n # Down Sample\n self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode=\"same\")\n self.conv0 = UnetConv2d(self.in_channel, filters[0], self.use_bn)\n self.conv1 = UnetConv2d(filters[0], filters[1], self.use_bn)\n self.conv2 = UnetConv2d(filters[1], filters[2], self.use_bn)\n self.conv3 = UnetConv2d(filters[2], filters[3], self.use_bn)\n self.conv4 = UnetConv2d(filters[3], filters[4], self.use_bn)\n\n # Up Sample\n self.up_concat1 = UnetUp(filters[1], filters[0], self.use_deconv, 2)\n self.up_concat2 = UnetUp(filters[2], filters[1], self.use_deconv, 2)\n self.up_concat3 = UnetUp(filters[3], filters[2], self.use_deconv, 2)\n self.up_concat4 = UnetUp(filters[4], filters[3], self.use_deconv, 2)\n\n # Finale Convolution\n self.final = nn.Conv2d(filters[0], n_class, 1)\n\n def construct(self, inputs):\n x0 = self.conv0(inputs) # channel = filters[0]\n x1 = self.conv1(self.maxpool(x0)) # channel = filters[1]\n x2 = self.conv2(self.maxpool(x1)) # channel = filters[2]\n x3 = self.conv3(self.maxpool(x2)) # channel = filters[3]\n x4 = self.conv4(self.maxpool(x3)) # channel = filters[4]\n\n up4 = self.up_concat4(x4, x3)\n up3 = self.up_concat3(up4, x2)\n up2 = self.up_concat2(up3, x1)\n up1 = self.up_concat1(up2, x0)\n\n final = self.final(up1)\n\n return 
final\n"},"avg_line_length":{"kind":"number","value":42.3783783784,"string":"42.378378"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.6159119898,"string":"0.615912"}}},{"rowIdx":47272,"cells":{"hexsha":{"kind":"string","value":"ee3d6d3e804f9ef1a9f8dd5e3ba587f570138ac4"},"size":{"kind":"number","value":90,"string":"90"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"examples/helloWorld.py"},"max_stars_repo_name":{"kind":"string","value":"Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7fcae0e43de51843565c2403fa66da26cb79a04b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-02-29T09:22:05.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-05-16T23:10:38.000Z"},"max_issues_repo_path":{"kind":"string","value":"examples/helloWorld.py"},"max_issues_repo_name":{"kind":"string","value":"Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7fcae0e43de51843565c2403fa66da26cb79a04b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-01-20T20:58:28.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2017-02-06T08:28:30.000Z"},"max_forks_repo_path":{"kind":"string","value":"examples/helloWorld.py"},"max_forks_repo_name":{"kind":"string","value":"Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7fcae0e43de51843565c2403fa66da26cb79a04b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-01-20T20:02:57.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-03-10T20:21:59.000Z"},"content":{"kind":"string","value":"from mcpi import minecraft\n\nmc = minecraft.Minecraft.create()\nmc.postToChat(\"Hello World\")"},"avg_line_length":{"kind":"number","value":22.5,"string":"22.5"},"max_line_length":{"kind":"number","value":33,"string":"33"},"alphanum_fraction":{"kind":"number","value":0.7888888889,"string":"0.788889"}}},{"rowIdx":47273,"cells":{"hexsha":{"kind":"string","value":"c99b1c8bbe063bb3a62611598e7d312d2aee21ba"},"size":{"kind":"number","value":298,"string":"298"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"spht/urls.py"},"max_stars_repo_name":{"kind":"string","value":"consbio/spht"},"max_stars_repo_head_hexsha":{"kind":"string","value":"96ec6a0931851b33eace08720d4d18ab34775a52"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-08-04T21:13:41.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-08-04T21:13:41.000Z"},"max_issues_repo_path":{"kind":"string","value":"spht/urls.py"},"max_issues_repo_name":{"kind":"string","value":"consbio/spht"},"max_issues_repo_head_hexsha":{"kind":"string","value":"96ec6a0931851b33eace08720d4d18ab34775a52"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_issues_count":{"kind":"number","value":23,"string":"23"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-04-12T20:43:15.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-10T12:10:53.000Z"},"max_forks_repo_path":{"kind":"string","value":"spht/urls.py"},"max_forks_repo_name":{"kind":"string","value":"consbio/spht"},"max_forks_repo_head_hexsha":{"kind":"string","value":"96ec6a0931851b33eace08720d4d18ab34775a52"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nfrom spht.views import IntersectView\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='spht/tool.html')),\n url(r'^intersect/tiles/(?P\\d+)/(?P\\d+)/(?P\\d+).png$', IntersectView.as_view())\n]\n"},"avg_line_length":{"kind":"number","value":29.8,"string":"29.8"},"max_line_length":{"kind":"number","value":91,"string":"91"},"alphanum_fraction":{"kind":"number","value":0.6979865772,"string":"0.697987"}}},{"rowIdx":47274,"cells":{"hexsha":{"kind":"string","value":"094ea321a49ca2dc60c8a40c13f143c9e2cd5be6"},"size":{"kind":"number","value":5306,"string":"5,306"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"kts/ui/static.py"},"max_stars_repo_name":{"kind":"string","value":"konodyuk/kts"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":18,"string":"18"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-02-14T13:10:07.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-26T07:10:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"kts/ui/static.py"},"max_issues_repo_name":{"kind":"string","value":"konodyuk/kts"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-02-17T14:06:42.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-09-15T18:05:54.000Z"},"max_forks_repo_path":{"kind":"string","value":"kts/ui/static.py"},"max_forks_repo_name":{"kind":"string","value":"konodyuk/kts"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7"},"max_forks_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-15T13:12:42.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-04-15T14:05:54.000Z"},"content":{"kind":"string","value":"CSS_STYLE = \"\"\"\n.kts {{\n line-height: 1.6;\n}}\n.kts * {{\n box-sizing: content-box;\n}}\n.kts-wrapper {{\n display: inline-flex;\n flex-direction: column;\n background-color: {first};\n padding: 10px;\n border-radius: 20px;\n}}\n.kts-wrapper-border {{\n border: 0px solid {second};\n}}\n.kts-pool {{\n display: flex;\n flex-wrap: wrap;\n background-color: {second};\n padding: 5px;\n border-radius: 20px;\n margin: 5px;\n}}\n.kts-field {{\n text-align: left;\n border-radius: 15px;\n padding: 5px 15px;\n margin: 5px;\n display: inline-block;\n}}\n.kts-field-bg {{\n background-color: {second};\n}}\n.kts-field-bold {{\n font-weight: bold;\n}}\n.kts-field-third {{\n color: {third};\n}}\n.kts-field-accent {{\n color: {accent};\n}}\n.kts-field-bg:hover {{\n background-color: {fourth};\n}}\n.kts-annotation {{\n text-align: left;\n margin-left: 20px;\n margin-bottom: -5px;\n display: inline-block;\n color: {third};\n}}\n.kts-title {{\n text-align: center;\n display: inline-block;\n font-weight: bold;\n color: {third};\n}}\n.kts-code {{\n background-color: {second};\n text-align: left;\n border-radius: 15px;\n padding: 0.5em 15px;\n margin: 5px;\n color: white;\n display: inline-block;\n}}\n.kts-code:hover {{\n background-color: {fourth};\n}}\n.kts-code > pre {{\n background-color: {second};\n overflow: auto;\n white-space: pre-wrap;\n}}\n.kts-code:hover > pre {{\n background-color: {fourth};\n}}\n.kts-output {{\n background-color: {second};\n text-align: left;\n border-radius: 15px;\n padding: 5px 15px;\n margin: 5px;\n font-weight: bold;\n font-family: monospace;\n color: {accent};\n overflow: auto;\n max-height: 4.8em;\n display: flex;\n flex-direction: column-reverse;\n}}\n\n.kts-df {{\n background-color: {second};\n text-align: left;\n border-radius: 15px;\n padding: 5px 15px;\n margin: 5px;\n display: inline-block;\n color: {accent};\n}}\n\n.kts-title-with-cross {{\n display: grid;\n grid-template-columns: 1em auto 1em;\n margin-left: 5px;\n margin-right: 5px;\n}}\n.kts-cross-circle {{\n background-color: {second};\n width: 1em;\n height: 1em;\n position: relative;\n border-radius: 50%;\n cursor: pointer;\n z-index: 2;\n margin-top: 2px;\n max-width: none;\n}}\n.kts-cross-before,\n.kts-cross-after {{\n background-color: {third};\n content: '';\n position: absolute;\n width: 0.75em;\n height: 2px;\n border-radius: 0;\n top: calc((1em - 2px) / 2);\n z-index: 0;\n}}\n.kts-cross-before {{\n -webkit-transform: rotate(-45deg);\n -moz-transform: rotate(-45deg);\n transform: rotate(-45deg);\n left: calc(1em / 8);\n}}\n.kts-cross-after {{\n -webkit-transform: rotate(-135deg);\n -moz-transform: rotate(-135deg);\n transform: rotate(-135deg);\n right: calc(1em / 8);\n}}\n\n#kts-hidden {{\n display: none\n}}\n.kts-thumbnail {{\n margin: 0;\n cursor: pointer;\n}}\n.kts-thumbnail-first {{\n background-color: {first};\n}}\n.kts-thumbnail-second {{\n background-color: {second};\n}}\n#kts-collapsible {{\n -webkit-transition: max-height {anim_height}, padding {anim_padding}; \n -moz-transition: max-height {anim_height}, padding {anim_padding}; \n -ms-transition: max-height {anim_height}, padding {anim_padding}; \n -o-transition: max-height {anim_height}, padding {anim_padding}; \n 
transition: max-height {anim_height}, padding {anim_padding}; \n \n padding: 0;\n margin: 2px;\n align-self: flex-start;\n max-height: 100px;\n overflow: hidden;\n}}\n.kts-check {{\n display: none;\n}}\n.kts-check:checked + #kts-collapsible {{\n padding: 10px;\n max-height: {max_height_expanded};\n}}\n.kts-check:checked + #kts-collapsible > #kts-hidden {{\n display: inline-flex;\n}}\n.kts-check:checked + #kts-collapsible > .kts-thumbnail {{\n display: none;\n}}\n.kts-check:checked + .kts-wrapper-border {{\n border: 2px solid {second};\n}}\n.kts-check-outer {{\n display: none;\n}}\n.kts-check-outer:checked + #kts-collapsible {{\n padding: 10px;\n max-height: {max_height_expanded};\n}}\n.kts-check-outer:checked + #kts-collapsible > #kts-hidden {{\n display: inline-flex;\n}}\n.kts-check-outer:checked + #kts-collapsible > .kts-thumbnail {{\n display: none;\n}}\n.kts-check-outer:checked + .kts-wrapper-border {{\n border: 2px solid {second};\n}}\n.kts-inner-wrapper {{\n flex-direction: column;\n}}\n\n.kts-progressbar-wrapper {{\n display: flex;\n flex-direction: row;\n align-items: center;\n height: 1.6em;\n}}\n\n.kts-progressbar-outer {{\n box-sizing: padding-box;\n display: flex;\n flex-direction: row;\n background-color: {second};\n align-items: center;\n padding: 3px;\n border-radius: 15px;\n width: 100%;\n}}\n\n.kts-progressbar-inner {{\n background-color: {third};\n height: 0.7em;\n border-radius: 15px;\n}}\n\n.kts-hbar-container {{\n display: block;\n position: relative;\n height: min(calc(100% - 3px), 1.5em);\n margin: 2px;\n}}\n.kts-hbar {{\n position: absolute;\n display: inline-block;\n background-color: {third};\n text-align: left;\n height: 100%;\n border-radius: 15px;\n}}\n.kts-hbar-line {{\n position: absolute;\n display: inline-block;\n background-color: {accent};\n text-align: left;\n height: 1px;\n top: 50%;\n}}\n\n.kts-inner-column {{\n display: flex;\n flex-direction: column;\n padding: auto;\n}}\n.kts-row {{\n display: flex;\n flex-direction: row;\n}}\n\n.kts-hoverable-line, .kts-hoverable-line * {{\n pointer-events: all;\n transition: all 0.1s ease-out;\n}}\n\n.kts-hoverable-line:hover * {{\n stroke: {second_accent};\n stroke-width: 10;\n}}\n\"\"\"\n"},"avg_line_length":{"kind":"number","value":19.2246376812,"string":"19.224638"},"max_line_length":{"kind":"number","value":72,"string":"72"},"alphanum_fraction":{"kind":"number","value":0.6507727101,"string":"0.650773"}}},{"rowIdx":47275,"cells":{"hexsha":{"kind":"string","value":"0959f644ddbc06b75da28e53ec3916db243825e1"},"size":{"kind":"number","value":2087,"string":"2,087"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"ecrire_json.py"},"max_stars_repo_name":{"kind":"string","value":"Maxim01/Programmes"},"max_stars_repo_head_hexsha":{"kind":"string","value":"dbe5b83b3c65776ccc00049793fa85313fb76065"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"ecrire_json.py"},"max_issues_repo_name":{"kind":"string","value":"Maxim01/Programmes"},"max_issues_repo_head_hexsha":{"kind":"string","value":"dbe5b83b3c65776ccc00049793fa85313fb76065"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ecrire_json.py"},"max_forks_repo_name":{"kind":"string","value":"Maxim01/Programmes"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dbe5b83b3c65776ccc00049793fa85313fb76065"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import json\r\nimport time\r\nimport subprocess\r\nimport sys\r\n\r\nMAC_ARG = \"VIDE\"\r\nACTION_ARG = \"VIDE\" \r\nADD_ARG\t= \"VIDE\"\r\nMDP_ARG\t= \"VIDE\"\r\nDEST_ARG = \"VIDE\"\r\nMDP_SERR_ARG = \"VIDE\"\r\n\r\ndef mdp_serrure():\r\n\r\n\t\tglobal MAC_ARG\r\n\t\tglobal MDP_SERR_ARG\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\twith open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json') as f:\r\n\t\t\tdataa = json.load(f)\t\r\n\t\t\r\n\t\tdataa['Password']['Pass'] = MDP_SERR_ARG\r\n\t\t\r\n\t\twith open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json', 'w') as f:\r\n\t\t\tjson.dump(dataa, f, indent=2)\r\n\r\n\r\ndef mail_dest():\r\n\r\n\t\tglobal DEST_ARG\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\twith open('/home/Devismes_Bridge/JSON_List/mail.json') as f:\r\n\t\t\tdataa = json.load(f)\t\r\n\t\t\r\n\t\tprint \"OK: \", DEST_ARG\r\n\t\t\r\n\t\tdataa['mail']['Dest'] = DEST_ARG\r\n\t\t\r\n\t\twith open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:\r\n\t\t\tjson.dump(dataa, f, indent=2)\r\n\r\n\r\ndef mail_origine():\t #base SQL !!!!\r\n\t\r\n\t\tglobal ADD_ARG\r\n\t\tglobal MDP_ARG\r\n\t\t\t\t\t\t\t\t\r\n\t\t\r\n\t\twith open('/home/Devismes_Bridge/JSON_List/mail.json') as f:\r\n\t\t\tdataa = json.load(f)\t\r\n\t\t\r\n\t\tdataa['mail']['adresse'] = ADD_ARG\t\r\n\t\t\r\n\t\twith open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:\r\n\t\t\tjson.dump(dataa, f, indent=2)\t\r\n\r\ndef Arguments():\r\n\r\n\tglobal MAC_ARG\r\n\t\r\n\tglobal ADD_ARG\r\n\tglobal MDP_ARG\r\n\t\r\n\tglobal DEST_ARG\r\n\tglobal MDP_SERR_ARG\r\n\r\n\tprint \"Arguments: \", sys.argv\r\n\tprint \"NB d'arguments: \", len(sys.argv)\r\n\t\r\n\tif (len(sys.argv) == 4) and (sys.argv[1] == '1'): #on modifie le mot de passe de la serrure selectionnee\r\n\t\r\n\t\t\tprint \"modifie mot de passe serrure\"\r\n\t\t\tMAC_ARG = sys.argv[2]\r\n\t\t\tMDP_SERR_ARG = sys.argv[3]\r\n\t\t\tmdp_serrure()\r\n\t\t\t\t\r\n\tif (len(sys.argv) == 3) and (sys.argv[1] == '2'): #on modifie le mail de destination \r\n\t\r\n\t\t\tprint \"modifie mail destination\"\r\n\t\t\tDEST_ARG = sys.argv[2]\r\n\t\t\tmail_dest()\r\n\t\t\t\r\n\tif (len(sys.argv) == 3) and (sys.argv[1] == '3'): #on modifie le mail d'origine et mot de passe\r\n\t\r\n\t\t\tprint \"modifie mail d'origine et mot de passe\"\r\n\t\t\tADD_ARG = sys.argv[2]\t\r\n\t\t\tmail_origine()\r\n\t\t\r\ndef main():\r\n\tprint \"MAIN\"\r\n\tArguments()\r\n\t\r\nif __name__ == \"__main__\":\r\n main()\r\n 
"},"avg_line_length":{"kind":"number","value":22.2021276596,"string":"22.202128"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.6157163392,"string":"0.615716"}}},{"rowIdx":47276,"cells":{"hexsha":{"kind":"string","value":"1195d97ce7b7fc191ee2c37ce674f6e799bdf4b2"},"size":{"kind":"number","value":107,"string":"107"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"main.ru.py"},"max_stars_repo_name":{"kind":"string","value":"vv31415926/python_lessons_01_4"},"max_stars_repo_head_hexsha":{"kind":"string","value":"f5e67d008a5401335c7b5589d9dacc125856560d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"main.ru.py"},"max_issues_repo_name":{"kind":"string","value":"vv31415926/python_lessons_01_4"},"max_issues_repo_head_hexsha":{"kind":"string","value":"f5e67d008a5401335c7b5589d9dacc125856560d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"main.ru.py"},"max_forks_repo_name":{"kind":"string","value":"vv31415926/python_lessons_01_4"},"max_forks_repo_head_hexsha":{"kind":"string","value":"f5e67d008a5401335c7b5589d9dacc125856560d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"s = input('Введите ФИО через пробел:')\n\nlst = s.split()\nprint( 'Привет, ' )\nfor si in lst:\n print( si)"},"avg_line_length":{"kind":"number","value":17.8333333333,"string":"17.833333"},"max_line_length":{"kind":"number","value":38,"string":"38"},"alphanum_fraction":{"kind":"number","value":0.5981308411,"string":"0.598131"}}},{"rowIdx":47277,"cells":{"hexsha":{"kind":"string","value":"e115b2e949a11782a5bb56a5b2bb6a3795f7d276"},"size":{"kind":"number","value":330,"string":"330"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"menucard/admin.py"},"max_stars_repo_name":{"kind":"string","value":"baniasbaabe/happy-qr"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bf44ac19306ea6405cc7c9a100e6f83afca125b4"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-01-23T21:42:10.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-01-23T21:42:10.000Z"},"max_issues_repo_path":{"kind":"string","value":"menucard/admin.py"},"max_issues_repo_name":{"kind":"string","value":"baniasbaabe/happy-qr"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bf44ac19306ea6405cc7c9a100e6f83afca125b4"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"menucard/admin.py"},"max_forks_repo_name":{"kind":"string","value":"baniasbaabe/happy-qr"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bf44ac19306ea6405cc7c9a100e6f83afca125b4"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.contrib import admin\n\n# Register your models here.\nfrom menucard.models import *\n\nadmin.site.register(Vorspeise)\nadmin.site.register(Hauptspeise)\nadmin.site.register(Nachspeise)\nadmin.site.register(Snacks)\nadmin.site.register(AlkoholfreieDrinks)\nadmin.site.register(AlkoholhaltigeDrinks)\nadmin.site.register(Besucher)\n"},"avg_line_length":{"kind":"number","value":25.3846153846,"string":"25.384615"},"max_line_length":{"kind":"number","value":41,"string":"41"},"alphanum_fraction":{"kind":"number","value":0.8333333333,"string":"0.833333"}}},{"rowIdx":47278,"cells":{"hexsha":{"kind":"string","value":"0183624fde61b9b8bb023787016c964c88412b6b"},"size":{"kind":"number","value":170,"string":"170"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"flask/app/views.py"},"max_stars_repo_name":{"kind":"string","value":"hou2zi0/flask-app-docker"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0e51b1f00201fc6eb46a62d0d8f2701bc02d4031"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"flask/app/views.py"},"max_issues_repo_name":{"kind":"string","value":"hou2zi0/flask-app-docker"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0e51b1f00201fc6eb46a62d0d8f2701bc02d4031"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"flask/app/views.py"},"max_forks_repo_name":{"kind":"string","value":"hou2zi0/flask-app-docker"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0e51b1f00201fc6eb46a62d0d8f2701bc02d4031"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from app import app\n\n@app.route('/')\ndef index():\n return \"Hello from Flask! 🐵\"\n \n@app.route('/affe')\ndef affe():\n return \"Hello from Flask! Affe sagt Hallo! 
🐵\"\n"},"avg_line_length":{"kind":"number","value":17,"string":"17"},"max_line_length":{"kind":"number","value":49,"string":"49"},"alphanum_fraction":{"kind":"number","value":0.6117647059,"string":"0.611765"}}},{"rowIdx":47279,"cells":{"hexsha":{"kind":"string","value":"0968faab53e0aa82c8b7c026041088ebbd25206c"},"size":{"kind":"number","value":2274,"string":"2,274"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/my_sql_conn.py"},"max_stars_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_issues_repo_path":{"kind":"string","value":"python/my_sql_conn.py"},"max_issues_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/my_sql_conn.py"},"max_forks_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: UTF-8 -*-\n\nimport MySQLdb\n\n\nclass DbFunctions(object):\n \"\"\"\n 数据库连接\n \"\"\"\n\n def __init__(self, server, username, password, dbname):\n self.server = server\n self.username = username\n self.password = password\n self.dbname = dbname\n self.db = None\n self.cur = None\n\n def connection_open(self):\n self.db = MySQLdb.connect(host=self.server, user=self.username, passwd=self.password, db=self.dbname)\n self.cur = self.db.cursor()\n\n def connection_close(self):\n self.db.close()\n\n def mysql_qry(self, sql, bool): # 1 for select and 0 for insert update delete\n self.connection_open()\n try:\n self.cur.execute(sql)\n if bool:\n return self.cur.fetchall()\n else:\n self.db.commit()\n return True\n except MySQLdb.Error, e:\n try:\n print \"Mysql Error:- \" + str(e)\n except IndexError:\n print \"Mysql Error:- \" + str(e)\n self.connection_close()\n\n def mysql_insert(self, table, fields, values):\n sql = \"INSERT INTO \" + table + \" (\" + fields + \") VALUES (\" + values + \")\";\n return self.mysql_qry(sql, 0)\n\n def mysql_update(self, table, values, conditions):\n sql = \"UPDATE \" + table + \" SET \" + values + \" WHERE \" + conditions\n return self.mysql_qry(sql, 0)\n\n def mysql_delete(self, table, conditions):\n sql = \"DELETE FROM \" + table + \" WHERE \" + conditions\n return self.mysql_qry(sql, 0)\n\n def mysql_select(self, table):\n sql = \"SELECT * FROM \" + table\n return self.mysql_qry(sql, 1)\n\n def insert_by_many(self, 
tablname, rows):\n        try:\n            # sql = 'INSERT INTO table values(%s,%s,%s)'\n            # bulk insert\n            sql = 'INSERT INTO ' + tablname + ' values(%s,%s,%s)'\n            self.connection_open()\n            self.cur.executemany(sql, rows)\n            self.db.commit()\n        except Exception as e:\n            print e\n            self.db.rollback()\n\n        self.connection_close()\n        print '[insert_by_many executemany] total:', len(rows)\n\n\n    # db = DbFunctions(\"localhost\", \"root\", \"Root@123\", \"db\")\n    # result = db.mysql_qry(\"\",1)\n"},"avg_line_length":{"kind":"number","value":29.9210526316,"string":"29.921053"},"max_line_length":{"kind":"number","value":109,"string":"109"},"alphanum_fraction":{"kind":"number","value":0.54705365,"string":"0.547054"}}}
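DbFunctions above assembles most SQL by string concatenation, which is vulnerable to SQL injection; only insert_by_many binds values through executemany. A sketch of the same parameter binding for single statements — the connection settings and table below are placeholders:

import MySQLdb

db = MySQLdb.connect(host='localhost', user='user', passwd='secret', db='test')
cur = db.cursor()
# The driver escapes bound values itself; never interpolate them into the SQL.
cur.execute('INSERT INTO t (a, b, c) VALUES (%s, %s, %s)', ('x', 'y', 'z'))
db.commit()
cur.execute('SELECT * FROM t WHERE a = %s', ('x',))
rows = cur.fetchall()
db.close()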
,{"rowIdx":47280,"cells":{"hexsha":{"kind":"string","value":"61e5a5b59a9e4dbc5ab5ac9e9ca08a0245f2a295"},"size":{"kind":"number","value":3267,"string":"3,267"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"BigData_exp/exp3/exp3/my_html.py"},"max_stars_repo_name":{"kind":"string","value":"DolorHunter/hfut-exp-archived"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c67e26c1f4fba550c8974eaba10dfa302b928868"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_stars_count":{"kind":"number","value":12,"string":"12"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-12-07T05:49:05.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-25T09:09:36.000Z"},"max_issues_repo_path":{"kind":"string","value":"BigData_exp/exp3/exp3/my_html.py"},"max_issues_repo_name":{"kind":"string","value":"DolorHunter/hfut-exp"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c67e26c1f4fba550c8974eaba10dfa302b928868"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"BigData_exp/exp3/exp3/my_html.py"},"max_forks_repo_name":{"kind":"string","value":"DolorHunter/hfut-exp"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c67e26c1f4fba550c8974eaba10dfa302b928868"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-01-08T08:53:53.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-01-08T08:53:53.000Z"},"content":{"kind":"string","value":"import requests\n# This file must not be named 'html.py', which would clash with the module used by bs4!\nfrom bs4 import BeautifulSoup\nimport csv\n\n\n# Get raw html info\ndef get_html_info(url):\n    try:\n        print('url:' + url)\n        re = requests.get(url, timeout=30)\n        re.raise_for_status()\n        re.encoding = 'utf-8'\n        print(\"Get raw info.\")\n        return re.text\n    except:\n        print(\"[Error] Failed to get HTML info!\")\n\n\n# Save raw html info to the file\ndef save_html_info(html_info):\n    file = open('raw_html_info.txt', 'w')\n    for info in html_info:\n        file.write(info)\n    file.close()\n    print(\"Saved raw info to the file.\")\n\n\n# Extract the wanted fields from the raw html info\ndef re_html_info(raw_html_info):\n    # Using bs4 to extract raw info\n    soup = BeautifulSoup(raw_html_info, 'html.parser')\n    data = soup.find_all('tr')\n    # print(data)  # test\n    ready_info = []\n    i = 0\n    for info in data:\n        if i < 2:\n            # skip the two title rows\n            i += 1\n            continue\n        else:\n            info = str(info)\n            re_info = {}\n            # Save serial numbers to list\n            serial_num_start = info.find('; height: 18.75pt\">')\n            serial_num_end = info.find('</td>', serial_num_start, len(info))\n            serial_num = info[serial_num_start+19: serial_num_end]\n            re_info['序号'] = serial_num  # \" \".join(serial_num.split())\n\n            # Save student names to list\n            student_name_start = info.find('left: medium none\">', serial_num_end, len(info))\n            student_name_end = info.find('</td>', student_name_start, len(info))\n            student_name = info[student_name_start+19: student_name_end]\n            re_info['姓名'] = student_name  # \" \".join(student_name.split())\n\n            # Save school names to list\n            school_name_start = info.find('left: medium none\">', student_name_end, len(info))\n            school_name_end = info.find('</td>', school_name_start, len(info))\n            school_name = info[school_name_start+19: school_name_end]\n            re_info['录取学校'] = school_name  # \" \".join(school_name.split())\n\n            # Save school types to list\n            school_types_start = info.find('left: medium none\">', school_name_end, len(info))\n            school_types_end = info.find('</td>', school_types_start, len(info))\n            school_types = info[school_types_start+19: school_types_end]\n            re_info['大学类型'] = school_types  # \" \".join(school_types.split())\n\n            ready_info.append(re_info)\n    print(\"Info is ready.\")\n    return ready_info\n\n\ndef save_to_csv(ready_info):\n    with open('re_html_info.csv', 'wt', encoding='utf-16') as csvfile:\n        csvout = csv.DictWriter(csvfile, ['序号', '姓名', '录取学校', '大学类型'])\n        csvout.writeheader()\n        csvout.writerows(ready_info)\n    print(\"Save to csv.\")\n\n\ndef main():\n    url = 'http://www.sszx.cn/jxjy/xkjs/201802/t20180205_8967.htm'\n    raw_html_info = get_html_info(url)  # Get raw html info\n    save_html_info(raw_html_info)  # Save raw html info to file\n    ready_info = re_html_info(raw_html_info)  # extract fields from the raw html\n    print(ready_info)\n    save_to_csv(ready_info)  # Save ready info to csv\n\n\nif __name__ == '__main__':\n    main()\n"},"avg_line_length":{"kind":"number","value":33.6804123711,"string":"33.680412"},"max_line_length":{"kind":"number","value":80,"string":"80"},"alphanum_fraction":{"kind":"number","value":0.6036118763,"string":"0.603612"}}}
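my_html.py above slices cell values out with str.find() offsets, which breaks as soon as the markup shifts. A sketch of the same extraction done through BeautifulSoup's tree API instead; the URL and the four-column row layout are assumptions:

import requests
from bs4 import BeautifulSoup

def scrape_rows(url):
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, 'html.parser')
    rows = []
    for tr in soup.find_all('tr')[2:]:  # skip the two header rows
        cells = [td.get_text(strip=True) for td in tr.find_all('td')]
        if len(cells) >= 4:
            rows.append(cells[:4])
    return rows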
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Algorithms/Implementation/Bomberman.py"},"max_forks_repo_name":{"kind":"string","value":"baby5/HackerRank"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1e68a85f40499adb9b52a4da16936f85ac231233"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#coding:utf-8\n\nR, C, N = map(int, raw_input().split())\n\ngrid_1 = [[] for i in xrange(R)]\n\nbomb_list = []\nfor i in xrange(R):\n row_list = list(raw_input())\n grid_1[i] = row_list\n for j in xrange(len(row_list)):\n if row_list[j] == 'O':\n bomb_list.append((i, j))\n\ndef is_vaild(x, y):\n return x >= 0 and x < R and y >= 0 and y < C# and (x, y) not in bomb_list 愚蠢至极!\n\ndef have_bomb(x, y, grid):\n if x+1 < R and grid[x+1][y] == 'O':\n return 1\n elif x-1 >= 0 and grid[x-1][y] == 'O':\n return 1\n elif y+1 < C and grid[x][y+1] == 'O':\n return 1\n elif y-1 >= 0 and grid[x][y-1] == 'O':\n return 1\n else:\n return 0\n\nif N % 2 == 0:\n for _ in xrange(R):\n print ''.join(['O'] * C)\nelse:\n if N == 1:\n for row in grid_1:\n print ''.join(row) \n else:\n while 1:\n grid_3 = [['O']*C for _ in xrange(R)] \n for bomb in bomb_list:\n print bomb\n #middle\n x, y = bomb\n grid_3[x][y] = '.'\n \n #up\n i, j = x-1, y\n if is_vaild(i, j):\n grid_3[i][j] = '.'\n \n #down\n i, j = x+1, y\n if is_vaild(i, j):\n grid_3[i][j] = '.'\n \n #left\n i, j = x, y-1\n if is_vaild(i, j):\n grid_3[i][j] = '.'\n \n #right\n i, j = x, y+1\n if is_vaild(i, j):\n grid_3[i][j] = '.'\n \n if N/2 % 2 != 0:\n for row in grid_3:\n print ''.join(row)\n break\n\n for i in xrange(R):\n row = []\n for j in xrange(C):\n if grid_3[i][j] == 'O':\n row.append('.')\n elif have_bomb(i, j, grid_3): \n row.append('.') \n else:\n row.append('O')\n print ''.join(row)\n\n break\n"},"avg_line_length":{"kind":"number","value":25.8313253012,"string":"25.831325"},"max_line_length":{"kind":"number","value":83,"string":"83"},"alphanum_fraction":{"kind":"number","value":0.3502798507,"string":"0.35028"}}},{"rowIdx":47282,"cells":{"hexsha":{"kind":"string","value":"3a7d6a374822bd399b7fc85dc349d7dbd4212ce5"},"size":{"kind":"number","value":1228,"string":"1,228"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pytestDemo/common/read_data.py"},"max_stars_repo_name":{"kind":"string","value":"lthinktime/testdemo"},"max_stars_repo_head_hexsha":{"kind":"string","value":"509656d62535ed06e222c08671db11e31d9b3162"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pytestDemo/common/read_data.py"},"max_issues_repo_name":{"kind":"string","value":"lthinktime/testdemo"},"max_issues_repo_head_hexsha":{"kind":"string","value":"509656d62535ed06e222c08671db11e31d9b3162"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pytestDemo/common/read_data.py"},"max_forks_repo_name":{"kind":"string","value":"lthinktime/testdemo"},"max_forks_repo_head_hexsha":{"kind":"string","value":"509656d62535ed06e222c08671db11e31d9b3162"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import yaml\nimport json\nfrom configparser import ConfigParser\nfrom common.logger import logger\n\n\nclass MyConfigParser(ConfigParser):\n # 重写configparser 中的 optionxform 函数,解决 .ini 文件中的 键option 自动转为小写的问题\n def __init__(self, defaults=None):\n ConfigParser.__init__(self, defaults=defaults)\n\n def optionxform(self, optionstr):\n return optionstr\n\nclass ReadFileData():\n\n def __init__(self):\n pass\n\n def load_yaml(self, file_path):\n logger.info(\"加载 {} 文件......\".format(file_path))\n with open(file_path, encoding='utf-8') as f:\n data = yaml.safe_load(f)\n logger.info(\"读到数据 ==>> {} \".format(data))\n return data\n\n def load_json(self, file_path):\n logger.info(\"加载 {} 文件......\".format(file_path))\n with open(file_path, encoding='utf-8') as f:\n data = json.load(f)\n logger.info(\"读到数据 ==>> {} \".format(data))\n return data\n\n def load_ini(self, file_path):\n logger.info(\"加载 {} 文件......\".format(file_path))\n config = MyConfigParser()\n config.read(file_path, encoding=\"UTF-8\")\n data = dict(config._sections)\n # print(\"读到数据 ==>> {} \".format(data))\n return data\n\ndata = ReadFileData()\n"},"avg_line_length":{"kind":"number","value":28.5581395349,"string":"28.55814"},"max_line_length":{"kind":"number","value":69,"string":"69"},"alphanum_fraction":{"kind":"number","value":0.6148208469,"string":"0.614821"}}},{"rowIdx":47283,"cells":{"hexsha":{"kind":"string","value":"a33ece80edcfc6ff8944ba4518931f91f0b1ccbe"},"size":{"kind":"number","value":2094,"string":"2,094"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-07-03T06:52:20.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-07-03T06:52:20.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-01T06:17:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-10-04T08:39:45.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nRandomCrop operator.\n\"\"\"\n\nfrom mindspore.dataset.vision import py_transforms\nfrom mindspore.dataset.vision import py_transforms_util\nfrom mindspore.dataset.vision import utils\n\n\nclass RandomCrop(py_transforms.RandomCrop):\n \"\"\"\n RandomCrop inherits from py_transforms.RandomCrop but derives/uses the\n original image size as the output size.\n\n Please refer to py_transforms.RandomCrop for argument specifications.\n \"\"\"\n\n def __init__(self, padding=4, pad_if_needed=False,\n fill_value=0, padding_mode=utils.Border.CONSTANT):\n # Note the `1` for the size argument is only set for passing the check.\n super(RandomCrop, self).__init__(1, padding=padding, pad_if_needed=pad_if_needed,\n fill_value=fill_value, padding_mode=padding_mode)\n\n def __call__(self, img):\n \"\"\"\n Call method.\n\n Args:\n img (PIL image): Image to be padded and then randomly cropped back\n to the same size.\n\n Returns:\n img (PIL image), Randomly cropped image.\n \"\"\"\n if not py_transforms_util.is_pil(img):\n raise TypeError(\n py_transforms_util.augment_error_message.format(type(img)))\n\n return py_transforms_util.random_crop(\n img, img.size, self.padding, self.pad_if_needed,\n self.fill_value, self.padding_mode,\n )\n"},"avg_line_length":{"kind":"number","value":36.7368421053,"string":"36.736842"},"max_line_length":{"kind":"number","value":90,"string":"90"},"alphanum_fraction":{"kind":"number","value":0.658548233,"string":"0.658548"}}},{"rowIdx":47284,"cells":{"hexsha":{"kind":"string","value":"a37c14fdb39ad32cb41eecf0cb0e42257fe19ade"},"size":{"kind":"number","value":617,"string":"617"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"quant/common/log.py"},"max_stars_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_stars_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-10-22T15:00:09.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-09-19T11:45:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"quant/common/log.py"},"max_issues_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_issues_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-01-19T16:19:40.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-01-19T16:19:40.000Z"},"max_forks_repo_path":{"kind":"string","value":"quant/common/log.py"},"max_forks_repo_name":{"kind":"string","value":"doubleDragon/QuantBot"},"max_forks_repo_head_hexsha":{"kind":"string","value":"53a1d6c62ecece47bf777da0c0754430b706b7fd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":5,"string":"5"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-12-11T15:10:29.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-12-21T17:40:58.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\n\ndef get_logger(log_name, level=logging.DEBUG):\n \"\"\"\n 日志的疯转\n :param level: 日志级别\n :param log_name: 日志对象名\n :return: 日志对象名\n \"\"\"\n logger = logging.getLogger(log_name)\n\n logger.setLevel(level)\n\n rt_handler = RotatingFileHandler(log_name, maxBytes=100 * 1024 * 1024, backupCount=10)\n rt_handler.setLevel(level)\n formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')\n rt_handler.setFormatter(formatter)\n logger.addHandler(rt_handler)\n return logger\n"},"avg_line_length":{"kind":"number","value":25.7083333333,"string":"25.708333"},"max_line_length":{"kind":"number","value":90,"string":"90"},"alphanum_fraction":{"kind":"number","value":0.698541329,"string":"0.698541"}}},{"rowIdx":47285,"cells":{"hexsha":{"kind":"string","value":"6e9f8f97f56424abab696c62b47c86b133739767"},"size":{"kind":"number","value":784,"string":"784"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"06 APIs, Scraping I/kommetarezaehlen.py"},"max_stars_repo_name":{"kind":"string","value":"manuelapaganini/20_21_Workfile"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5ec3637d18cbd73256b56682d9b99547e21a24d9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-08-06T14:53:34.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-10-16T19:44:16.000Z"},"max_issues_repo_path":{"kind":"string","value":"06 APIs, Scraping I/kommetarezaehlen.py"},"max_issues_repo_name":{"kind":"string","value":"manuelapaganini/20_21_Workfile"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5ec3637d18cbd73256b56682d9b99547e21a24d9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-06-25T09:46:58.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-06-25T09:46:58.000Z"},"max_forks_repo_path":{"kind":"string","value":"06 APIs, Scraping I/kommetarezaehlen.py"},"max_forks_repo_name":{"kind":"string","value":"manuelapaganini/20_21_Workfile"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5ec3637d18cbd73256b56682d9b99547e21a24d9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-16T13:05:51.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-09-27T09:07:49.000Z"},"content":{"kind":"string","value":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime\nimport sys\n\ndef kommentarezaehlen(url):\n\n r = requests.get(url)\n soup = BeautifulSoup(r.text,'xml')\n storybox = soup.find_all('div', {'class':'text'})\n\n lst = []\n for elem in storybox:\n try:\n t = elem.find('h2').text\n except:\n t = 'Kein Titel'\n try:\n k = elem.find('a', {'class':'standard comments'}).text.replace(\"\\n\", \"\")\n except:\n k = 'Keine Kommentare'\n mini_dict = {'Titel': t,\n 'Kommentar': k}\n lst.append(mini_dict)\n\n now = datetime.datetime.now()\n \n return pd.DataFrame(lst).to_csv(str(now)+'-watson.csv')\n\nif __name__== \"__main__\":\n kommentarezaehlen(sys.argv[1])\n"},"avg_line_length":{"kind":"number","value":23.7575757576,"string":"23.757576"},"max_line_length":{"kind":"number","value":84,"string":"84"},"alphanum_fraction":{"kind":"number","value":0.5676020408,"string":"0.567602"}}},{"rowIdx":47286,"cells":{"hexsha":{"kind":"string","value":"287b71be0094ff9c0a8a646a19e81197612eb1f3"},"size":{"kind":"number","value":3673,"string":"3,673"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"zplus_scraper/pipelines.py"},"max_stars_repo_name":{"kind":"string","value":"tstaec/ZplusScraper"},"max_stars_repo_head_hexsha":{"kind":"string","value":"38c4f9534b8ee3822a80b48a6827ef4f52793c0b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"zplus_scraper/pipelines.py"},"max_issues_repo_name":{"kind":"string","value":"tstaec/ZplusScraper"},"max_issues_repo_head_hexsha":{"kind":"string","value":"38c4f9534b8ee3822a80b48a6827ef4f52793c0b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-07-04T12:02:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-07-04T12:02:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"zplus_scraper/pipelines.py"},"max_forks_repo_name":{"kind":"string","value":"tstaec/ZplusScraper"},"max_forks_repo_head_hexsha":{"kind":"string","value":"38c4f9534b8ee3822a80b48a6827ef4f52793c0b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from datetime import datetime\n\nimport mysql\nfrom scrapy.exceptions import NotConfigured\n\nfrom database import create_database\n\n\nclass ZplusscraperPipeline:\n def process_item(self, item, spider):\n return item\n\n\nclass DatabasePipeline(object):\n\n def __init__(self, db, user, passwd, host):\n self.db = db\n self.user = user\n self.passwd = passwd\n self.host = host\n\n @classmethod\n def from_crawler(cls, crawler):\n db_settings = crawler.settings.getdict(\"DB_SETTINGS\")\n if not db_settings: # if we don't define db config in settings\n raise NotConfigured # then raise error\n db = db_settings['db']\n user = db_settings['user']\n passwd = db_settings['passwd']\n host = db_settings['host']\n return cls(db, user, passwd, host) # returning pipeline instance\n\n def open_spider(self, spider):\n print('open spider was called. Initializing database')\n self.context = mysql.connector.connect(\n user=self.user,\n passwd=self.passwd,\n host=self.host,\n charset='utf8mb4',\n use_unicode=True)\n create_database(self.context, self.db)\n\n def close_spider(self, spider):\n print('closing spider')\n self.context.close()\n\n def process_item(self, item, spider):\n existing_article = self.get_existing_article(item)\n if existing_article is None:\n article_id = self.save_article(item)\n else:\n article_id = existing_article[0]\n if item['datazplus'] is not None and item['article_html'] is not None:\n self.update_article(item)\n self.save_scrape_run(item, article_id)\n\n return item\n\n def get_existing_article(self, article):\n href = article['href']\n if href is None:\n return None\n\n cursor = self.context.cursor(buffered=True)\n sql_command = \"SELECT id, title FROM articles WHERE href = %s\"\n returned_rows = cursor.execute(sql_command, (href,))\n\n result = cursor.fetchone()\n cursor.close()\n return result\n\n def save_article(self, article):\n cursor = self.context.cursor(buffered=True)\n\n sql_command = \"\"\"INSERT INTO articles (created, last_modified, title, href, article_html) \n VALUES (%s, %s, %s, %s, %s)\"\"\"\n str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cursor.execute(sql_command, (str_now, str_now, article['title'], article['href'], article['article_html']))\n\n self.context.commit()\n row_id = cursor.lastrowid\n cursor.close()\n return row_id\n\n def update_article(self, article):\n cursor = self.context.cursor(buffered=True)\n\n sql_command = \"\"\"UPDATE articles SET last_modified = %s, article_html = %s \"\"\"\n str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cursor.execute(sql_command, (str_now, article['article_html']))\n\n self.context.commit()\n cursor.close()\n return None\n\n def save_scrape_run(self, article, article_id):\n cursor = self.context.cursor(buffered=True)\n\n sql_command = \"\"\"INSERT INTO scrape_run (created, datazplus, article_id) \n VALUES (%s, %s, %s)\"\"\"\n str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cursor.execute(sql_command, (str_now, article['datazplus'], article_id))\n\n self.context.commit()\n cursor.close()\n return 
None\n"},"avg_line_length":{"kind":"number","value":34.3271028037,"string":"34.327103"},"max_line_length":{"kind":"number","value":115,"string":"115"},"alphanum_fraction":{"kind":"number","value":0.5869861149,"string":"0.586986"}}},{"rowIdx":47287,"cells":{"hexsha":{"kind":"string","value":"95f916a0d2f23768a5613ddf564dc02ba2c599ac"},"size":{"kind":"number","value":478,"string":"478"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"ggit_platform/admin.py"},"max_stars_repo_name":{"kind":"string","value":"girlsgoit/GirlsGoIT"},"max_stars_repo_head_hexsha":{"kind":"string","value":"447cd15c44ebee4af9e942a079d681be8683239f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-02-27T21:20:54.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-02-27T21:20:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"ggit_platform/admin.py"},"max_issues_repo_name":{"kind":"string","value":"girlsgoit/GirlsGoIT"},"max_issues_repo_head_hexsha":{"kind":"string","value":"447cd15c44ebee4af9e942a079d681be8683239f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"ggit_platform/admin.py"},"max_forks_repo_name":{"kind":"string","value":"girlsgoit/GirlsGoIT"},"max_forks_repo_head_hexsha":{"kind":"string","value":"447cd15c44ebee4af9e942a079d681be8683239f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom markdownx.admin import MarkdownxModelAdmin\n\nfrom .models import Event\nfrom .models import Member\nfrom .models import MemberRole\nfrom .models import Region\nfrom .models import Story\nfrom .models import Track\n\nadmin.site.register(Track, MarkdownxModelAdmin)\nadmin.site.register(Region)\nadmin.site.register(Member)\nadmin.site.register(MemberRole)\nadmin.site.register(Event, MarkdownxModelAdmin)\nadmin.site.register(Story, MarkdownxModelAdmin)\n"},"avg_line_length":{"kind":"number","value":28.1176470588,"string":"28.117647"},"max_line_length":{"kind":"number","value":47,"string":"47"},"alphanum_fraction":{"kind":"number","value":0.8368200837,"string":"0.83682"}}},{"rowIdx":47288,"cells":{"hexsha":{"kind":"string","value":"66ad35acb4d63dc9937ca5276e6b238cb6b79da1"},"size":{"kind":"number","value":10058,"string":"10,058"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"spider/got/manager/TweetManager.py"},"max_stars_repo_name":{"kind":"string","value":"iecasszyjy/tweet_search-master"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e4978521a39964c22ae46bf35d6ff17710e8e6c6"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"spider/got/manager/TweetManager.py"},"max_issues_repo_name":{"kind":"string","value":"iecasszyjy/tweet_search-master"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e4978521a39964c22ae46bf35d6ff17710e8e6c6"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-03-31T18:54:16.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-12-13T19:49:08.000Z"},"max_forks_repo_path":{"kind":"string","value":"spider/got/manager/TweetManager.py"},"max_forks_repo_name":{"kind":"string","value":"iecasszyjy/tweet_search-master"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e4978521a39964c22ae46bf35d6ff17710e8e6c6"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import urllib,urllib2,json,re,datetime,sys,cookielib\nfrom .. import models\nfrom pyquery import PyQuery\nimport requests\nimport random\nrandom.seed(1)\n\ndef fetch_activities(tweet_id):\n\tretusers = []\n\tfavorusers = []\n\tre_url = 'https://twitter.com/i/activity/retweeted_popup?id=%s'%(tweet_id)\n\tfavor_url = 'https://twitter.com/i/activity/favorited_popup?id=%s'%(tweet_id)\n\theaders = {\n\t\t\t'Host':\"twitter.com\",\n\t\t\t'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s'%(random.randint(0,999)),\n\t\t\t'Accept':\"application/json, text/javascript, */*; q=0.01\",\n\t\t\t'Accept-Language':\"de,en-US;q=0.7,en;q=0.3\",\n\t\t\t'X-Requested-With':\"XMLHttpRequest\",\n\t\t\t'Referer':'https://twitter.com/',\n\t\t\t'Connection':\"keep-alive\",\n\t\t}\n\tre_users = PyQuery(requests.get(re_url,headers=headers).json()['htmlUsers'])('ol.activity-popup-users')\n\tfor re_user in re_users('div.account'):\n\t\tuserPQ = PyQuery(re_user)\n\t\tuserd = {\n\t\t\t\t\t\t'screen_name':userPQ.attr('data-screen-name'),\n\t\t\t\t\t\t'user_id':userPQ.attr('data-user-id'),\n\t\t\t\t\t\t'data_name':userPQ.attr('data-name'),\n\t\t\t\t\t\t'avatar_src':userPQ('img.avatar').attr('src'),\n\t\t\t\t\t\t'userbadges':userPQ('span.UserBadges').text(),\n\t\t\t\t\t\t'bio':userPQ('p.bio').text(),\n\t\t}\n\t\tretusers.append({userd['screen_name']:userd})\n\tfavor_users = PyQuery(requests.get(favor_url,headers=headers).json()['htmlUsers'])('ol.activity-popup-users')\n\tfor favor_user in favor_users('div.account'):\n\t\tuserPQ = PyQuery(favor_user)\n\t\tuserd = {\n\t\t\t\t\t\t'screen_name':userPQ.attr('data-screen-name'),\n\t\t\t\t\t\t'user_id':userPQ.attr('data-user-id'),\n\t\t\t\t\t\t'data_name':userPQ.attr('data-name'),\n\t\t\t\t\t\t'avatar_src':userPQ('img.avatar').attr('src'),\n\t\t\t\t\t\t'userbadges':userPQ('span.UserBadges').text(),\n\t\t\t\t\t\t'bio':userPQ('p.bio').text(),\n\t\t}\n\t\tfavorusers.append({userd['screen_name']:userd})\n\t\t\n\treturn retusers,favorusers\n\ndef fetch_entities(tweetPQ):\n\thashtags = []\n\turls = []\n\tfor url in tweetPQ('p.js-tweet-text a'):\n\t\td = dict(url.items())\n\t\tif d.has_key('data-expanded-url'): #d['class'] == 
def getTweet(tweetHTML):
    tweetPQ = PyQuery(tweetHTML)
    tweet = models.Tweet()

    # base info
    id = tweetPQ.attr("data-tweet-id")
    conversation_id = tweetPQ.attr('data-conversation-id')
    dateSec = int(tweetPQ("small.time span.js-short-timestamp").attr("data-time"))
    # permalink = tweetPQ.attr("data-permalink-path")

    # user
    screen_name = tweetPQ.attr('data-screen-name')
    user_id = tweetPQ.attr('data-user-id')
    data_name = tweetPQ.attr('data-name')
    avatar_src = tweetPQ('img.avatar').attr('src')
    userbadges = tweetPQ('span.UserBadges').text()

    # text
    hashtags, urls = fetch_entities(tweetPQ)
    mentions = tweetPQ.attr("data-mentions")
    lang = tweetPQ("p.js-tweet-text").attr('lang')
    raw_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
    standard_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '').replace('@ ', ''))
    tweetPQ('p.js-tweet-text')('a').remove()
    tweetPQ('p.js-tweet-text')('img').remove()
    clean_text = tweetPQ("p.js-tweet-text").text()

    # media
    quote_id = tweetPQ('div.QuoteTweet a.QuoteTweet-link').attr('data-conversation-id')
    has_cards = tweetPQ.attr('data-has-cards')
    card_url = tweetPQ('div.js-macaw-cards-iframe-container').attr('data-card-url')
    img_src = tweetPQ('div.AdaptiveMedia-container img').attr('src')
    video_src = tweetPQ('div.AdaptiveMedia-container video').attr('src')
    geo = ''
    geoSpan = tweetPQ('span.Tweet-geo')
    if len(geoSpan) > 0:
        geo = geoSpan.attr('title')

    # action
    retweet_id = tweetPQ.attr('data-retweet-id')
    retweeter = tweetPQ.attr('data-retweeter')
    # retusers, favorusers = fetch_activities(id)
    replies = int(tweetPQ("span.ProfileTweet-action--reply span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
    retweets = int(tweetPQ("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
    favorites = int(tweetPQ("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))

    ## tweet model

    tweet.id = id
    tweet.conversation_id = conversation_id
    tweet.is_reply = tweet.id != tweet.conversation_id
    tweet.created_at = datetime.datetime.fromtimestamp(dateSec)
    # tweet.permalink = 'https://twitter.com' + permalink

    # user
    tweet.user = {
        'screen_name': screen_name,
        'user_id': user_id,
        'data_name': data_name,
        'avatar_src': avatar_src,
        'userbadges': userbadges,
    }

    # media
    tweet.media = {
        'quote_id': quote_id,
        'has_cards': has_cards,
        'card_url': card_url,
        'img_src': img_src,
        'video_src': video_src,
        'geo': geo,
    }

    # text
    tweet.hashtags = hashtags
    tweet.urls = urls
    tweet.mentions = mentions.split(' ') if mentions != None else None
    tweet.lang = lang
    tweet.raw_text = raw_text
    tweet.standard_text = standard_text
    # tweet.clean_text = clean_text

    # action
    tweet.action = {
        # 'retusers': retusers,
        # 'favorusers': favorusers,
        'replies': replies,
        'retweets': retweets,
        'favorites': favorites,
        'retweet_id': retweet_id,
        'retweeter': retweeter,
        'is_retweet': True if retweet_id != None else False,
    }

    return tweet
class TweetManager:

    def __init__(self):
        pass

    @staticmethod
    def getTweetsById(tweet_id):
        url = 'https://twitter.com/xxx/status/%s' % (tweet_id)
        headers = {
            'Host': "twitter.com",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s' % (random.randint(0, 999)),
            'Accept': "application/json, text/javascript, */*; q=0.01",
            'Accept-Language': "de,en-US;q=0.7,en;q=0.3",
            'X-Requested-With': "XMLHttpRequest",
            'Referer': 'https://twitter.com/',
            'Connection': "keep-alive",
        }
        tweets = PyQuery(requests.get(url, headers=headers).content)('div.js-original-tweet')
        for tweetHTML in tweets:
            return getTweet(tweetHTML)

    @staticmethod
    def getTweets(tweetCriteria, refreshCursor='', receiveBuffer=None, bufferLength=100, proxy=None):
        results = []
        resultsAux = []
        cookieJar = cookielib.CookieJar()

        if hasattr(tweetCriteria, 'username') and (tweetCriteria.username.startswith("\'") or tweetCriteria.username.startswith("\"")) and (tweetCriteria.username.endswith("\'") or tweetCriteria.username.endswith("\"")):
            tweetCriteria.username = tweetCriteria.username[1:-1]

        active = True

        while active:
            # keep the name distinct from the imported json module
            data = TweetManager.getJsonResponse(tweetCriteria, refreshCursor, cookieJar, proxy)
            if len(data['items_html'].strip()) == 0:
                break

            if not data.has_key('min_position'):
                break
            refreshCursor = data['min_position']
            if refreshCursor == None:
                break
            tweets = PyQuery(data['items_html'])('div.js-stream-tweet')

            if len(tweets) == 0:
                break

            for tweetHTML in tweets:
                tweet = getTweet(tweetHTML)
                if hasattr(tweetCriteria, 'sinceTimeStamp'):
                    if tweet.created_at < tweetCriteria.sinceTimeStamp:
                        active = False
                        break

                if hasattr(tweetCriteria, 'untilTimeStamp'):
                    if tweet.created_at <= tweetCriteria.untilTimeStamp:
                        results.append(tweet.__dict__)
                else:
                    results.append(tweet.__dict__)
                # resultsAux.append(tweet)

                if receiveBuffer and len(resultsAux) >= bufferLength:
                    receiveBuffer(resultsAux)
                    resultsAux = []

                if tweetCriteria.maxTweets > 0 and len(results) >= tweetCriteria.maxTweets:
                    active = False
                    break

        if receiveBuffer and len(resultsAux) > 0:
            receiveBuffer(resultsAux)

        return results

    @staticmethod
    def getJsonResponse(tweetCriteria, refreshCursor, cookieJar, proxy):
        url = "https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s"

        urlGetData = ''

        if hasattr(tweetCriteria, 'username'):
            urlGetData += ' from:' + tweetCriteria.username

        if hasattr(tweetCriteria, 'querySearch'):
            urlGetData += ' ' + tweetCriteria.querySearch

        if hasattr(tweetCriteria, 'near'):
            urlGetData += "&near:" + tweetCriteria.near + " within:" + tweetCriteria.within
within:\" + tweetCriteria.within\n\t\t\n\t\tif hasattr(tweetCriteria, 'since'):\n\t\t\turlGetData += ' since:' + tweetCriteria.since\n\t\t\t\n\t\tif hasattr(tweetCriteria, 'until'):\n\t\t\turlGetData += ' until:' + tweetCriteria.until\n\t\t\n\n\t\tif hasattr(tweetCriteria, 'topTweets'):\n\t\t\tif tweetCriteria.topTweets:\n\t\t\t\turl = \"https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s\"\n\t\t\n\t\tif hasattr(tweetCriteria, 'tweetType'):\n\t\t\turl = url + tweetCriteria.tweetType\n\t\t\n\t\turl = url % (urllib.quote(urlGetData), refreshCursor)\n\t\tua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s'%(random.randint(0,999))\n\n\t\theaders = [\n\t\t\t('Host', \"twitter.com\"),\n\t\t\t('User-Agent', ua), \n\t\t\t# Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36 \n\t\t\t#Mozilla/5.0 (Windows NT 6.1; Win64; x64)\n\t\t\t('Accept', \"application/json, text/javascript, */*; q=0.01\"),\n\t\t\t('Accept-Language', \"de,en-US;q=0.7,en;q=0.3\"),\n\t\t\t('X-Requested-With', \"XMLHttpRequest\"),\n\t\t\t('Referer', url),\n\t\t\t('Connection', \"keep-alive\")\n\t\t]\n\n\t\tif proxy:\n\t\t\topener = urllib2.build_opener(urllib2.ProxyHandler({'http': proxy, 'https': proxy}), urllib2.HTTPCookieProcessor(cookieJar))\n\t\telse:\n\t\t\topener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))\n\t\topener.addheaders = headers\n\n\t\ttry:\n\t\t\tresponse = opener.open(url)\n\t\t\tjsonResponse = response.read()\n\t\texcept Exception,e:\n\t\t\tprint \"Twitter weird response. Try to see on browser: https://twitter.com/search?q=%s&src=typd\" % urllib.quote(urlGetData)\n\t\t\traise Exception(e.message)\n\t\t\t#sys.exit()\n\t\t\t#return None\n\t\t\n\t\tdataJson = json.loads(jsonResponse)\n\t\t\n\t\treturn dataJson\t\t\n"},"avg_line_length":{"kind":"number","value":34.0949152542,"string":"34.094915"},"max_line_length":{"kind":"number","value":214,"string":"214"},"alphanum_fraction":{"kind":"number","value":0.6801551004,"string":"0.680155"}}},{"rowIdx":47289,"cells":{"hexsha":{"kind":"string","value":"06d235db1b1f699e83bac4c384e8ab53dcbb9ca9"},"size":{"kind":"number","value":103,"string":"103"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"dtdocker/containers/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"oxsoftdev/dt-docker"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"dtdocker/containers/__init__.py"},"max_issues_repo_name":{"kind":"string","value":"oxsoftdev/dt-docker"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"dtdocker/containers/__init__.py"},"max_forks_repo_name":{"kind":"string","value":"oxsoftdev/dt-docker"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from MssqlContainer import MssqlContainer\nfrom RedisContainers import RedisContainers, RedisContainer\n\n"},"avg_line_length":{"kind":"number","value":25.75,"string":"25.75"},"max_line_length":{"kind":"number","value":59,"string":"59"},"alphanum_fraction":{"kind":"number","value":0.8932038835,"string":"0.893204"}}},{"rowIdx":47290,"cells":{"hexsha":{"kind":"string","value":"06d61d6128d3d24a44e5f423459d0349beadddf4"},"size":{"kind":"number","value":834,"string":"834"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Raspberry Pi Pico/Pi_Pico_TrafficLight.py"},"max_stars_repo_name":{"kind":"string","value":"ckuehnel/MicroPython"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c57d0df744fe5301e755bd139b6cc56d69c442fd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-03-22T18:38:43.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-03-22T18:38:43.000Z"},"max_issues_repo_path":{"kind":"string","value":"Raspberry Pi Pico/Pi_Pico_TrafficLight.py"},"max_issues_repo_name":{"kind":"string","value":"ckuehnel/MicroPython"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c57d0df744fe5301e755bd139b6cc56d69c442fd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Raspberry Pi Pico/Pi_Pico_TrafficLight.py"},"max_forks_repo_name":{"kind":"string","value":"ckuehnel/MicroPython"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c57d0df744fe5301e755bd139b6cc56d69c442fd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-02-06T10:07:36.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-02-06T10:07:36.000Z"},"content":{"kind":"string","value":"# Pi_Pico_TrafficLight.py\n# Controlling Neopixel by PIO to simulate a traffic light\n# using ws2812b library by benevpi\n# https://github.com/benevpi/pico_python_ws2812b\n\nimport time\nimport ws2812b\n\nNUM_PIX = 3 # this is for M5Stack RGB LED\nPIN_NUM = 16\nlight = ws2812b.ws2812b(NUM_PIX, 0, PIN_NUM)\n\nRED = (255, 0, 0)\nYELLOW = (255, 150, 0)\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\nCOLORS = (RED, YELLOW, GREEN, BLACK)\n\ndef lights(L0, L1, L2, t):\n color = L0\n light.set_pixel(0, color[0], color[1], color[2])\n color = L1\n light.set_pixel(1, 
# ======================================================================
# File: research/nlp/tprr/src/reader_downstream.py
# Repo: leelige/mindspore  |  License: Apache-2.0
# ======================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""downstream Model for reader"""

import numpy as np
from mindspore import nn, ops
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
from mindspore import dtype as mstype


dst_type = mstype.float16
dst_type2 = mstype.float32


class Linear(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_weight_shape, linear_bias_shape):
        """init function"""
        super(Linear, self).__init__()
        self.matmul = nn.MatMul()
        self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, linear_weight_shape).astype(np.float32)),
                                  name=None)
        self.add = P.Add()
        self.add_bias = Parameter(Tensor(np.random.uniform(0, 1, linear_bias_shape).astype(np.float32)), name=None)
        self.relu = nn.ReLU()

    def construct(self, hidden_state):
        """construct function"""
        output = self.matmul(ops.Cast()(hidden_state, dst_type), ops.Cast()(self.matmul_w, dst_type))
        output = self.add(ops.Cast()(output, dst_type2), self.add_bias)
        output = self.relu(output)
        return output


class BertLayerNorm(nn.Cell):
    """Normalization module of reader downstream"""
    def __init__(self, bert_layer_norm_weight_shape, bert_layer_norm_bias_shape, eps=1e-12):
        """init function"""
        super(BertLayerNorm, self).__init__()
        self.reducemean = P.ReduceMean(keep_dims=True)
        self.sub = P.Sub()
        self.pow = P.Pow()
        self.add = P.Add()
        self.sqrt = P.Sqrt()
        self.div = P.Div()
        self.mul = P.Mul()
        self.variance_epsilon = eps
        self.bert_layer_norm_weight = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_weight_shape)
                                                       .astype(np.float32)), name=None)
        self.bert_layer_norm_bias = Parameter(Tensor(np.random.uniform(0, 1, bert_layer_norm_bias_shape)
                                                     .astype(np.float32)), name=None)

    def construct(self, x):
        """construct function"""
        u = self.reducemean(x, -1)
        s = self.reducemean(self.pow(self.sub(x, u), 2), -1)
        x = self.div(self.sub(x, u), self.sqrt(self.add(s, self.variance_epsilon)))
        output = self.mul(self.bert_layer_norm_weight, x)
        output = self.add(output, self.bert_layer_norm_bias)
        return output
class SupportingOutputLayer(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_1_weight_shape, linear_1_bias_shape, bert_layer_norm_weight_shape,
                 bert_layer_norm_bias_shape):
        """init function"""
        super(SupportingOutputLayer, self).__init__()
        self.linear_1 = Linear(linear_weight_shape=linear_1_weight_shape,
                               linear_bias_shape=linear_1_bias_shape)
        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
                                             bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
        self.matmul = nn.MatMul()
        self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, (8192, 1)).astype(np.float32)), name=None)

    def construct(self, x):
        """construct function"""
        output = self.linear_1(x)
        output = self.bert_layer_norm(output)
        output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.matmul_w, dst_type))
        return ops.Cast()(output, dst_type2)


class PosOutputLayer(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_weight_shape, linear_bias_shape, bert_layer_norm_weight_shape,
                 bert_layer_norm_bias_shape):
        """init function"""
        super(PosOutputLayer, self).__init__()
        self.linear_1 = Linear(linear_weight_shape=linear_weight_shape,
                               linear_bias_shape=linear_bias_shape)
        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
                                             bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
        self.matmul = nn.MatMul()
        self.linear_2_weight = Parameter(Tensor(np.random.uniform(0, 1, (4096, 1)).astype(np.float32)), name=None)
        self.add = P.Add()
        self.linear_2_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)

    def construct(self, state):
        """construct function"""
        output = self.linear_1(state)
        output = self.bert_layer_norm(output)
        output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.linear_2_weight, dst_type))
        output = self.add(ops.Cast()(output, dst_type2), self.linear_2_bias)
        return output
class MaskInvalidPos(nn.Cell):
    """module of reader downstream"""
    def __init__(self):
        """init function"""
        super(MaskInvalidPos, self).__init__()
        self.squeeze = P.Squeeze(2)
        self.sub = P.Sub()
        self.mul = P.Mul()

    def construct(self, pos_pred, context_mask):
        """construct function"""
        output = self.squeeze(pos_pred)
        invalid_pos_mask = self.mul(self.sub(1.0, context_mask), 1e30)
        output = self.sub(output, invalid_pos_mask)
        return output


class Reader_Downstream(nn.Cell):
    """Downstream model for reader"""
    def __init__(self):
        """init function"""
        super(Reader_Downstream, self).__init__()

        self.add = P.Add()
        self.para_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
        self.para_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
                                                       linear_1_bias_shape=(8192,),
                                                       bert_layer_norm_weight_shape=(8192,),
                                                       bert_layer_norm_bias_shape=(8192,))
        self.sent_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
        self.sent_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
                                                       linear_1_bias_shape=(8192,),
                                                       bert_layer_norm_weight_shape=(8192,),
                                                       bert_layer_norm_bias_shape=(8192,))

        self.start_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
                                                 linear_bias_shape=(4096,),
                                                 bert_layer_norm_weight_shape=(4096,),
                                                 bert_layer_norm_bias_shape=(4096,))
        self.end_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
                                               linear_bias_shape=(4096,),
                                               bert_layer_norm_weight_shape=(4096,),
                                               bert_layer_norm_bias_shape=(4096,))
        self.mask_invalid_pos = MaskInvalidPos()
        self.gather_input_weight = Tensor(np.array(0))
        self.gather = P.Gather()
        self.type_linear_1 = nn.Dense(in_channels=4096, out_channels=4096, has_bias=True)
        self.relu = nn.ReLU()

        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=(4096,), bert_layer_norm_bias_shape=(4096,))
        self.type_linear_2 = nn.Dense(in_channels=4096, out_channels=3, has_bias=True)

    def construct(self, para_state, sent_state, state, context_mask):
        """construct function"""
        para_logit = self.para_output_layer(para_state)
        para_logit = self.add(para_logit, self.para_bias)
        sent_logit = self.sent_output_layer(sent_state)
        sent_logit = self.add(sent_logit, self.sent_bias)

        start = self.start_output_layer(state)
        start = self.mask_invalid_pos(start, context_mask)

        end = self.end_output_layer(state)
        end = self.mask_invalid_pos(end, context_mask)

        cls_emb = self.gather(state, self.gather_input_weight, 1)
        q_type = self.type_linear_1(cls_emb)
        q_type = self.relu(q_type)
        q_type = self.bert_layer_norm(q_type)
        q_type = self.type_linear_2(q_type)
        return q_type, start, end, para_logit, sent_logit
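# What BertLayerNorm.construct computes, re-derived in plain NumPy as a
# sanity sketch (gamma/beta stand in for the two learned Parameters, and the
# small 2x8 input is synthetic):
import numpy as np

def bert_layer_norm_ref(x, gamma, beta, eps=1e-12):
    u = x.mean(axis=-1, keepdims=True)               # per-row mean
    s = ((x - u) ** 2).mean(axis=-1, keepdims=True)  # biased variance
    return gamma * (x - u) / np.sqrt(s + eps) + beta

x = np.random.rand(2, 8).astype(np.float32)
y = bert_layer_norm_ref(x, np.ones(8), np.zeros(8))
assert np.allclose(y.mean(-1), 0.0, atol=1e-5)       # normalized rows are zero-mean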
# ======================================================================
# File: src/onegov/wtfs/upgrade.py
# Repo: politbuero-kampagnen/onegov-cloud  |  License: MIT
# ======================================================================
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.

"""
from onegov.core.upgrade import upgrade_task


@upgrade_task('Add payment types')
def add_payment_types(context):
    session = context.session

    if context.has_table('wtfs_payment_type'):
        query = session.execute('SELECT count(*) FROM wtfs_payment_type')
        if not query.scalar():
            session.execute("""
                INSERT INTO wtfs_payment_type ("name", "price_per_quantity")
                VALUES ('normal', 700), ('spezial', 850);
            """)

        session.execute("""
            UPDATE groups
            SET meta = CASE
                WHEN meta @> '{"_price_per_quantity"\\:850}'::jsonb
                THEN jsonb_set(meta, '{payment_type}', '"spezial"')
                ELSE jsonb_set(meta, '{payment_type}', '"normal"')
            END
            WHERE groups.meta ? '_price_per_quantity';
        """)
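# The CASE expression above, restated in plain Python to make the mapping
# explicit (the sample metas below are made up; only groups whose meta
# already carries '_price_per_quantity' are touched by the UPDATE):
def payment_type(meta):
    return 'spezial' if meta.get('_price_per_quantity') == 850 else 'normal'

assert payment_type({'_price_per_quantity': 850}) == 'spezial'
assert payment_type({'_price_per_quantity': 700}) == 'normal'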
# ======================================================================
# File: exercises/es/test_01_04.py
# Repo: Jette16/spacy-course  |  License: MIT
# ======================================================================
def test():
    assert (
        "if token.like_num" in __solution__
    ), "¿Estás revisando el atributo del token like_num?"
    assert (
        'next_token.text == "%"' in __solution__
    ), "¿Estás revisando si el texto del siguiente token es un símbolo de porcentaje?"
    assert (
        next_token.text == "%"
    ), "¿Estás revisando si el texto del siguiente token es un símbolo de porcentaje?"

    __msg__.good(
        "¡Bien hecho! Como puedes ver hay muchos análisis poderosos que puedes hacer usando los tokens y sus atributos."
    )
# ======================================================================
# File: DataProcess/my_utils/myplot.py
# Repo: ZhangQiHang-98/RFID_Scirpt  |  License: MIT
# ======================================================================
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :DataProcess
@File    :myplot.py
@Author  :Zhang Qihang
@Date    :2021/11/8 14:41
"""
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pandas as pd
import glob
import config
import myunwrap
import numpy as np
import scipy.constants as C
from sklearn.preprocessing import scale


def phase_heatmap(phase_mat):
    sns.set_context({"figure.figsize": (8, 8)})
    sns.heatmap(phase_mat)
    plt.show()


def normalization(data):
    _range = np.max(data) - np.min(data)
    return (data - np.min(data)) / _range


def phase_scatter(df):
    phases = df["phase"].values
    times = df["time"].values
    plt.plot(times, phases)
    plt.show()


if __name__ == '__main__':
    test_path = glob.glob(os.path.join(config.PEN_PATH, '*.csv'))
    file_path = "../20220105104342normal.csv"
    df = pd.read_csv(file_path, header=None)
    df.columns = config.COMMON_COLUMNS
    df["phase"] = 2 * C.pi - df["phase"]
    phase_scatter(df)
    # print(test_path)
    # for path in test_path:
    #     phase_scatter(path)
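# Quick check of the min-max helper above, on a synthetic input:
import numpy as np
print(normalization(np.array([2.0, 4.0, 6.0])))   # -> [0.  0.5 1. ]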
# ======================================================================
# File: barcode.py
# Repo: T9C5F/packstation-barcode  |  License: MIT
# ======================================================================
#!/usr/bin/python3
# coding: utf-8

import os
import luhn  # sudo apt install python3-pip && sudo pip3 install luhn

# Deutsche Post DHL simply removed the card readers from its Packstation
# lockers and replaced them with barcode readers, without automatically
# sending Goldcard owners a replacement card in time. You can still pick up
# parcels at these stations, but you now have to type in the PostNummer by
# hand. With this script we generate the matching barcode ourselves, because
# a new card has to be ordered from DHL, the old card is blocked immediately,
# and the new one can take up to two weeks to arrive. That is about as
# customer-unfriendly as it gets.
# Security-wise nothing is violated: parcels can already be picked up by
# typing in the PostNummer, and the mapping from PostNummer to barcode has
# been described many times on the net.
# The 16-digit ITF barcode has a fairly simple structure:
# "3" + [as many '0's as needed to pad the number to 16 digits]
# + [PostNummer*631] + [Luhn check digit over 'PostNummer*631']
# http://www.frei-tag.com/index.php?/archives/445-DHL-Packstation-ohne-Goldcard.html

def generate(number):
    postnummer = int(number)

    number = postnummer * 631
    luhnnr = luhn.generate(str(number))
    number = "3" + (str(number) + str(luhnnr)).zfill(15)

    return str(number)

# Example with a randomly generated number:
# 20281557 yields 3000127976624677
assert generate("20281557") == "3000127976624677"

# Interleaved 2 of 5 ITF barcode
os.system("xdg-open https://barcode.tec-it.com/de/Code25IL?data=" + generate(20281557))
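# The check digit spelled out by hand, without the luhn package, to make the
# scheme above concrete for the example PostNummer 20281557:
def luhn_check_digit(payload):
    total = 0
    for i, ch in enumerate(reversed(payload)):
        d = int(ch)
        if i % 2 == 0:          # double every second digit, rightmost first
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return (10 - total % 10) % 10

payload = str(20281557 * 631)                      # '12797662467'
assert luhn_check_digit(payload) == 7              # Luhn digit of the payload
assert "3" + (payload + "7").zfill(15) == "3000127976624677"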
Das ist maximal kundenunfreundlich.\n# Sicherheitstechnisch liegt kein Verstoß vor, da man durch Eintippen\n# der PostNummer sowieso Pakete abholen kann und die Umrechnung von\n# PostNummer zu Barcode im Netz bereits vielfach beschrieben ist.\n# Der 16-stellige ITF-Barcode ist relativ einfach aufgebaut:\n# \"3”+”[so viele ‘0’, dass die Zahl insgesamt 16 Stellen hat]”\n# +”[Postnummer*631]”+”[Luhn-Prüfziffer über ‘Postnummer*631’]\"\n# http://www.frei-tag.com/index.php?/archives/445-DHL-Packstation-ohne-Goldcard.html\n\ndef generate(number):\n\tpostnummer = int(number)\n\n\tnumber = postnummer*631\n\tluhnnr = luhn.generate(str(number))\n\tnumber = \"3\" + (str(number)+str(luhnnr)).zfill(15)\n\n\treturn(str(number))\n\n# Beispiel anhand einer zufallsgenerierten Zahl:\n# 20281557 ergibt 3000127976624677\nassert generate(\"20281557\") == \"3000127976624677\"\n\n# Interleaved 2 of 5 ITF barcode\nos.system(\"xdg-open https://barcode.tec-it.com/de/Code25IL?data=\" + generate(20281557))\n"},"avg_line_length":{"kind":"number","value":41.8205128205,"string":"41.820513"},"max_line_length":{"kind":"number","value":87,"string":"87"},"alphanum_fraction":{"kind":"number","value":0.774984672,"string":"0.774985"}}},{"rowIdx":47296,"cells":{"hexsha":{"kind":"string","value":"a548d4044babcf769d72c98f4c36e053030cd2ea"},"size":{"kind":"number","value":214,"string":"214"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/bo4e/enum/landescode.py"},"max_stars_repo_name":{"kind":"string","value":"bo4e/BO4E-python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"28b12f853c8a496d14b133759b7aa2d6661f79a0"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-03-02T12:49:44.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-02T12:49:44.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/bo4e/enum/landescode.py"},"max_issues_repo_name":{"kind":"string","value":"bo4e/BO4E-python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"28b12f853c8a496d14b133759b7aa2d6661f79a0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":21,"string":"21"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-02-04T07:38:46.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-28T14:01:53.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/bo4e/enum/landescode.py"},"max_forks_repo_name":{"kind":"string","value":"bo4e/BO4E-python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"28b12f853c8a496d14b133759b7aa2d6661f79a0"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nDer ISO-Landescode als Enumeration.\n\"\"\"\nfrom enum import Enum\n\nfrom iso3166 import countries\n\nalpha2codes = {c.alpha2: c.alpha2 for c in countries}\n\nLandescode = Enum(\"Landescode\", alpha2codes) # type: 
ignore\n"},"avg_line_length":{"kind":"number","value":19.4545454545,"string":"19.454545"},"max_line_length":{"kind":"number","value":60,"string":"60"},"alphanum_fraction":{"kind":"number","value":0.7429906542,"string":"0.742991"}}},{"rowIdx":47297,"cells":{"hexsha":{"kind":"string","value":"a553923330965bb513b635f568c86575b16db188"},"size":{"kind":"number","value":1410,"string":"1,410"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py"},"max_stars_repo_name":{"kind":"string","value":"jonasrdt/Wirtschaftsinformatik2"},"max_stars_repo_head_hexsha":{"kind":"string","value":"30d5d896808b98664c55cb6fbb3b30a7f1904d9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2022-03-23T09:40:39.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-23T09:40:39.000Z"},"max_issues_repo_path":{"kind":"string","value":"WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py"},"max_issues_repo_name":{"kind":"string","value":"jonasrdt/Wirtschaftsinformatik2"},"max_issues_repo_head_hexsha":{"kind":"string","value":"30d5d896808b98664c55cb6fbb3b30a7f1904d9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py"},"max_forks_repo_name":{"kind":"string","value":"jonasrdt/Wirtschaftsinformatik2"},"max_forks_repo_head_hexsha":{"kind":"string","value":"30d5d896808b98664c55cb6fbb3b30a7f1904d9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"ueberweisungslimit = 50_000\nkontostand = 3_500\nungueltiger_betrag = True\n\n# Funktionsdefinition\ndef trenner(anzahl):\n for zaehler in range(anzahl):\n print(\"-\", end=\"\")\n print()\n\n# Funktionsaufruf\ntrenner(50)\nprint(\"Willkommen beim Online-Banking der DKB\")\nprint(\"Ihr Überweisungslimit beträgt\", ueberweisungslimit, \"€\")\nprint(\"Ihr aktueller Kontostand beträgt\", kontostand, \"€\")\ntrenner(50)\nwhile ungueltiger_betrag:\n try:\n betrag = round(float(input(\"Bitte geben Sie einen Überweisungbetrag in € ein: \")),2)\n if betrag > ueberweisungslimit:\n print(\"Ihr Betrag liegt über den Überweisungslimit von\", ueberweisungslimit, \"€.\")\n elif betrag < 0:\n print(\"Bitte geben Sie nur positive Zahle für eine Überweisung ein.\")\n elif betrag > kontostand:\n print(\"Leider reicht Ihr Kontostand i.H.v.\", kontostand,\"€ nicht für die Überweisung i.H.v.\", betrag,\"€ aus.\")\n entscheidung = input(\"Wollen Sie einen niedrigeren Betrag überweisen (ja/nein): \")\n if entscheidung.lower() == \"nein\":\n ungueltiger_betrag = False\n else:\n print(\"Ihre Überweisung i.H.v.\", betrag,\"€ wurde durchgeführt.\")\n kontostand -= betrag\n print(\"Ihr neuer Kontostand beträgt:\", kontostand, \"€.\")\n ungueltiger_betrag = False\n except:\n print(\"Bitte geben Sie nur Zahlen 
ein.\")\n\n"},"avg_line_length":{"kind":"number","value":38.1081081081,"string":"38.108108"},"max_line_length":{"kind":"number","value":122,"string":"122"},"alphanum_fraction":{"kind":"number","value":0.6475177305,"string":"0.647518"}}},{"rowIdx":47298,"cells":{"hexsha":{"kind":"string","value":"a5916d10fd080c93d3ed806b1372e236b4a374e6"},"size":{"kind":"number","value":1078,"string":"1,078"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Vergleich-Display/upy.py"},"max_stars_repo_name":{"kind":"string","value":"aboehm/CLT2019"},"max_stars_repo_head_hexsha":{"kind":"string","value":"51b9b5674b5ed18297c5ee7e825888d632d96a0e"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-07-01T11:59:06.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-07-01T11:59:06.000Z"},"max_issues_repo_path":{"kind":"string","value":"Vergleich-Display/upy.py"},"max_issues_repo_name":{"kind":"string","value":"aboehm/CLT2019"},"max_issues_repo_head_hexsha":{"kind":"string","value":"51b9b5674b5ed18297c5ee7e825888d632d96a0e"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Vergleich-Display/upy.py"},"max_forks_repo_name":{"kind":"string","value":"aboehm/CLT2019"},"max_forks_repo_head_hexsha":{"kind":"string","value":"51b9b5674b5ed18297c5ee7e825888d632d96a0e"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n \"BSD-2-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import machine\nfrom display import ssd1306\n\ndisplay = None\nrun = 0\n\ndef setup():\n global display\n\n from machine import I2C, Pin\n import uos\n\n _, nodename, _, _, _ = uos.uname()\n if nodename == 'esp32':\n i2c = I2C(freq=400000, scl=machine.Pin(22), sda=machine.Pin(21))\n elif nodename == 'pyboard':\n i2c = I2C(freq=400000, scl=machine.Pin('X9'), sda=machine.Pin('X10'))\n else:\n raise Exception('No compatible board found')\n\n display = ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3c)\n\ndef stress(show=True):\n global display, run\n\n display.fill(0)\n\n run += 1\n\n display.text('CLT2019 %i uPython' % (run), 0, 0)\n\n i = 0\n\n for y in range(8, 56, 8):\n for x in range(0, 119, 8):\n display.text('%c' % (ord('0') + (((run + i) * 17) % 36)), x, y)\n i += 1\n\n if show:\n display.show()\n\ndef loop():\n loops = 100\n\n print('Stressing library and io ...')\n for i in range(loops):\n stress(show=True)\n print('Done')\n\ntry:\n setup()\n while True:\n loop()\nexcept:\n 
pass\n"},"avg_line_length":{"kind":"number","value":19.25,"string":"19.25"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.5584415584,"string":"0.558442"}}},{"rowIdx":47299,"cells":{"hexsha":{"kind":"string","value":"3c8ae15a51f7ed8aa4a4793f2527828936db7e90"},"size":{"kind":"number","value":294,"string":"294"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"FUNDASTORE/APPS/PRODUCTOS/forms.py"},"max_stars_repo_name":{"kind":"string","value":"GabrielB-07/FundaStore-cgb"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b509a9743a651344b32dd7a40ab789f1db48e54b"},"max_stars_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"FUNDASTORE/APPS/PRODUCTOS/forms.py"},"max_issues_repo_name":{"kind":"string","value":"GabrielB-07/FundaStore-cgb"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b509a9743a651344b32dd7a40ab789f1db48e54b"},"max_issues_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"FUNDASTORE/APPS/PRODUCTOS/forms.py"},"max_forks_repo_name":{"kind":"string","value":"GabrielB-07/FundaStore-cgb"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b509a9743a651344b32dd7a40ab789f1db48e54b"},"max_forks_repo_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n \"CC0-1.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django import forms\nfrom .models import Producto\n\nclass FormularioProducto(forms.ModelForm):\n class Meta:\n model = Producto\n fields = '__all__'\n labels = {'pro_nombre': 'NOMBRE','pro_precio': 'PRECIO','pro_stock': 'STOCK','pro_descripcion':'DESCRIPCIÖN'}\n \n"},"avg_line_length":{"kind":"number","value":29.4,"string":"29.4"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.6632653061,"string":"0.663265"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":472,"numItemsPerPage":100,"numTotalItems":48262,"offset":47200,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjM4MTUzNiwic3ViIjoiL2RhdGFzZXRzL2Jqb2VybnAvdGhlLXN0YWNrLWRlZHVwLXB5dGhvbi1kZXVfTGF0biIsImV4cCI6MTc1NjM4NTEzNiwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.T0Kaua7SSqmsFBDAZCgawJtmETZ5vt7RhUC02q6GIYV1N8vTJXfC9O-tgllvv_LY4m9kpLr95Dzl4VfF-qRBDw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
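The `luhn.generate` call in the barcode.py row above supplies a standard Luhn check digit. As a cross-check, here is a minimal dependency-free sketch of that computation; the function name `luhn_check_digit` is mine, not part of the `luhn` package:

def luhn_check_digit(payload: str) -> int:
    # Double every second digit starting from the rightmost payload digit,
    # subtract 9 from two-digit results, sum everything, and return the digit
    # that brings the total up to a multiple of 10.
    total = 0
    for i, ch in enumerate(reversed(payload)):
        d = int(ch)
        if i % 2 == 0:
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return (10 - total % 10) % 10

# Reproduces the example from the row above: 20281557 * 631 = 12797662467,
# check digit 7, zero-padded to 15 digits and prefixed with "3".
payload = str(20281557 * 631)
assert "3" + (payload + str(luhn_check_digit(payload))).zfill(15) == "3000127976624677"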
== dataset schema (29 columns; every row below lists one value per column, in this order) ==

column                                      dtype          min     max / classes
hexsha                                      stringlengths  40      40
size                                        int64          6       782k
ext                                         stringclasses  7 values
lang                                        stringclasses  1 value
max_stars_repo_path                         stringlengths  4       237
max_stars_repo_name                         stringlengths  6       72
max_stars_repo_head_hexsha                  stringlengths  40      40
max_stars_repo_licenses                     list
max_stars_count                             int64          1       53k
max_stars_repo_stars_event_min_datetime     stringlengths  24      24
max_stars_repo_stars_event_max_datetime     stringlengths  24      24
max_issues_repo_path                        stringlengths  4       184
max_issues_repo_name                        stringlengths  6       72
max_issues_repo_head_hexsha                 stringlengths  40      40
max_issues_repo_licenses                    list
max_issues_count                            int64          1       27.1k
max_issues_repo_issues_event_min_datetime   stringlengths  24      24
max_issues_repo_issues_event_max_datetime   stringlengths  24      24
max_forks_repo_path                         stringlengths  4       184
max_forks_repo_name                         stringlengths  6       72
max_forks_repo_head_hexsha                  stringlengths  40      40
max_forks_repo_licenses                     list
max_forks_count                             int64          1       12.2k
max_forks_repo_forks_event_min_datetime     stringlengths  24      24
max_forks_repo_forks_event_max_datetime     stringlengths  24      24
content                                     stringlengths  6       782k
avg_line_length                             float64        2.75    664k
max_line_length                             int64          5       782k
alphanum_fraction                           float64        0       1
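For reference, a minimal sketch of how one raw 29-cell row maps onto this schema. The `COLUMNS` list simply restates the column order above; `row_to_record` and the `values` argument are illustrative of the dump format, not an official loader:

COLUMNS = [
    "hexsha", "size", "ext", "lang",
    "max_stars_repo_path", "max_stars_repo_name", "max_stars_repo_head_hexsha",
    "max_stars_repo_licenses", "max_stars_count",
    "max_stars_repo_stars_event_min_datetime", "max_stars_repo_stars_event_max_datetime",
    "max_issues_repo_path", "max_issues_repo_name", "max_issues_repo_head_hexsha",
    "max_issues_repo_licenses", "max_issues_count",
    "max_issues_repo_issues_event_min_datetime", "max_issues_repo_issues_event_max_datetime",
    "max_forks_repo_path", "max_forks_repo_name", "max_forks_repo_head_hexsha",
    "max_forks_repo_licenses", "max_forks_count",
    "max_forks_repo_forks_event_min_datetime", "max_forks_repo_forks_event_max_datetime",
    "content", "avg_line_length", "max_line_length", "alphanum_fraction",
]

def row_to_record(values):
    # Pair the 29 dumped cell values with their column names.
    assert len(values) == len(COLUMNS)
    return dict(zip(COLUMNS, values))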
== python/etc/preprocessing/jisc/jisc_preprocessing.py ==
hexsha: 57e11bef0ed4bb5532b0fb2f87ab3defa3ee0f08 | size: 17,229 | ext: py | lang: Python
repo: sma-h/openapc-de @ 0ec2d42d525219d801f71538f5b30ca6fecd9d3a | licenses: ["Cube"]
stars: 89 (2015-02-13T13:46:06.000Z ... 2022-03-13T16:42:44.000Z) | issues: 91 (2015-03-12T13:31:36.000Z ... 2022-01-14T07:37:37.000Z) | forks: 138 (2015-03-04T15:23:43.000Z ... 2022-03-09T15:11:52.000Z)
content:

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import argparse
import csv
import datetime
import json
from os import path
import re
import sys
from urllib.error import HTTPError, URLError

ARG_HELP_STRINGS = {
    "source_file": "The jisc csv file",
    "exchange_rates_cache_file": "An optional cache file for ECB exchange rates",
    "no_decorations": "Do not use ANSI coded colors in console output",
    "jisc_file_format": "The format type of the Jisc input file"
}

FIELDNAMES = {
    "2014_16": {
        "article": [
            "APC paid (actual currency) including VAT if charged", "APC paid (£) including VAT (calculated)",
            "APC paid (£) including VAT if charged", "Currency of APC", "DOI", "Date of APC payment",
            "Date of initial application by author", "ISSN0", "Institution", "Journal", "Licence",
            "PubMed Central (PMC) ID", "PubMed ID", "Publisher", "TCO year", "Type of publication",
            "Drop?", "Year of publication", "period", "is_hybrid", "euro"
        ],
        "book": [
            "Line number", "APC paid (actual currency) including VAT if charged",
            "APC paid (£) including VAT (calculated)", "APC paid (£) including VAT if charged",
            "Article title", "Currency of APC", "DOI", "Date of APC payment",
            "Date of initial application by author", "Institution", "Journal", "Licence", "Publisher",
            "TCO year", "Type of publication", "Year of publication", "period", "euro", "ISBN"
        ]
    },
    "2017": {
        "article": [
            "APC paid (£) including VAT if charged", "DOI", "Date of APC payment", "ISSN0", "Institution",
            "Journal", "Licence", "PubMed ID", "Publisher", "TCO year", "Type of publication", "Drop?",
            "Period of APC payment", "period", "is_hybrid", "euro"
        ],
        "book": [
            "Line number", "APC paid (£) including VAT if charged", "Article title", "DOI",
            "Date of APC payment", "Institution", "Journal", "Licence", "Publisher", "TCO year",
            "Type of publication", "Period of APC payment", "period", "euro", "ISBN"
        ]
    },
    "2018": {
        "article": [
            "Institution", "Date of acceptance", "PubMed ID", "DOI", "Publisher", "Journal",
            "Type of publication", "Date of publication", "Date of APC payment",
            "APC paid (£) including VAT if charged", "period", "is_hybrid", "euro"
        ],
        "book": [
            "Line number", "Institution", "Date of acceptance", "DOI", "Publisher", "Journal",
            "Type of publication", "Article title", "Date of publication", "Date of APC payment",
            "APC paid (£) including VAT if charged", "period", "euro", "ISBN"
        ]
    }
}

PUBLICATION_TYPES_BL = [
    "Book chapter",
    "Book edited",
    "Conference Paper/Proceeding/Abstract",
    "Letter"
]

PUBLICATION_TYPES_BOOKS = [
    "Book",
    "Monograph"
]

DATE_DAY_RE = {
    "2014_16": re.compile("(?P<year>[0-9]{4})-?(?P<month>[0-9]{2})?-?(?P<day>[0-9]{2})?"),
    "2017": re.compile("(?P<year>[0-9]{4})-?(?P<month>[0-9]{2})?-?(?P<day>[0-9]{2})?"),
    "2018": re.compile("(?P<month>[0-9]{1,2})/(?P<day>[0-9]{1,2})/(?P<year>[0-9]{4})")
}

DATE_STRPTIME = {
    "2014_16": "%Y-%m-%d",
    "2017": "%Y-%m-%d",
    "2018": "%m/%d/%Y"
}

PERIOD_FIELD_SOURCE = {
    "2014_16": ["Date of APC payment", "Year of publication", "Date of initial application by author", "TCO year"],
    "2017": ["Date of APC payment", "Period of APC payment", "TCO year"],
    "2018": ["Date of APC payment", "Date of publication", "Date of acceptance"]
}

EXCHANGE_RATES_CACHE = {}
EXCHANGE_RATES_CACHE_FILE = None
DELETE_REASONS = {}

CURRENT_YEAR = 2017
#CURRENT_YEAR = datetime.datetime.now().year

NO_DECORATIONS = False


def delete_line(line_dict, reason):
    _print("r", " - " + reason + ", line deleted")
    if reason not in DELETE_REASONS:
        DELETE_REASONS[reason] = 1
    else:
        DELETE_REASONS[reason] += 1
    for key in line_dict:
        line_dict[key] = ""


def line_as_list(line_dict, pub_type):
    return [line_dict[field] for field in FIELDNAMES[FORMAT][pub_type]]


def is_money_value(string):
    try:
        number = float(string)
        return number > 0
    except ValueError:
        return False


def is_valid_date(date_match_obj):
    gd = date_match_obj.groupdict()
    if gd["year"] is None or gd["month"] is None or gd["day"] is None:
        return False
    try:
        date = datetime.datetime(int(gd["year"]), int(gd["month"]), int(gd["day"]))
        if date > datetime.datetime.now():
            return False
        return True
    except ValueError:
        return False


def shutdown():
    _print("r", "Updating exchange rates cache...")
    with open(EXCHANGE_RATES_CACHE_FILE, "w") as f:
        f.write(json.dumps(EXCHANGE_RATES_CACHE, sort_keys=True, indent=4, separators=(',', ': ')))
        f.flush()
    _print("r", "Done.")
    sys.exit()


def _print(color, s):
    if color in ["r", "y", "g", "b"] and not NO_DECORATIONS:
        getattr(oat, "print_" + color)(s)
    else:
        print(s)


def get_exchange_rate(currency, frequency, date, jisc_format):
    if currency not in EXCHANGE_RATES_CACHE:
        EXCHANGE_RATES_CACHE[currency] = {}
    if frequency not in EXCHANGE_RATES_CACHE[currency]:
        EXCHANGE_RATES_CACHE[currency][frequency] = {}
    if not len(EXCHANGE_RATES_CACHE[currency][frequency]):
        try:
            rates = oat.get_euro_exchange_rates(currency, frequency)
            EXCHANGE_RATES_CACHE[currency][frequency] = rates
        except HTTPError as httpe:
            _print("r", "HTTPError while querying the ECB data warehouse: " + httpe.reason)
            shutdown()
        except URLError as urle:
            _print("r", "URLError while querying the ECB data warehouse: " + urle.reason)
            shutdown()
        except ValueError as ve:
            _print("r", "ValueError while querying the ECB data warehouse: " + ve.reason)
            shutdown()
    if frequency == "D":
        # The ECB does not report exchange rate for all dates due to weekends/holidays. We have
        # consider some days in advance to find the next possible data in some cases.
        day = datetime.datetime.strptime(date, DATE_STRPTIME[jisc_format])
        for i in range(6):
            future_day = day + datetime.timedelta(days=i)
            search_day = future_day.strftime("%Y-%m-%d")
            if search_day in EXCHANGE_RATES_CACHE[currency][frequency]:
                _print("y", " [Exchange rates: Cached value used]")
                if i > 0:
                    msg = " [Exchange rates: No rate found for date {}, used value for {} instead]"
                    _print("y", msg.format(date, search_day))
                return EXCHANGE_RATES_CACHE[currency][frequency][search_day]
        _print("r", "Error during Exchange rates lookup: No rate for " + date + " or any following day!")
        shutdown()
    else:
        return EXCHANGE_RATES_CACHE[currency][frequency][date]


def calculate_euro_value(line, jisc_format):
    payment_date = line["Date of APC payment"]
    date_match = DATE_DAY_RE[jisc_format].match(payment_date)
    if jisc_format in ["2017", "2018"]:
        apc_pound = line["APC paid (£) including VAT if charged"]
        field_used_for_pound_value = "APC paid (£) including VAT if charged"
    elif jisc_format == "2014_16":
        apc_orig = line["APC paid (actual currency) including VAT if charged"]
        apc_pound = ""
        field_used_for_pound_value = ""
        for field in ["APC paid (£) including VAT (calculated)", "APC paid (£) including VAT if charged"]:
            if is_money_value(line[field]):
                apc_pound = line[field]
                field_used_for_pound_value = field
                break
        if is_money_value(apc_orig):
            currency = line["Currency of APC"].strip()
            if currency == "EUR":
                line["euro"] = apc_orig
                msg = " - Created euro field ('{}') by using the value in 'APC paid (actual currency) including VAT if charged' directly since the currency is EUR"
                _print("g", msg.format(apc_orig))
            elif len(currency) == 3:
                if date_match and is_valid_date(date_match):
                    rate = get_exchange_rate(currency, "D", payment_date, jisc_format)
                    euro_value = round(float(apc_orig) / float(rate), 2)
                    line["euro"] = str(euro_value)
                    msg = " - Created euro field ('{}') by dividing the value in 'APC paid (actual currency) including VAT if charged' ({}) by {} (EUR -> {} conversion rate on {}) [ECB]"
                    msg = msg.format(euro_value, apc_orig, rate, currency, payment_date)
                    _print("g", msg)
                else:
                    year = line["period"]
                    if int(year) >= CURRENT_YEAR:
                        del_msg = "period ({}) too recent to determine average yearly conversion rate".format(year)
                        delete_line(line, del_msg)
                        return
                    try:
                        rate = get_exchange_rate(currency, "A", year, jisc_format)
                    except KeyError:
                        _print("r", "KeyError: An average yearly conversion rate is missing (" + currency + ", " + year + ")")
                        shutdown()
                    euro_value = round(float(apc_orig) / float(rate), 2)
                    line["euro"] = str(euro_value)
                    msg = " - Created euro field ('{}') by dividing the value in 'APC paid (actual currency) including VAT if charged' ({}) by {} (avg EUR -> {} conversion rate in {}) [ECB]"
                    msg = msg.format(euro_value, apc_orig, rate, currency, year)
                    _print("g", msg)
    if line["euro"] == "" and is_money_value(apc_pound):
        if date_match and is_valid_date(date_match):
            rate = get_exchange_rate("GBP", "D", payment_date, jisc_format)
            euro_value = round(float(apc_pound) / float(rate), 2)
            line["euro"] = str(euro_value)
            msg = " - Created euro field ('{}') by dividing the value in '{}' ({}) by {} (EUR -> GBP conversion rate on {}) [ECB]"
            msg = msg.format(euro_value, field_used_for_pound_value, apc_pound, rate, payment_date)
            _print("g", msg)
        else:
            year = line["period"]
            if int(year) > CURRENT_YEAR:
                del_msg = "period ({}) too recent to determine average yearly conversion rate".format(year)
                delete_line(line, del_msg)
                return
            try:
                rate = get_exchange_rate("GBP", "A", year, jisc_format)
            except KeyError:
                _print("r", "KeyError: An average yearly conversion rate is missing (GBP, " + year + ")")
                shutdown()
            euro_value = round(float(apc_pound) / float(rate), 2)
            line["euro"] = str(euro_value)
            msg = " - Created euro field ('{}') by dividing the value in '{}' ({}) by {} (avg EUR -> GBP conversion rate in {}) [ECB]"
            msg = msg.format(euro_value, field_used_for_pound_value, apc_pound, rate, year)
            _print("g", msg)
    if line["euro"] == "":
        delete_line(line, "Unable to properly calculate a converted euro value")


def main():
    global EXCHANGE_RATES_CACHE, EXCHANGE_RATES_CACHE_FILE, NO_DECORATIONS, FORMAT
    parser = argparse.ArgumentParser()
    parser.add_argument("source_file", help=ARG_HELP_STRINGS["source_file"])
    parser.add_argument("jisc_file_format", choices=list(FIELDNAMES), help=ARG_HELP_STRINGS["jisc_file_format"])
    parser.add_argument("-c", "--exchange_rates_cache_file", help=ARG_HELP_STRINGS["exchange_rates_cache_file"], default="_exchange_rates_cache.json")
    parser.add_argument("-n", "--no-decorations", help=ARG_HELP_STRINGS["no_decorations"], action="store_true")
    args = parser.parse_args()
    NO_DECORATIONS = args.no_decorations
    EXCHANGE_RATES_CACHE_FILE = args.exchange_rates_cache_file
    FORMAT = args.jisc_file_format
    if path.isfile(args.exchange_rates_cache_file):
        with open(EXCHANGE_RATES_CACHE_FILE, "r") as f:
            try:
                EXCHANGE_RATES_CACHE = json.loads(f.read())
            except ValueError:
                _print("r", "Could not decode a cache structure from " + EXCHANGE_RATES_CACHE_FILE + ", starting with an empty cache.")
    f = open(args.source_file, "r", encoding="utf-8")
    reader = csv.DictReader(f)
    article_content = [list(FIELDNAMES[FORMAT]["article"])]
    book_content = [list(FIELDNAMES[FORMAT]["book"])]
    empty_article_line = ["" for i in range(len(FIELDNAMES[FORMAT]["article"]))]
    empty_book_line = ["" for i in range(len(FIELDNAMES[FORMAT]["book"]))]
    for line in reader:
        line["period"] = ""
        line["euro"] = ""
        line["Journal"] = line["Journal"].replace("\n", " ")
        _print("b", "--- Analysing line " + str(reader.line_num) + " ---")
        is_book = False
        pub_type = line["Type of publication"]
        if pub_type in PUBLICATION_TYPES_BOOKS:
            line["Line number"] = str(reader.line_num)
            line["ISBN"] = ""
            is_book = True
        else:
            line["is_hybrid"] = ""
        # Publication blacklist checking
        if pub_type in PUBLICATION_TYPES_BL and not is_book:
            delete_line(line, "Blacklisted pub type ('" + pub_type + "')")
            article_content.append(list(empty_article_line))
            continue
        # DOI checking
        if len(line["DOI"].strip()) == 0 and not is_book:
            delete_line(line, "Empty DOI")
            article_content.append(list(empty_article_line))
            continue
        # Drop checking
        if "Drop?" in FIELDNAMES[FORMAT]["article"] and line["Drop?"] == "1":
            delete_line(line, "Drop mark found")
            article_content.append(list(empty_article_line))
            continue
        # period field generation
        for source_field in PERIOD_FIELD_SOURCE[FORMAT]:
            content = line[source_field].strip()
            match = DATE_DAY_RE[FORMAT].match(content)
            if match:
                year = match.groupdict()["year"]
                if int(year) > CURRENT_YEAR:
                    continue
                line["period"] = year
                msg = " - Created period field ('{}') by parsing value '{}' in column '{}'".format(year, content, source_field)
                _print("g", msg)
                break
        else:
            delete_line(line, "Unable to determine payment date for period column")
            article_content.append(list(empty_article_line))
            continue
        # euro field generation
        calculate_euro_value(line, FORMAT)
        if is_book:
            if line["Line number"] != "":
                book_content.append(line_as_list(line, "book"))
            delete_line(line, "Book content (extracted to separate file)")
            article_content.append(list(empty_article_line))
        else:
            article_content.append(line_as_list(line, "article"))
    with open('out.csv', 'w') as out:
        writer = oat.OpenAPCUnicodeWriter(out, None, False, True)
        writer.write_rows(article_content)
    with open('out_books.csv', 'w') as out:
        writer = oat.OpenAPCUnicodeWriter(out, None, False, True)
        writer.write_rows(book_content)
    print("\n\nPreprocessing finished, deleted articles overview:")
    sorted_reasons = sorted(DELETE_REASONS.items(), key=lambda x: x[1])
    sorted_reasons.reverse()
    for item in sorted_reasons:
        _print("r", item[0].ljust(72) + str(item[1]))
    _print("r,", "-------------------------------------------------")
    _print("r", "Total".ljust(72) + str(sum(DELETE_REASONS.values())))
    shutdown()


if __name__ == '__main__' and __package__ is None:
    sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))))
    import openapc_toolkit as oat
    main()

avg_line_length: 38.371938 | max_line_length: 192 | alphanum_fraction: 0.555111
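Per its argparse setup, the script above is invoked as `python jisc_preprocessing.py <csv file> {2014_16,2017,2018} [-c CACHE] [-n]`. The euro field it derives is a single division and rounding step; a standalone sketch of that step with made-up numbers (1575.00 GBP and a 0.85 EUR-to-GBP rate are illustrative only, not ECB data):

apc_pound = 1575.00  # hypothetical APC in GBP
rate = 0.85          # hypothetical ECB EUR -> GBP rate for the payment date
euro_value = round(float(apc_pound) / float(rate), 2)  # same formula as calculate_euro_value()
print(euro_value)    # 1852.94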
== methods/transformers/examples/seq2seq/save_len_file.py ==
hexsha: 57ea82b16b274f5350cda5f8ffb8033e48a01085 | size: 1,548 | ext: py | lang: Python
repo: INK-USC/RiddleSense @ a3d57eaf084da9cf6b77692c608e2cd2870fbd97 | licenses: ["MIT"]
stars: 3 (2021-07-06T20:02:31.000Z ... 2022-03-27T13:13:01.000Z) | issues: null | forks: null
content:

#!/usr/bin/env python

import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)

avg_line_length: 35.181818 | max_line_length: 112 | alphanum_fraction: 0.654393
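Because the script hands `save_len_file` to `fire.Fire`, its parameters map one-to-one onto CLI arguments. A hedged call sketch; the tokenizer name and data directory are placeholders, and `utils.Seq2SeqDataset` must be importable from the same examples folder:

# Equivalent to: python save_len_file.py t5-small ./xsum_data --consider_target True
save_len_file("t5-small", "./xsum_data", max_source_length=512, max_target_length=128, consider_target=True)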
== research/cv/pointnet2/src/pointnet2.py ==
hexsha: 57ec6408fc157866d0a81c58f4feac352152e619 | size: 3,450 | ext: py | lang: Python
repo: leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | licenses: ["Apache-2.0"]
stars: 1 (2021-11-18T08:17:44.000Z ... 2021-11-18T08:17:44.000Z) | issues: null | forks: 2 (2019-09-01T06:17:04.000Z ... 2019-10-04T08:39:45.000Z)
content:

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""network definition"""
import mindspore.nn as nn
import mindspore.ops as P
from mindspore.nn.loss.loss import _Loss
from mindspore.ops import functional as F

from src.layers import Dense
from src.pointnet2_utils import PointNetSetAbstraction


class PointNet2(nn.Cell):
    """PointNet2"""

    def __init__(self, num_class, normal_channel=False):
        super(PointNet2, self).__init__()
        in_channel = 6 if normal_channel else 3
        self.normal_channel = normal_channel
        self.sa1 = PointNetSetAbstraction(npoint=512, radius=0.2, nsample=32, in_channel=in_channel,
                                          mlp=[64, 64, 128], group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=128 + 3,
                                          mlp=[128, 128, 256], group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=256 + 3,
                                          mlp=[256, 512, 1024], group_all=True)
        self.fc1 = Dense(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.drop1 = nn.Dropout(0.6)
        self.fc2 = Dense(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.drop2 = nn.Dropout(0.5)
        self.fc3 = Dense(256, num_class)
        self.relu = P.ReLU()
        self.reshape = P.Reshape()
        self.log_softmax = P.LogSoftmax()
        self.transpose = P.Transpose()

    def construct(self, xyz):
        """construct method"""
        if self.normal_channel:
            norm = self.transpose(xyz[:, :, 3:], (0, 2, 1))
            xyz = xyz[:, :, :3]
        else:
            norm = None
        l1_xyz, l1_points = self.sa1(xyz, norm)  # [B, 3, 512], [B, 128, 512]
        l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)  # [B, 3, 128], [B, 256, 128]
        _, l3_points = self.sa3(l2_xyz, l2_points)  # [B, 3, 1], [B, 1024, 1]
        x = self.reshape(l3_points, (-1, 1024))
        x = self.drop1(self.relu(self.bn1(self.fc1(x))))
        x = self.drop2(self.relu(self.bn2(self.fc2(x))))
        x = self.fc3(x)
        x = self.log_softmax(x)
        return x


class NLLLoss(_Loss):
    """NLL loss"""

    def __init__(self, reduction='mean'):
        super(NLLLoss, self).__init__(reduction)
        self.one_hot = P.OneHot()
        self.reduce_sum = P.ReduceSum()

    def construct(self, logits, label):
        """construct method"""
        label_one_hot = self.one_hot(label, F.shape(logits)[-1], F.scalar_to_array(1.0), F.scalar_to_array(0.0))
        loss = self.reduce_sum(-1.0 * logits * label_one_hot, (1,))
        return loss

avg_line_length: 37.5 | max_line_length: 112 | alphanum_fraction: 0.576232
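A minimal shape-check sketch for the model above, assuming a working MindSpore install and the repo's `src` package on the import path; the batch size, point count, and class count are arbitrary:

import numpy as np
import mindspore as ms

model = PointNet2(num_class=40, normal_channel=False)
xyz = ms.Tensor(np.random.rand(8, 1024, 3), ms.float32)  # [batch, points, xyz only]
log_probs = model(xyz)  # construct() returns log-softmax scores, shape [8, 40]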
== Problems/Depth-First Search/medium/NumberEnclaves/number_of_enclaves.py ==
hexsha: a4f4e23a6d735a570fdd38b211528a6cdd0610d9 | size: 1,189 | ext: py | lang: Python
repo: dolong2110/Algorithm-By-Problems-Python @ 31ecc7367aaabdd2b0ac0af7f63ca5796d70c730 | licenses: ["MIT"]
stars: 1 (2021-08-16T14:52:05.000Z ... 2021-08-16T14:52:05.000Z) | issues: null | forks: null
content:

from typing import List


def numEnclaves(self, grid: List[List[int]]) -> int:
    m, n = len(grid), len(grid[0])

    def dfs(row: int, col: int) -> int:
        if row < 0 or row >= m or col < 0 or col >= n:
            return - (m * n)
        if grid[row][col] == 0:
            return 0
        grid[row][col] = 0
        top = dfs(row - 1, col)
        down = dfs(row + 1, col)
        left = dfs(row, col - 1)
        right = dfs(row, col + 1)
        return 1 + top + down + left + right

    ans = 0
    for i in range(m):
        for j in range(n):
            if grid[i][j] == 1:
                enclosed = dfs(i, j)
                ans += enclosed if enclosed > 0 else 0
    return ans

# def numEnclaves(self, A: List[List[int]]) -> int:
#     def dfs(i, j):
#         A[i][j] = 0
#         for x, y in (i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1):
#             if 0 <= x < m and 0 <= y < n and A[x][y]:
#                 dfs(x, y)
#
#     m, n = len(A), len(A[0])
#     for i in range(m):
#         for j in range(n):
#             if A[i][j] == 1 and (i == 0 or j == 0 or i == m - 1 or j == n - 1):
#                 dfs(i, j)
#     return sum(sum(row) for row in A)

avg_line_length: 27.022727 | max_line_length: 81 | alphanum_fraction: 0.417998
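Note the stray `self` parameter: the function was evidently lifted from a LeetCode `Solution` class, so at module level it must be called with a dummy first argument. A quick worked example under that caveat (the grid is LeetCode 1020's first example; the border-touching land cell at (1,0) yields a negative region sum and is excluded):

grid = [[0, 0, 0, 0],
        [1, 0, 1, 0],
        [0, 1, 1, 0],
        [0, 0, 0, 0]]
print(numEnclaves(None, grid))  # 3 -- the three 1s not connected to the border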
== plugins/tff_backend/bizz/rogerthat.py ==
hexsha: 3527e785eb348ee6938e6cb836b183317a0e5f7c | size: 3,688 | ext: py | lang: Python
repo: threefoldfoundation/app_backend @ b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a | licenses: ["Apache-2.0"]
stars: null | issues: 178 (2017-08-02T12:58:06.000Z ... 2017-12-20T15:01:12.000Z) | forks: 2 (2018-01-10T10:43:12.000Z ... 2018-03-18T10:42:23.000Z)
content:

# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@

import json
import logging

from google.appengine.api import users

from mcfw.rpc import arguments, returns
from plugins.rogerthat_api.api import system, messaging, RogerthatApiException
from plugins.rogerthat_api.to import MemberTO
from plugins.rogerthat_api.to.messaging import Message, AnswerTO
from plugins.rogerthat_api.to.messaging.service_callback_results import TYPE_FLOW, FlowCallbackResultTypeTO, \
    FlowMemberResultCallbackResultTO
from plugins.tff_backend.bizz import get_tf_token_api_key
from plugins.tff_backend.bizz.service import get_main_branding_hash
from plugins.tff_backend.plugin_consts import FLOW_ERROR_MESSAGE
from plugins.tff_backend.utils.app import get_app_user_tuple


def put_user_data(api_key, user_email, app_id, updated_user_data, retry=True):
    # type: (unicode, unicode, unicode, dict, bool) -> None
    try:
        system.put_user_data(api_key, user_email, app_id, updated_user_data)
    except RogerthatApiException as e:
        if retry and e.code == 60011:  # user not in friend list
            raise Exception(e.message)  # ensure task is retried
        raise


@returns(unicode)
@arguments(member=MemberTO, message=unicode, answers=(None, [AnswerTO]), flags=(int, long), api_key=unicode)
def send_rogerthat_message(member, message, answers=None, flags=None, api_key=None):
    # type: (MemberTO, unicode, list[AnswerTO], int, unicode) -> unicode
    flags = flags if flags is not None else Message.FLAG_AUTO_LOCK
    if not answers:
        flags = flags | Message.FLAG_ALLOW_DISMISS
        answers = []
    return messaging.send(api_key=api_key or get_tf_token_api_key(),
                          parent_message_key=None,
                          members=[member],
                          message=message,
                          answers=answers or [],
                          flags=flags,
                          alert_flags=Message.ALERT_FLAG_VIBRATE,
                          branding=get_main_branding_hash(),
                          tag=None)


@returns(unicode)
@arguments(member=(MemberTO, users.User), flow=unicode)
def send_rogerthat_flow(member, flow):
    if isinstance(member, users.User):
        human_user, app_id = get_app_user_tuple(member)
        member = MemberTO(member=human_user.email(), app_id=app_id, alert_flags=Message.ALERT_FLAG_VIBRATE)
    messaging.start_local_flow(api_key=get_tf_token_api_key(), xml=None, members=[member], flow=flow)


def create_error_message(message=None):
    logging.debug('Sending error message')
    if not message:
        message = u'Oh no! An error occurred.\nHow embarrassing :-(\n\nPlease try again later.'
    result = FlowCallbackResultTypeTO(flow=FLOW_ERROR_MESSAGE,
                                      tag=None,
                                      force_language=None,
                                      flow_params=json.dumps({'message': message}))
    return FlowMemberResultCallbackResultTO(type=TYPE_FLOW, value=result)

avg_line_length: 43.388235 | max_line_length: 110 | alphanum_fraction: 0.68167
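Of the three helpers above, only `create_error_message` can be exercised without a live Rogerthat backend; a small sketch under that assumption (the message text is arbitrary):

result = create_error_message(u'Payment service temporarily unavailable.')
# Wraps the text in a FlowCallbackResultTypeTO pointing at FLOW_ERROR_MESSAGE
# and returns it inside a TYPE_FLOW FlowMemberResultCallbackResultTO.
print(result.value.flow_params)  # '{"message": "Payment service temporarily unavailable."}'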
== challenge/Thiemo/test.py ==
hexsha: 1077af5d072f08e925497a9eb5ae52458f753edd | size: 1,830 | ext: py | lang: Python
repo: florianletsch/kinect-juggling @ f320cc0b55adf65d338d25986a03106a7e3f46ef | licenses: ["Unlicense", "MIT"]
stars: 7 (2015-11-27T09:53:32.000Z ... 2021-01-13T17:35:54.000Z) | issues: null | forks: null
content:

import timeit

tests = [
    ('Summierung uint8', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import summierungInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
b = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''', 'alg.sum(a, b)'),
    ('Summierung float32', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import summierungFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
b = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''', 'alg.sum(a, b)'),
    ('Schwellenwert uint8', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import schwellenwertInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''', 'alg.scalar(a, 125)'),
    ('Schwellenwert float32', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import schwellenwertFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''', 'alg.scalar(a, 125.0)'),
    ('Histogramm uint8', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import histogrammInt as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.uint8)
''', 'alg.histogramm(a)'),
    ('Histogramm float32', '''
import pyximport; import numpy as np
pyximport.install(setup_args={'include_dirs': np.get_include()})
import histogrammFloat as alg
a = np.random.random_integers(0,255, %(dim)s).astype(np.float32)
''', 'alg.histogramm(a)')
]

for name, setup, cmd in tests:
    for dim in ('(204,204)','(409,409)'):
        print '%s %s: %f' % (name, dim, timeit.timeit(cmd, setup=setup % {'dim' : dim}, number=100))

avg_line_length: 26.142857 | max_line_length: 100 | alphanum_fraction: 0.708743
== Paddle_Industry_Practice_Sample_Library/nlp_projects/nlu/bilstm_with_crf/data.py ==
hexsha: dc47572676cafedf0e1f868c7160abfa4617d015 | size: 4,013 | ext: py | lang: Python
repo: linuxonly801/awesome-DeepLearning @ b063757fa130c4d56aea5cce2e592610f1e169f9 | licenses: ["Apache-2.0"]
stars: 1 (2022-01-12T06:52:43.000Z ... 2022-01-12T06:52:43.000Z) | issues: null | forks: null
content:

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import json
from collections import OrderedDict
from paddle.io import Dataset
import numpy as np


class ATISDataset(Dataset):
    def __init__(self, path, vocab_path, intent_path, slot_path):
        self.examples = self.load_data(path)
        self.token2id, self.id2token = self.load_dict(vocab_path)
        self.intent2id, self.id2intent = self.load_dict(intent_path)
        self.slot2id, self.id2slot = self.load_dict(slot_path)

    def __getitem__(self, idx):
        example = self.examples[idx]
        tokens, tags, intent = self.convert_example_to_id(example)
        return np.array(tokens), np.array(tags), intent, len(tokens)

    def __len__(self):
        return len(self.examples)

    @property
    def vocab_size(self):
        return len(self.token2id)

    @property
    def num_intents(self):
        return len(self.intent2id)

    @property
    def num_slots(self):
        return len(self.slot2id)

    def convert_example_to_id(self, example):
        tokens = example["text"].split()
        tags = example["tag"].split()
        intent = example["intent"]
        assert len(tokens) == len(tags)

        tokens = [self.token2id.get(token, "[unk]") for token in tokens]
        tags = [self.slot2id.get(tag, "O") for tag in tags]
        intent = self.intent2id[intent]

        return tokens, tags, intent

    def load_dict(self, dict_path):
        with open(dict_path, "r", encoding="utf-8") as f:
            words = [word.strip() for word in f.readlines()]
            dict2id = dict(zip(words, range(len(words))))
            id2dict = {v: k for k, v in dict2id.items()}
        return dict2id, id2dict

    def _split_with_id(self, text, start=0):
        word2sid = OrderedDict()
        word = ""
        count = 0
        for i in range(len(text)):
            if text[i] == " ":
                continue
            else:
                word += text[i]
                if (i < len(text) - 1 and text[i + 1] == " ") or i == len(text) - 1:
                    # get whole word
                    key = str(i - len(word) + 1 + start) + "_" + str(i + start) + "_" + word
                    word2sid[key] = count
                    count += 1
                    word = ""
        return word2sid

    def load_data(self, path):
        examples = []
        raw_examples = []
        with open(path, "r", encoding="utf-8") as f:
            for example in f.readlines():
                raw_examples.append(json.loads(example))

        for raw_example in raw_examples:
            example = {}
            example["text"] = raw_example["text"]
            example["intent"] = raw_example["intent"]
            splited_text = raw_example["text"].split()
            tags = ['O'] * len(splited_text)
            word2sid = self._split_with_id(raw_example["text"])
            for entity in raw_example["entities"]:
                start, end, value, entity_name = entity["start"], entity["end"] - 1, entity["value"], entity["entity"]
                entity2sid = self._split_with_id(value, start=start)
                for i, word in enumerate(entity2sid.keys()):
                    if i == 0:
                        tags[word2sid[word]] = "B-" + entity_name
                    else:
                        tags[word2sid[word]] = "I-" + entity_name
            example["tag"] = " ".join(tags)
            examples.append(example)
        return examples

avg_line_length: 34.299145 | max_line_length: 118 | alphanum_fraction: 0.585098
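A hedged construction sketch for the dataset above. The four file paths are placeholders, and per `load_data` the first file must hold one JSON object per line with `text`, `intent`, and `entities` keys:

from paddle.io import DataLoader

ds = ATISDataset("train.json", "vocab.txt", "intent_labels.txt", "slot_labels.txt")
print(ds.vocab_size, ds.num_intents, ds.num_slots)
# Each item is (token ids, tag ids, intent id, length); real training code would
# still need padding (e.g. a custom collate_fn), since sequences vary in length.
loader = DataLoader(ds, batch_size=32, shuffle=True)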
== tools/binman/image_test.py ==
hexsha: 10f85d1081fec9c0fe3b1cde987d69b3cccacd2d | size: 1,707 | ext: py | lang: Python
max_stars: repo clkbit123/TheOpenHarmony @ 0e6bcd9dee9f1a2481d762966b8bbd24baad6159 | path Openharmony v1.0/vendor/hisi/hi35xx/third_party/uboot/tools/binman/image_test.py | licenses ["MIT"] | count 1 (2022-02-15T08:51:55.000Z ... 2022-02-15T08:51:55.000Z)
max_issues: repo dawmlight/vendor_oh_fun @ bc9fb50920f06cd4c27399f60076f5793043c77d | path hihope_neptune-oh_hid/00_src/v0.1/device/hisilicon/third_party/uboot/u-boot-2020.01/tools/binman/image_test.py | licenses ["Apache-2.0"] | count null
max_forks: same repo/path as max_issues | count null
content:

# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2017 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Test for the image module

import unittest

from image import Image
from test_util import capture_sys_output


class TestImage(unittest.TestCase):
    def testInvalidFormat(self):
        image = Image('name', 'node', test=True)
        with self.assertRaises(ValueError) as e:
            image.LookupSymbol('_binman_something_prop_', False, 'msg', 0)
        self.assertIn(
            "msg: Symbol '_binman_something_prop_' has invalid format",
            str(e.exception))

    def testMissingSymbol(self):
        image = Image('name', 'node', test=True)
        image._entries = {}
        with self.assertRaises(ValueError) as e:
            image.LookupSymbol('_binman_type_prop_pname', False, 'msg', 0)
        self.assertIn("msg: Entry 'type' not found in list ()",
                      str(e.exception))

    def testMissingSymbolOptional(self):
        image = Image('name', 'node', test=True)
        image._entries = {}
        with capture_sys_output() as (stdout, stderr):
            val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg', 0)
        self.assertEqual(val, None)
        self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
                         stderr.getvalue())
        self.assertEqual('', stdout.getvalue())

    def testBadProperty(self):
        image = Image('name', 'node', test=True)
        image._entries = {'u-boot': 1}
        with self.assertRaises(ValueError) as e:
            image.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg', 0)
        self.assertIn("msg: No such property 'bad", str(e.exception))

avg_line_length: 37.933333 | max_line_length: 79 | alphanum_fraction: 0.627417
== Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/plugins/mapmatching/wxgui-02_stillbrokesave.py ==
hexsha: 337a7618e5b3ffaa46eaf54af32dd998cfb7e5e2 | size: 9,642 | ext: py | lang: Python
repo: uruzahe/carla @ 940c2ab23cce1eda1ef66de35f66b42d40865fb1 | licenses: ["MIT"]
stars: 4 (2020-11-13T02:35:56.000Z ... 2021-03-29T20:15:54.000Z) | issues: 9 (2020-12-09T02:12:39.000Z ... 2021-02-18T00:15:28.000Z) | forks: 1 (2020-11-20T19:31:26.000Z ... 2020-11-20T19:31:26.000Z)
content:

# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later

# @file    wxgui-02_stillbrokesave.py
# @author  Joerg Schweizer
# @date

import os
import wx
import numpy as np

from agilepy.lib_wx.modulegui import ModuleGui
from agilepy.lib_wx.ogleditor import *
from agilepy.lib_base.processes import Process
from agilepy.lib_wx.processdialog import ProcessDialog

from coremodules.network import routing
from coremodules.demand import demand

import mapmatching


class WxGui(ModuleGui):
    """Contains functions that communicate between the widgets of the main wx gui
    and the functions of the plugin.
    """

    def __init__(self, ident):
        self._mapmatching = None
        self._scenario = None
        self._init_common(ident, priority=100001,
                          icondirpath=os.path.join(os.path.dirname(__file__), 'images'))

    def get_module(self):
        return self._mapmatching

    def get_scenario(self):
        return self._mainframe.get_modulegui('coremodules.scenario').get_scenario()

    def get_neteditor(self):
        return self._mainframe.get_modulegui('coremodules.network').get_neteditor()

    def init_widgets(self, mainframe):
        """
        Set mainframe and initialize widgets to various places.
        """
        self._mainframe = mainframe
        #self._neteditor = mainframe.add_view("Network", Neteditor)

        # mainframe.browse_obj(self._module)
        self.make_menu()
        self.make_toolbar()

    def refresh_widgets(self):
        """
        Check through mainframe what the state of the application is
        and reset widgets. For exampe enable/disable widgets
        dependent on the availability of data.
        """
        scenario = self.get_scenario()
        # print 'demand refresh_widgets',scenario.net
        is_refresh = False
        if self._scenario != scenario:
            del self._scenario
            del self._mapmatching
            self._scenario = scenario
            self._mapmatching = mapmatching.Mapmatching('mapmatching', self._scenario)
            #self._mapmatching = self._demand.add_demandobject(ident = 'mapmatching', DemandClass = mapmatching.Mapmatching)
            is_refresh = True

    def make_menu(self):
        menubar = self._mainframe.menubar
        menubar.append_menu('plugins/mapmatching',
                            bitmap=self.get_icon("icon_gps.png"),
                            )
        menubar.append_item('plugins/mapmatching/browse',
                            self.on_browse,  # common function in modulegui
                            info='View and browse mapmatching in object panel.',
                            bitmap=self.get_agileicon('icon_browse_24px.png'),  # ,
                            )
        menubar.append_menu('plugins/mapmatching/import',
                            bitmap=self.get_agileicon("Document_Import_24px.png"),
                            )
        menubar.append_item('plugins/mapmatching/import/European cycling challange...',
                            self.on_import_ecc,
                            info=self.on_import_ecc.__doc__.strip(),
                            bitmap=self.get_agileicon("Document_Import_24px.png"),
                            )
        menubar.append_item('plugins/mapmatching/project points',
                            self.on_project_points,
                            bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_MENU),
                            )
        menubar.append_item('plugins/mapmatching/safe as...',
                            self.on_save_as,
                            info='Save all mapmatching data in a new Python binary file.',
                            bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE_AS, wx.ART_MENU),
                            )
        menubar.append_item('plugins/mapmatching/open...',
                            self.on_open,
                            info='Open previousely saved mapmatching data from a Python binary file.',
                            bitmap=wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_MENU),
                            )

    def on_import_ecc(self, event=None):
        """
        Import and filter data from a European cycling challange.
        """
        p = mapmatching.EccTracesImporter(self._mapmatching, logger=self._mainframe.get_logger())
        dlg = ProcessDialog(self._mainframe, p, immediate_apply=True)
        dlg.CenterOnScreen()

        # this does not return until the dialog is closed.
        val = dlg.ShowModal()
        # print '  val,val == wx.ID_OK',val,wx.ID_OK,wx.ID_CANCEL,val == wx.ID_CANCEL
        # print '  status =',dlg.get_status()
        if dlg.get_status() != 'success':  # val == wx.ID_CANCEL:
            # print ">>>>>>>>>Unsuccessful\n"
            dlg.Destroy()

        if dlg.get_status() == 'success':
            # print ">>>>>>>>>successful\n"
            # apply current widget values to scenario instance
            dlg.apply()
            dlg.Destroy()
            self._mainframe.browse_obj(self._mapmatching.trips)

    def on_project_points(self, event=None):
        self._mapmatching.points.project()
        self._mainframe.browse_obj(self._mapmatching.points)
        if event:
            event.Skip()

    def on_browse(self, event=None):
        self._mainframe.browse_obj(self._mapmatching)
        if event:
            event.Skip()

    def on_save_as(self, event=None):
        if self._mapmatching is None:
            return
        scenario = self.get_scenario()
        wildcards_all = "All files (*.*)|*.*"
        wildcards_obj = "Python binary result files (*.mmatch.obj)|*.mmatch.obj|Python binary files (*.obj)|*.obj"
        wildcards = wildcards_obj + "|" + wildcards_all

        # Finally, if the directory is changed in the process of getting files, this
        # dialog is set up to change the current working directory to the path chosen.
        dlg = wx.FileDialog(
            self._mainframe, message="Save mapmatching to file",
            defaultDir=scenario.get_workdirpath(),
            defaultFile=scenario.get_rootfilepath() + '.mmatch.obj',
            wildcard=wildcards,
            style=wx.SAVE | wx.CHANGE_DIR
        )
        val = dlg.ShowModal()
        # Show the dialog and retrieve the user response. If it is the OK response,
        # process the data.
        if val == wx.ID_OK:
            # This returns a Python list of files that were selected.
            filepath = dlg.GetPath()
            if len(filepath) > 0:
                # now set new filename and workdir
                self._mapmatching.save(filepath)

        # Destroy the dialog. Don't do this until you are done with it!
        # BAD things can happen otherwise!
        dlg.Destroy()

    def on_open(self, event=None):
        wildcards_all = "All files (*.*)|*.*"
        wildcards_obj = "Python binary mapmatching files (*.mmatch.obj)|*.mmatch.obj|Python binary files (*.obj)|*.obj"
        wildcards = wildcards_obj + "|" + wildcards_all

        # Finally, if the directory is changed in the process of getting files, this
        # dialog is set up to change the current working directory to the path chosen.
        dlg = wx.FileDialog(
            self._mainframe, message="Open mapmatching file",
            defaultDir=self.get_scenario().get_workdirpath(),
            #defaultFile = os.path.join(scenario.get_workdirpath(), scenario.format_ident()+'.obj'),
            wildcard=wildcards,
            style=wx.OPEN | wx.CHANGE_DIR
        )

        # Show the dialog and retrieve the user response. If it is the OK response,
        # process the data.
        is_new = False
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            filepath = dlg.GetPath()
            if len(filepath) > 0:
                if self._mapmatching is not None:
                    # browse away from results
                    # self._mainframe.browse_obj(self._results.get_scenario())
                    del self._mapmatching

                self._mapmatching = mapmatching.load_mapmatching(filepath,
                                                                 self.get_scenario(),
                                                                 logger=self._mainframe.get_logger()
                                                                 )
                is_new = True

        # Destroy the dialog. Don't do this until you are done with it!
        # BAD things can happen otherwise!
        dlg.Destroy()

        if is_new:
            # this should update all widgets for the new scenario!!
            # print 'call self._mainframe.refresh_moduleguis()'
            self._mainframe.browse_obj(self._mapmatching)
            self._mainframe.select_view(name="Network")  # !!!!!!!!tricky, crashes without
            self.refresh_widgets()

avg_line_length: 41.560345 | max_line_length: 124 | alphanum_fraction: 0.601846
== sso-db/ssodb/common/models/service_model.py ==
hexsha: 1d3beb1500f67cee98b3bd4f352519dd91475e5e | size: 1,159 | ext: py | lang: Python
repo: faical-yannick-congo/sso-backend @ e962006b0fecd68e4da94e54b4dc63547a5a2c21 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:

import datetime
from ..core import db
import json
from bson import ObjectId


class Service(db.Document):
    created_at = db.StringField(default=str(datetime.datetime.utcnow()))
    updated_at = db.StringField(default=str(datetime.datetime.utcnow()))
    name = db.StringField(required=True, unique=True)
    host = db.StringField()
    possible_status = ["active", "innactive"]
    status = db.StringField(default="innactive", choices=possible_status)
    menu_endpoint = db.StringField()  # Endpoint that take care of providing the sms menu to the service.

    def save(self, *args, **kwargs):
        self.updated_at = str(datetime.datetime.utcnow())
        self.day = str(datetime.date.today().isoformat())
        return super(Service, self).save(*args, **kwargs)

    def info(self):
        data = {'updated-at': str(self.updated_at), 'id': str(self.id),
                'created-at': str(self.created_at), 'status': self.status,
                'name': self.name, 'host': self.host, 'menu-endpoint': self.menu_endpoint}
        return data

    def to_json(self):
        data = self.info()
        return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))

avg_line_length: 39.965517 | max_line_length: 104 | alphanum_fraction: 0.672994
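A small usage sketch for the document class above, assuming a configured mongoengine connection behind `..core.db`; all field values are placeholders:

svc = Service(name="sms-menu", host="http://10.0.0.5:8080", menu_endpoint="/menu")
svc.save()            # the overridden save() refreshes updated_at before persisting
print(svc.to_json())  # pretty-printed JSON built from info()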
89208cce1c70cc78da035c74743a23606b906167
2,660
py
Python
examples/relationship/manytoonefield/models.py
zhengtong0898/django-decode
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
[ "MIT" ]
5
2020-07-14T07:48:10.000Z
2021-12-20T21:20:10.000Z
examples/relationship/manytoonefield/models.py
zhengtong0898/django-decode
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
[ "MIT" ]
7
2021-03-26T03:13:38.000Z
2022-03-12T00:42:03.000Z
examples/relationship/manytoonefield/models.py
zhengtong0898/django-decode
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
[ "MIT" ]
1
2021-02-16T07:04:25.000Z
2021-02-16T07:04:25.000Z
from django.db import models


# CREATE TABLE `manytoonefield_reporter` (                -- table statement generated by Django
#     `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
#     `first_name` varchar(30) NOT NULL,
#     `last_name` varchar(30) NOT NULL,
#     `email` varchar(254) NOT NULL
# );
#
#
# CREATE TABLE `manytoonefield_reporter` (
#     `id` int(11) NOT NULL AUTO_INCREMENT,               -- field added automatically by Django, auto-increment ID
#     `first_name` varchar(30) NOT NULL,
#     `last_name` varchar(30) NOT NULL,
#     `email` varchar(254) NOT NULL,
#     PRIMARY KEY (`id`)                                  -- primary key (clustered index)
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
class Reporter(models.Model):
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()


# CREATE TABLE `manytoonefield_article` (                 -- table statement generated by Django
#     `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
#     `headline` varchar(100) NOT NULL,
#     `pub_date` date NOT NULL,
#     `reporter_id` integer NOT NULL
# );
#
# ALTER TABLE `manytoonefield_article`
#     ADD CONSTRAINT `manytoonefield_artic_reporter_id_01692140_fk_manytoone` FOREIGN KEY (`reporter_id`)
#     REFERENCES `manytoonefield_reporter` (`id`)         -- foreign-key statement added by Django
#
#
# Question: what is the difference between a secondary index and a foreign key?
# https://stackoverflow.com/questions/1732467/what-is-the-difference-between-an-index-and-a-foreign-key
# A secondary index is internally a B+ tree data structure that speeds up random lookups.
# A foreign key is merely a reference to another table's primary key.
# CREATE TABLE `manytoonefield_article` (
#     `id` int(11) NOT NULL AUTO_INCREMENT,               -- field added automatically by Django, auto-increment ID
#     `headline` varchar(100) NOT NULL,
#     `pub_date` date NOT NULL,
#     `reporter_id` int(11) NOT NULL,
#     PRIMARY KEY (`id`),                                 -- primary key (clustered index)
#     KEY `manytoonefield_artic_reporter_id_01692140_fk_manytoone` (`reporter_id`),  -- secondary index
#     CONSTRAINT `manytoonefield_artic_reporter_id_01692140_fk_manytoone` FOREIGN KEY (`reporter_id`) \
#         REFERENCES `manytoonefield_reporter` (`id`)     -- foreign-key constraint
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
class Article(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    reporter = models.ForeignKey(Reporter, on_delete=models.CASCADE)  # Many-to-one
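A hypothetical usage sketch of the many-to-one relation above (not part of the original file; assumes a configured Django project with this app installed and migrated):

import datetime

r = Reporter.objects.create(first_name="Ada", last_name="Lovelace", email="ada@example.com")
a = Article.objects.create(headline="Hello", pub_date=datetime.date(2021, 1, 1), reporter=r)

print(a.reporter.email)       # forward lookup follows the reporter_id foreign key
print(r.article_set.count())  # reverse lookup is served by the secondary index on reporter_id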
48.363636
117
0.566165
89ac690609f1e3d0a1501b1e64be3237dc585594
92
py
Python
2014/07/men-life-gap/graphic_config.py
nprapps/graphics-archive
97b0ef326b46a959df930f5522d325e537f7a655
[ "FSFAP" ]
14
2015-05-08T13:41:51.000Z
2021-02-24T12:34:55.000Z
2014/07/men-life-gap/graphic_config.py
nprapps/graphics-archive
97b0ef326b46a959df930f5522d325e537f7a655
[ "FSFAP" ]
null
null
null
2014/07/men-life-gap/graphic_config.py
nprapps/graphics-archive
97b0ef326b46a959df930f5522d325e537f7a655
[ "FSFAP" ]
7
2015-04-04T04:45:54.000Z
2021-02-18T11:12:48.000Z
#!/usr/bin/env python

COPY_GOOGLE_DOC_KEY = '16FywAcZFB7xGfCtrHMWmUkidEV8ikFwMgSgJf5LWxxI'
23
68
0.847826
7fa1485d45de4609f2ca861fc3816ac0c76217b5
1,723
py
Python
scripts/component_graph/server/query/query_handler.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
null
null
null
scripts/component_graph/server/query/query_handler.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
null
null
null
scripts/component_graph/server/query/query_handler.py
opensource-assist/fuschia
66646c55b3d0b36aae90a4b6706b87f1a6261935
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""QueryHandler services all graph and package queries.

The QueryHandler is the business logic layer between the PackageManager
and the ComponentGraphGenerator. It is responsible for processing the data
provided by the PackageManager and forwarding it through for graph generation.
"""

import sys
import json

from server.util.logging import get_logger
from server.graph import ComponentGraphGenerator


class ComponentQueryError(Exception):
    """Raised when an unrecoverable query exception occurs"""
    pass


class QueryHandler:
    """ Core handler to respond to different queries """

    def __init__(self, package_manager):
        """ Verifies the package manager is online. """
        self.logger = get_logger(__name__)
        self.package_manager = package_manager
        self.graph_generator = ComponentGraphGenerator()
        if not self.package_manager.ping():
            self.logger.error(
                "Failed to connect to package manager please run fx serve.")
            sys.exit(1)

    def services(self, packages):
        """ Returns the list of service to component url mappings """
        return self.package_manager.get_services(packages)

    def packages(self):
        """ Returns a list of available packages """
        return self.package_manager.get_packages()

    def component_graph(self):
        """ Returns the component graph that shows all component connections """
        packages = self.packages()
        return self.graph_generator.generate(packages, self.services(packages)).export()
36.659574
88
0.720255
12062a73da81e781e475f26cc8fd1813ec241c8a
1,908
py
Python
tests/rbac/common/addresser/addresser_test.py
akgunkel/sawtooth-next-directory
a88833033ab30e9091479a38947f04c5e396ca46
[ "Apache-2.0" ]
null
null
null
tests/rbac/common/addresser/addresser_test.py
akgunkel/sawtooth-next-directory
a88833033ab30e9091479a38947f04c5e396ca46
[ "Apache-2.0" ]
1
2019-07-08T22:32:43.000Z
2019-07-08T22:32:43.000Z
tests/rbac/common/addresser/addresser_test.py
akgunkel/sawtooth-next-directory
a88833033ab30e9091479a38947f04c5e396ca46
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
"""Test Addresser"""
import logging
import pytest

from rbac.common import addresser
from tests.rbac.common.assertions import TestAssertions

LOGGER = logging.getLogger(__name__)


@pytest.mark.addressing
@pytest.mark.library
class TestAddresser(TestAssertions):
    """Test Addresser"""

    def test_family_props(self):
        """Test the addresser family has the expected properties"""
        self.assertIsInstance(addresser.family.name, str)
        self.assertIsInstance(addresser.family.version, str)
        self.assertIsInstance(addresser.family.pattern.pattern, str)

    def test_unique_id(self):
        """Test unique_id returns unique identifiers"""
        unique_id1 = addresser.role.unique_id()
        unique_id2 = addresser.role.unique_id()
        self.assertIsIdentifier(unique_id1)
        self.assertIsIdentifier(unique_id2)
        self.assertNotEqual(unique_id1, unique_id2)

    def test_hash(self):
        """Test hash returns unique identifiers"""
        hash1 = addresser.role.hash(addresser.role.unique_id())
        hash2 = addresser.role.hash(addresser.role.unique_id())
        self.assertIsIdentifier(hash1)
        self.assertIsIdentifier(hash2)
        self.assertNotEqual(hash1, hash2)
36
79
0.70283
126022b5d66c9c3625d207e1cae9dbfb33a68a04
2,702
py
Python
tests/google/test_primary_google_service_account.py
scottyellis/fence
012ba76a58853169e9ee8e3f44a0dc510f4b2543
[ "Apache-2.0" ]
31
2018-01-05T22:49:33.000Z
2022-02-02T10:30:23.000Z
tests/google/test_primary_google_service_account.py
scottyellis/fence
012ba76a58853169e9ee8e3f44a0dc510f4b2543
[ "Apache-2.0" ]
737
2017-12-11T17:42:11.000Z
2022-03-29T22:42:52.000Z
tests/google/test_primary_google_service_account.py
scottyellis/fence
012ba76a58853169e9ee8e3f44a0dc510f4b2543
[ "Apache-2.0" ]
46
2018-02-23T09:04:23.000Z
2022-02-09T18:29:51.000Z
import pytest
from unittest.mock import MagicMock, patch


class MockResponse:
    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        return self.json_data


def test_primary_google_service_account_valid(
    client,
    app,
    db_session,
    encoded_jwt_google_data_access,
    primary_google_service_account_google,
):
    """
    Test that given valid credentials, the endpoint responds with the user's
    primary google SA in the response and it matches the mocked value setup
    in the fixture
    """
    encoded_creds_jwt = encoded_jwt_google_data_access["jwt"]
    mock = primary_google_service_account_google["get_or_create_service_account_mock"]
    email = primary_google_service_account_google["email"]

    response = client.post(
        "/google/primary_google_service_account",
        headers={"Authorization": "Bearer " + encoded_creds_jwt},
        content_type="application/json",
    )
    assert response.status_code == 200
    assert response.json.get("primary_google_service_account") == email


def test_primary_google_service_account_invalid(
    client,
    app,
    db_session,
    encoded_jwt_service_accounts_access,
    primary_google_service_account_google,
):
    """
    Test that given invalid credentials (e.g. doesn't have the right scope),
    this endpoint responds with an HTTP error code and no data

    NOTE: encoded_jwt_service_accounts_access does not have the expected claim
    in the mocked token.
    """
    encoded_creds_jwt = encoded_jwt_service_accounts_access["jwt"]
    mock = primary_google_service_account_google["get_or_create_service_account_mock"]
    email = primary_google_service_account_google["email"]

    response = client.post(
        "/google/primary_google_service_account",
        headers={"Authorization": "Bearer " + encoded_creds_jwt},
        content_type="application/json",
    )
    assert response.status_code == 401
    assert not (response.json or {}).get("primary_google_service_account")


def test_primary_google_service_account_no_creds(
    client,
    app,
    db_session,
    primary_google_service_account_google,
):
    """
    Test that given no creds, this endpoint responds with an HTTP error code
    and no data
    """
    mock = primary_google_service_account_google["get_or_create_service_account_mock"]
    email = primary_google_service_account_google["email"]

    response = client.post(
        "/google/primary_google_service_account",
        content_type="application/json",
    )
    assert response.status_code == 401
    assert not (response.json or {}).get("primary_google_service_account")
32.554217
88
0.734641
c3e71626d20fc18a6728e7e99e05a5d7cf3045ab
3,380
py
Python
Uebung4/Uebung4_Aufgabe12.py
B0mM3L6000/EiP
f68718f95a2d3cde8ead62b6134ac1b5068881a5
[ "MIT" ]
1
2018-04-18T19:10:06.000Z
2018-04-18T19:10:06.000Z
Uebung4/Uebung4_Aufgabe12.py
B0mM3L6000/EiP
f68718f95a2d3cde8ead62b6134ac1b5068881a5
[ "MIT" ]
null
null
null
Uebung4/Uebung4_Aufgabe12.py
B0mM3L6000/EiP
f68718f95a2d3cde8ead62b6134ac1b5068881a5
[ "MIT" ]
1
2018-04-29T08:48:00.000Z
2018-04-29T08:48:00.000Z
# import:
import random

# the list of drawn lottery numbers (sauce header)
picks = list()

# code:
# list of numbers available for selection (here 1-49)
zahlen = list(range(1, 50))
# number of draws (here 6 out of 49)
anzahl = 6

# draws: pick one number at a time, add it to picks and remove it from zahlen to avoid duplicates
for i in range(anzahl):
    j = random.randint(0, len(zahlen) - i - 1)
    picks.append(zahlen[j])
    zahlen.remove(zahlen[j])

# sort
picks.sort()

# Sauce Output:
print("#test if 6 different numbers were drawn")
if len(set(picks)) == 6:
    print("all picks are unique")
else:
    print("at least two picks are the same")
    print("your picks:", picks)

print("#test if all picks are in range from 1 to 49")
if all(k in range(1, 50) for k in picks):
    print("picks in valid range")
else:
    print("picks are not in valid range")
    print("your picks:", picks)

print("#test if picks are sorted")
if sorted(picks) == picks:
    print("picks are sorted")
else:
    print("picks are not sorted!")
    print("your picks:", picks)

"""
Exercise 12.1:
Since this is a draw without replacement, every permutation has the same probability.
"""

"""
# Exercise 12.3:
getippt = False
while getippt == False:
    tipp = [int(i) for i in (input("Enter 6 numbers:").split())]
    if (len(set(tipp)) == 6) & (all(k in range(1, 50) for k in tipp)):
        getippt = True
    else:
        print("Please enter exactly 6 different numbers between 1 and 49.")

tipp.sort()
tippset = set(tipp)
picksset = set(picks)
treffer = list(picksset.intersection(tippset))
count_treffer = len(treffer)
print("The following numbers are correct:", treffer)
print("You got", count_treffer, "right!")
"""

"""
# Exercise 12.4:
# simulate random guesses until they match the 6 random lotto picks
# repeat x times to obtain a mean value
x = int(input("How many rounds for the mean value?"))
# round counter
zaehler = 0
for _ in range(x):
    richtig = False
    while richtig == False:
        # the picks from part 2:
        picks = list()
        zahlen = list(range(1, 50))
        anzahl = 6
        for i in range(anzahl):
            j = random.randint(0, len(zahlen) - i - 1)
            picks.append(zahlen[j])
            zahlen.remove(zahlen[j])
        picks.sort()
        # the guesses:
        tipps = list()
        zahlen = list(range(1, 50))
        for i in range(anzahl):
            j = random.randint(0, len(zahlen) - i - 1)
            tipps.append(zahlen[j])
            zahlen.remove(zahlen[j])
        tipps.sort()
        # check whether the guess was right:
        tippsset = set(tipps)
        picksset = set(picks)
        treffer = list(picksset.intersection(tippsset))
        count_treffer = len(treffer)
        if count_treffer == 6:
            richtig = True
        zaehler += 1

mittel = zaehler / x
print(mittel)

# Result with x = 1: on average 10011063.0 guesses are needed for 6 correct numbers.
# Result with x = 10: on average 11212583.5 guesses are needed for 6 correct numbers.
# Too expensive for more rounds.
# Mathematically:
# Possible outcomes of the draw: (49 choose 6) = 49!/(6!*43!) = 13 983 816
# Possible matching guesses: (6 choose 6) = 6!/6! = 1
# => P(6 correct) = 1/13983816, i.e. on average 13 983 816 tries are needed for 6 correct numbers.
"""
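The 13,983,816 figure quoted at the end can be checked directly with the standard library (added check, Python 3.8+ for math.comb):

import math

print(math.comb(49, 6))      # 13983816 possible draws
print(1 / math.comb(49, 6))  # probability of six correct numbers, about 7.15e-08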
24.316547
110
0.644083
7f247104a82e8fad0e1f63f31b0d8269e65797f9
2,003
py
Python
RESTApi/flaskapp/recommend.py
Peacecake/HoloMu
98b422b226c2274e6d7e96df31724b0d2abd8ebb
[ "MIT" ]
null
null
null
RESTApi/flaskapp/recommend.py
Peacecake/HoloMu
98b422b226c2274e6d7e96df31724b0d2abd8ebb
[ "MIT" ]
32
2018-06-19T15:27:04.000Z
2018-09-30T20:17:23.000Z
RESTApi/flaskapp/recommend.py
Peacecake/HoloMu
98b422b226c2274e6d7e96df31724b0d2abd8ebb
[ "MIT" ]
null
null
null
import json
import random

from db import get_db


def calcRecommendation(watched_name, watched_cat):
    db = get_db()
    watchedSameCat = 2
    watchedExhibit = 0.9
    otherExhibit = 1
    weights_sum = 0

    lastRow = db.execute("SELECT * FROM recommend WHERE ID=(SELECT MAX(ID) FROM recommend)").fetchone()
    newData = json.loads(lastRow["data"])

    for exhibit_data in newData:
        if watched_name != exhibit_data["e_name"] and watched_cat == exhibit_data["e_cat"]:
            exhibit_data["e_prop"] = exhibit_data["e_prop"] * watchedSameCat
            weights_sum += exhibit_data["e_prop"]
        elif watched_name == exhibit_data["e_name"]:
            exhibit_data["e_prop"] = exhibit_data["e_prop"] * watchedExhibit
            weights_sum += exhibit_data["e_prop"]
        else:
            exhibit_data["e_prop"] = exhibit_data["e_prop"] * otherExhibit
            weights_sum += exhibit_data["e_prop"]

    for exhibit_data in newData:
        exhibit_data["e_prop"] = exhibit_data["e_prop"] / weights_sum

    return newData


def recommendExhibit(watched_exhibit):
    db = get_db()
    exhibitNames = []
    weights = []

    currentRow = db.execute("SELECT * FROM recommend WHERE ID=(SELECT MAX(ID) FROM recommend)")
    for data_set in currentRow:
        data = json.loads(data_set["data"])
        for exhibit_data in data:
            if exhibit_data["e_name"] != watched_exhibit:
                weights.append(exhibit_data["e_prop"])
                exhibitNames.append(exhibit_data["e_name"])

    recommendedExhibit = weightedRands(exhibitNames, weights)
    return recommendedExhibit


# returns random value from exhibitNames list weighted by the e_props
# Retrieved from: https://stackoverflow.com/questions/12096819
def weightedRands(exhibits, weights):
    # the original used the Python 2 only idiom map(None, ...);
    # zipping over the running totals is the Python 3 equivalent
    r = random.uniform(0, sum(weights))
    totals = [sum(weights[:x + 1]) for x in range(len(weights))]
    for n, v in zip(exhibits, totals):
        if r < v:
            return n
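Since Python 3.6 the standard library already provides weighted sampling, so weightedRands could arguably be replaced with a one-liner (hypothetical data, not the module's):

import random

exhibits = ["mona_lisa", "david", "starry_night"]
weights = [0.2, 0.5, 0.3]
print(random.choices(exhibits, weights=weights, k=1)[0])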
40.06
104
0.648527
9ccb7ea5fedf5bc0b68bae94dbd5a5c09c9d9c76
108
py
Python
python/python_new/Python 3/untitled4x.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
16
2018-11-26T08:39:42.000Z
2019-05-08T10:09:52.000Z
python/python_new/Python 3/untitled4x.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
8
2020-05-04T06:29:26.000Z
2022-02-12T05:33:16.000Z
python/python_new/Python 3/untitled4x.py
SayanGhoshBDA/code-backup
8b6135facc0e598e9686b2e8eb2d69dd68198b80
[ "MIT" ]
5
2020-02-11T16:02:21.000Z
2021-02-05T07:48:30.000Z
import numpy as np

a = np.array([[1, 2], [3, 4], [5, 8]])
b = a[-1, 0:2]
print(b)
c = a[-1]
print(c)
d = a[:, 1:3]
print(d)
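For reference, the three slices evaluate as follows (added check, not in the original snippet):

import numpy as np

a = np.array([[1, 2], [3, 4], [5, 8]])
assert (a[-1, 0:2] == np.array([5, 8])).all()          # last row, first two columns
assert (a[-1] == np.array([5, 8])).all()               # the whole last row
assert (a[:, 1:3] == np.array([[2], [4], [8]])).all()  # column 1 only; the stop index 3 is clipped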
13.5
31
0.546296
2c28d23538ff750b39c70a97536878ac52e6fc71
1,076
py
Python
setup.py
KeepSafe/html-structure-diff
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
[ "Apache-2.0" ]
3
2016-05-10T13:57:14.000Z
2016-09-29T21:01:53.000Z
setup.py
KeepSafe/html-structure-diff
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
[ "Apache-2.0" ]
3
2015-10-20T22:29:37.000Z
2022-01-18T18:20:06.000Z
setup.py
KeepSafe/html-structure-diff
83ea2b8ad4b78b140bb5f69e3f2966c9fda01a89
[ "Apache-2.0" ]
1
2016-11-05T04:23:05.000Z
2016-11-05T04:23:05.000Z
import os

from setuptools import setup, find_packages

version = '0.4.1'


def read(f):
    return open(os.path.join(os.path.dirname(__file__), f)).read().strip()


install_requires = [
    'mistune <= 1',
]

tests_require = [
    'nose',
    'flake8',
    'autopep8',
]

devtools_require = [
    'twine',
    'build',
]

setup(name='sdiff',
      version=version,
      description=('sdiff compares the structure of two markdown texts'),
      classifiers=[
          'License :: OSI Approved :: BSD License',
          'Intended Audience :: Developers',
          'Programming Language :: Python'],
      author='Keepsafe',
      author_email='[email protected]',
      url='https://github.com/KeepSafe/html-structure-diff/',
      license='Apache',
      packages=find_packages(exclude=['tests']),
      package_data={},
      namespace_packages=[],
      install_requires=install_requires,
      tests_require=tests_require,
      extras_require={
          'tests': tests_require,
          'devtools': devtools_require,
      },
      include_package_data=False)
22.416667
74
0.624535
9fac8f3c828695c8cff839c5969097866b2f76b4
514
py
Python
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/07.0-Condition.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
marks = int(input("What is you marks in Math: ")) def show_grade(grade): print(f"You got: {grade}") if marks >= 80: show_grade("A+") elif marks >= 70: show_grade("A") elif marks >= 60: show_grade("A-") elif marks >= 50: show_grade("B") elif marks >= 40: show_grade("C") else: show_grade("F") if marks > 80 or marks < 10: print("You are very good or very bad") if marks > 80: print("Excellent") else: print("Not so good") else: print("You are okay")
17.724138
49
0.577821
2c6a60c9c8ae76a52cd66a8d5cb2289cc09c49cf
2,147
py
Python
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
RapidsAtHKUST/SimRank
3a601b08f9a3c281e2b36b914e06aba3a3a36118
[ "MIT" ]
8
2020-04-14T23:17:00.000Z
2021-06-21T12:34:04.000Z
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
RapidsAtHKUST/SimRank
3a601b08f9a3c281e2b36b914e06aba3a3a36118
[ "MIT" ]
null
null
null
python_experiments/data_analysis/vldbj_data_parsing/generate_accuracy_markdown.py
RapidsAtHKUST/SimRank
3a601b08f9a3c281e2b36b914e06aba3a3a36118
[ "MIT" ]
1
2021-01-17T16:26:50.000Z
2021-01-17T16:26:50.000Z
from data_analysis.vldbj_data_parsing.querying_time_accuracy_statistics import *
from data_analysis.vldbj_data_parsing.reads_accuracy_statistics import *
import decimal


def get_accuracy_dict(root_dir='.', file_name='accuracy_result'):
    with open(root_dir + os.sep + 'data-json' + os.sep + file_name + '.json') as ifs:
        return json.load(ifs)


def get_accuracy_dict_with_reads(root_dir='.', file_name='accuracy_result'):
    accuracy_dict = get_accuracy_dict(root_dir)
    for file in ['accuracy_result_reads', 'accuracy_result_probesim']:
        read_dict = get_accuracy_dict(root_dir, file_name=file)
        assert isinstance(read_dict, dict)
        for key, val in read_dict.items():
            accuracy_dict[key] = val
    return accuracy_dict


def format_str(float_num):
    my_str = str(decimal.Decimal.from_float(float_num * (10 ** 2)).quantize(decimal.Decimal('0.000')))
    if float_num < 0.01:
        return my_str
    else:
        return '**' + my_str + '**'


if __name__ == '__main__':
    algorithm_tag_lst = [bflpmc_tag, flpmc_tag, bprw_tag, sling_tag,
                         reads_tag, reads_d_tag, reads_rq_tag,
                         isp_tag, tsf_tag, lind_tag, cw_tag]
    accuracy_dict = get_accuracy_dict_with_reads()

    def get_time_table(round_lst, data_set):
        # note: written for Python 2, where map() returns a list that can be
        # concatenated onto another list
        table_lines = []
        header = ['algo\\data'] + map(str, round_lst)
        table_lines.append(' | '.join(header))
        table_lines.append(' | '.join(['---'] * (len(round_lst) + 1)))
        lines = map(lambda algorithm: ' | '.join(
            [algorithm] + map(lambda num: format_str(num) + "",
                              accuracy_dict[algorithm][data_set][str(10 ** 5)],
                              )), algorithm_tag_lst)
        table_lines.extend(lines)
        return '\n'.join(table_lines)

    os.system('mkdir -p data-markdown')
    with open('data-markdown/accuracy_result.md', 'w') as ofs:
        ofs.writelines(['# Max Err, Unit: 0.01\n\n'])
        for data_set in accuracy_data_set_lst:
            ofs.writelines(['## ' + data_set, '\n\n', get_time_table(range(10), data_set), '\n\n'])
38.339286
107
0.630182
646a745e6ac3daf4d9d0c6d9836370483a70e293
230
py
Python
Algorithms/Implementation/service_lane.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
Algorithms/Implementation/service_lane.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
Algorithms/Implementation/service_lane.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
N, T = list(map(int, input().strip().split(' ')))
width = list(map(int, input().strip().split(' ')))

for _ in range(T):
    i, j = list(map(int, input().strip().split(' ')))
    print(min(width[i:j + 1]))
25.555556
53
0.556522
6484edd6039e15fcfb7d0ccf9cd6a117867e0839
250
py
Python
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
1
2021-04-29T14:55:29.000Z
2021-04-29T14:55:29.000Z
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
frappe-bench/apps/erpnext/erpnext/patches/v5_1/default_bom.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
1
2021-04-29T14:39:01.000Z
2021-04-29T14:39:01.000Z
from __future__ import unicode_literals
import frappe


def execute():
    frappe.db.sql("""update `tabItem` as item
        set default_bom = NULL
        where not exists(select name
            from `tabBOM` as bom
            where item.default_bom = bom.name
                and bom.docstatus = 1)""")
35.714286
106
0.752
64b334b0e627ffd9f97989fbdbc08774a19c7319
734
py
Python
AutosClasificados/core/test2.py
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
2997359150236a7d897a3f9201f8e9404f3d7f02
[ "MIT" ]
null
null
null
AutosClasificados/core/test2.py
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
2997359150236a7d897a3f9201f8e9404f3d7f02
[ "MIT" ]
null
null
null
AutosClasificados/core/test2.py
joaquinpunales1992/Python-Django-WatsonVisualRecognition-WatsonNLU
2997359150236a7d897a3f9201f8e9404f3d7f02
[ "MIT" ]
null
null
null
import json

# json = {"watsonVisualRecognition": {"vAPIKey": "868b9f7ba1beb9cd3ef77236760fea74bac9af26",
#             "vAPIVersion": "2016-05-20", "vIdClasificador": "vehiculos_260725218",
#             "vUmbralMinScore_WVR": 0.5},
#         "watsonNLU": {"preprocessing_queue": {"vAPIUser": "e477af0a-db2f-4753-8b2d-14a084e607cf",
#             "vAPIPass": "qbHBxkSPhwPA", "vAPIVersion": "2017-10-03", "vUmbralMinScore_WNLU": 0.1},
#         "otros": {"vUmbralMinDescripcion": 10}}}

with open('AutosClasificados\core\config.json') as json_data_file:
    vConfig = json.load(json_data_file)

vAPIVersion = vConfig["watsonVisualRecognition"]["vAPIVersion"]
vAPIKey = vConfig["watsonVisualRecognition"]["vAPIKey"]
vAPIClasificador = vConfig["watsonVisualRecognition"]["vIdClasificador"]
66.727273
406
0.757493
37beb8be587eda2253830cd5e5c76fc4dc369c96
10,739
py
Python
official/cv/east/src/east.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
official/cv/east/src/east.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
official/cv/east/src/east.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore as mstype
import mindspore.nn as nn
import mindspore.ops as P


def _conv(in_channels, out_channels, kernel_size=3, stride=1, padding=0, pad_mode='pad'):
    """Conv2D wrapper."""
    weights = 'ones'
    layers = []
    layers += [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                         padding=padding, pad_mode=pad_mode, weight_init=weights, has_bias=False)]
    layers += [nn.BatchNorm2d(out_channels)]
    return nn.SequentialCell(layers)


class VGG16FeatureExtraction(nn.Cell):
    """VGG16FeatureExtraction for deeptext"""

    def __init__(self):
        super(VGG16FeatureExtraction, self).__init__()
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)

        self.conv1_1 = _conv(in_channels=3, out_channels=64, kernel_size=3, padding=1)
        self.conv1_2 = _conv(in_channels=64, out_channels=64, kernel_size=3, padding=1)

        self.conv2_1 = _conv(in_channels=64, out_channels=128, kernel_size=3, padding=1)
        self.conv2_2 = _conv(in_channels=128, out_channels=128, kernel_size=3, padding=1)

        self.conv3_1 = _conv(in_channels=128, out_channels=256, kernel_size=3, padding=1)
        self.conv3_2 = _conv(in_channels=256, out_channels=256, kernel_size=3, padding=1)
        self.conv3_3 = _conv(in_channels=256, out_channels=256, kernel_size=3, padding=1)

        self.conv4_1 = _conv(in_channels=256, out_channels=512, kernel_size=3, padding=1)
        self.conv4_2 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.conv4_3 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)

        self.conv5_1 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.conv5_2 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.conv5_3 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
        self.cast = P.Cast()

    def construct(self, out):
        """ Construction of VGG """
        f_0 = out
        out = self.cast(out, mstype.float32)
        out = self.conv1_1(out)
        out = self.relu(out)
        out = self.conv1_2(out)
        out = self.relu(out)
        out = self.max_pool(out)

        out = self.conv2_1(out)
        out = self.relu(out)
        out = self.conv2_2(out)
        out = self.relu(out)
        out = self.max_pool(out)
        f_2 = out

        out = self.conv3_1(out)
        out = self.relu(out)
        out = self.conv3_2(out)
        out = self.relu(out)
        out = self.conv3_3(out)
        out = self.relu(out)
        out = self.max_pool(out)
        f_3 = out

        out = self.conv4_1(out)
        out = self.relu(out)
        out = self.conv4_2(out)
        out = self.relu(out)
        out = self.conv4_3(out)
        out = self.relu(out)
        out = self.max_pool(out)
        f_4 = out

        out = self.conv5_1(out)
        out = self.relu(out)
        out = self.conv5_2(out)
        out = self.relu(out)
        out = self.conv5_3(out)
        out = self.relu(out)
        out = self.max_pool(out)
        f_5 = out
        return f_0, f_2, f_3, f_4, f_5


class Merge(nn.Cell):
    def __init__(self):
        super(Merge, self).__init__()
        self.conv1 = nn.Conv2d(1024, 128, 1, has_bias=True)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu1 = nn.ReLU()
        self.conv2 = nn.Conv2d(128, 128, 3, padding=1, pad_mode='pad', has_bias=True)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()

        self.conv3 = nn.Conv2d(384, 64, 1, has_bias=True)
        self.bn3 = nn.BatchNorm2d(64)
        self.relu3 = nn.ReLU()
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1, pad_mode='pad', has_bias=True)
        self.bn4 = nn.BatchNorm2d(64)
        self.relu4 = nn.ReLU()

        self.conv5 = nn.Conv2d(192, 32, 1)
        self.bn5 = nn.BatchNorm2d(32)
        self.relu5 = nn.ReLU()
        self.conv6 = nn.Conv2d(32, 32, 3, padding=1, pad_mode='pad', has_bias=True)
        self.bn6 = nn.BatchNorm2d(32)
        self.relu6 = nn.ReLU()

        self.conv7 = nn.Conv2d(32, 32, 3, padding=1, pad_mode='pad', has_bias=True)
        self.bn7 = nn.BatchNorm2d(32)
        self.relu7 = nn.ReLU()
        self.concat = P.Concat(axis=1)

    def construct(self, x, f1, f2, f3, f4):
        img_height = P.Shape()(x)[2]
        img_width = P.Shape()(x)[3]

        out = P.ResizeBilinear((img_height / 16, img_width / 16), True)(f4)
        out = self.concat((out, f3))
        out = self.relu1(self.bn1(self.conv1(out)))
        out = self.relu2(self.bn2(self.conv2(out)))

        out = P.ResizeBilinear((img_height / 8, img_width / 8), True)(out)
        out = self.concat((out, f2))
        out = self.relu3(self.bn3(self.conv3(out)))
        out = self.relu4(self.bn4(self.conv4(out)))

        out = P.ResizeBilinear((img_height / 4, img_width / 4), True)(out)
        out = self.concat((out, f1))
        out = self.relu5(self.bn5(self.conv5(out)))
        out = self.relu6(self.bn6(self.conv6(out)))

        out = self.relu7(self.bn7(self.conv7(out)))
        return out


class Output(nn.Cell):
    def __init__(self, scope=512):
        super(Output, self).__init__()
        self.conv1 = nn.Conv2d(32, 1, 1)
        self.sigmoid1 = nn.Sigmoid()
        self.conv2 = nn.Conv2d(32, 4, 1)
        self.sigmoid2 = nn.Sigmoid()
        self.conv3 = nn.Conv2d(32, 1, 1)
        self.sigmoid3 = nn.Sigmoid()
        self.scope = scope
        self.concat = P.Concat(axis=1)
        self.PI = 3.1415926535898

    def construct(self, x):
        score = self.sigmoid1(self.conv1(x))
        loc = self.sigmoid2(self.conv2(x)) * self.scope
        angle = (self.sigmoid3(self.conv3(x)) - 0.5) * self.PI
        geo = self.concat((loc, angle))
        return score, geo


class EAST(nn.Cell):
    def __init__(self):
        super(EAST, self).__init__()
        self.extractor = VGG16FeatureExtraction()
        self.merge = Merge()
        self.output = Output()

    def construct(self, x_1):
        f_0, f_1, f_2, f_3, f_4 = self.extractor(x_1)
        x_1 = self.merge(f_0, f_1, f_2, f_3, f_4)
        score, geo = self.output(x_1)
        return score, geo


class DiceCoefficient(nn.Cell):
    def __init__(self):
        super(DiceCoefficient, self).__init__()
        self.sum = P.ReduceSum()
        self.eps = 1e-5

    def construct(self, true_cls, pred_cls):
        intersection = self.sum(true_cls * pred_cls, ())
        union = self.sum(true_cls, ()) + self.sum(pred_cls, ()) + self.eps
        loss = 1. - (2 * intersection / union)
        return loss


class MyMin(nn.Cell):
    def __init__(self):
        super(MyMin, self).__init__()
        self.abs = P.Abs()

    def construct(self, a, b):
        return (a + b - self.abs(a - b)) / 2


class EastLossBlock(nn.Cell):
    def __init__(self):
        super(EastLossBlock, self).__init__()
        self.split = P.Split(1, 5)
        self.min = MyMin()
        self.log = P.Log()
        self.cos = P.Cos()
        self.mean = P.ReduceMean(keep_dims=False)
        self.sum = P.ReduceSum()
        self.eps = 1e-5
        self.dice = DiceCoefficient()

    def construct(self, y_true_cls, y_pred_cls, y_true_geo, y_pred_geo, training_mask):
        ans = self.sum(y_true_cls)
        classification_loss = self.dice(y_true_cls, y_pred_cls * (1 - training_mask))

        # n * 5 * h * w
        d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = self.split(y_true_geo)
        d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = self.split(y_pred_geo)

        area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
        area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)

        w_union = self.min(d2_gt, d2_pred) + self.min(d4_gt, d4_pred)
        h_union = self.min(d1_gt, d1_pred) + self.min(d3_gt, d3_pred)

        area_intersect = w_union * h_union
        area_union = area_gt + area_pred - area_intersect

        iou_loss_map = -self.log((area_intersect + 1.0) / (area_union + 1.0))
        angle_loss_map = 1 - self.cos(theta_pred - theta_gt)

        angle_loss = self.sum(angle_loss_map * y_true_cls) / ans
        iou_loss = self.sum(iou_loss_map * y_true_cls) / ans
        geo_loss = 10 * angle_loss + iou_loss

        return geo_loss + classification_loss


class EastWithLossCell(nn.Cell):
    def __init__(self, network):
        super(EastWithLossCell, self).__init__()
        self.east_network = network
        self.loss = EastLossBlock()

    def construct(self, img, true_cls, true_geo, training_mask):
        score, geometry = self.east_network(img)
        loss = self.loss(true_cls, score, true_geo, geometry, training_mask)
        return loss
29.502747
79
0.538039
208edc51685c65faff0900a1cfaeda349445762a
298
py
Python
src/network/bo/messages/message.py
TimHabeck/blockchain-lab
3cd050ee43f26cf0a1f70869100f0b40a6abae07
[ "RSA-MD" ]
null
null
null
src/network/bo/messages/message.py
TimHabeck/blockchain-lab
3cd050ee43f26cf0a1f70869100f0b40a6abae07
[ "RSA-MD" ]
null
null
null
src/network/bo/messages/message.py
TimHabeck/blockchain-lab
3cd050ee43f26cf0a1f70869100f0b40a6abae07
[ "RSA-MD" ]
null
null
null
from abc import ABC


class Message(ABC):

    def __init__(self) -> None:
        self._name = None

    def get_name(self):
        return self._name

    def set_name(self, name):
        self._name = name

    def to_dict(self):
        pass

    @staticmethod
    def from_dict():
        pass
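A hypothetical concrete subclass (not in the original module), sketching how the to_dict/from_dict hooks are presumably meant to be filled in:

class PingMessage(Message):

    def __init__(self) -> None:
        super().__init__()
        self.set_name("ping")

    def to_dict(self):
        return {"name": self.get_name()}

    @staticmethod
    def from_dict(data):
        msg = PingMessage()
        msg.set_name(data["name"])
        return msg


print(PingMessage().to_dict())  # {'name': 'ping'}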
14.9
31
0.573826
20d46ae887ed60a533225b195bd966d42cf1e440
493
py
Python
3kCTF/2021/web/pawnshop/apache/src/index.py
ruhan-islam/ctf-archives
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
[ "MIT" ]
1
2021-11-02T20:53:58.000Z
2021-11-02T20:53:58.000Z
3kCTF/2021/web/pawnshop/apache/src/index.py
ruhan-islam/ctf-archives
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
[ "MIT" ]
null
null
null
3kCTF/2021/web/pawnshop/apache/src/index.py
ruhan-islam/ctf-archives
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
[ "MIT" ]
null
null
null
#!/usr/bin/python3
from funcs import *

form = cgi.FieldStorage()
action = form.getvalue('action')

if action == 'list':
    list_items()
elif action == 'bid':
    mail = form.getvalue('mail')
    item_id = form.getvalue('item_id')
    amount = form.getvalue('amount')
    if mail is not None and item_id is not None and amount is not None:
        verify_email(mail)
        save_bid(mail + "|" + item_id + "|" + amount + "\n\n")
        api({'msg': 'bid saved, we will contact winners when auction ends'})
    api({'msg': 'error'})
else:
    api(False)
25.947368
69
0.665314
b30e40b036ccd20726e930a9caacc645f345a20a
2,840
py
Python
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/CommonScripts/Scripts/GetDomainDNSDetails/GetDomainDNSDetails.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
import demistomock as demisto  # noqa # pylint: disable=unused-wildcard-import
from CommonServerPython import *  # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import *  # noqa # pylint: disable=unused-wildcard-import

from typing import (
    Dict, Any, Optional, Union, List
)
import traceback

import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdataclass
import dns.rdata

DNS_QUERY_TTL = 10.0
QTYPES = ["CNAME", "NS", "A", "AAAA"]


''' STANDALONE FUNCTION '''


def make_query(resolver: dns.resolver.Resolver, qname: str, qtype: str, use_tcp: bool) -> Dict[str, Any]:
    q_rdtype = dns.rdatatype.from_text(qtype)
    q_rdclass = dns.rdataclass.from_text("IN")

    try:
        ans = resolver.resolve(
            qname, q_rdtype, q_rdclass,
            tcp=use_tcp,
            lifetime=DNS_QUERY_TTL,
            raise_on_no_answer=True
        )
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        return {}

    if ans.rrset is None:
        return {}

    result: Dict[str, List[str]] = {}
    result[qtype] = [
        rr.to_text() for rr in ans.rrset
        if (rr is not None
            and rr.rdtype == q_rdtype
            and rr.rdclass == q_rdclass)
    ]

    return result


''' COMMAND FUNCTION '''


def get_domain_dns_details_command(args: Dict[str, Any]) -> CommandResults:
    outputs: Optional[Dict[str, Dict[str, Any]]]
    answer: Union[str, Dict[str, Any]]

    server = args.get('server')
    use_tcp = argToBoolean(args.get('use_tcp', 'Yes'))
    qtypes = QTYPES
    if (arg_qtype := args.get('qtype')) is not None:
        qtypes = argToList(arg_qtype)

    qname = args.get('domain')
    if qname is None:
        raise ValueError("domain is required")

    resolver = dns.resolver.Resolver()
    if server is not None:
        resolver.nameservers = [server]

    answer = {
        'domain': qname,
        'server': server if server is not None else 'system'
    }

    # we ask specifically for CNAMEs
    for qtype in qtypes:
        answer.update(make_query(resolver, qname, qtype, use_tcp=use_tcp))

    outputs = {
        'DomainDNSDetails': answer
    }

    markdown = tableToMarkdown(
        f'Domain DNS Details for {qname}',
        answer,
        headers=["domain", "server"] + qtypes
    )

    return CommandResults(
        readable_output=markdown,
        outputs=outputs,
        outputs_key_field=['domain', 'server']
    )


''' MAIN FUNCTION '''


def main():
    try:
        return_results(get_domain_dns_details_command(demisto.args()))
    except Exception as ex:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f'Failed to execute GetDomainDNSDetails. Error: {str(ex)}')


''' ENTRY POINT '''


if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
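A minimal standalone sketch of the dnspython call the script wraps (added; requires the dnspython package, 2.x for resolve(), and network access; the domain is an example):

import dns.resolver

resolver = dns.resolver.Resolver()
answer = resolver.resolve("example.com", "A", lifetime=10.0)
print([rr.to_text() for rr in answer])  # the A records, as strings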
23.865546
105
0.635915
5b76f0f58bd77a10887debdf519f7d1a031584da
451
py
Python
crypto/crypto-unrandompad/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
2
2021-08-09T17:08:12.000Z
2021-08-09T17:08:17.000Z
crypto/crypto-unrandompad/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
null
null
null
crypto/crypto-unrandompad/solve.py
NoXLaw/RaRCTF2021-Challenges-Public
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
[ "MIT" ]
1
2021-10-09T16:51:56.000Z
2021-10-09T16:51:56.000Z
from pwn import *
from sympy.ntheory.modular import crt
from gmpy2 import iroot
from Crypto.Util.number import long_to_bytes

ns = []
cs = []
for _ in range(3):
    s = remote(sys.argv[1], int(sys.argv[2]))
    s.recvuntil("n: ")
    ns.append(int(s.recvline().decode()))
    s.sendlineafter("opt: ", "2")
    s.recvuntil("c: ")
    cs.append(int(s.recvline().decode()))
    s.close()

ptc = int(crt(ns, cs)[0])
print(long_to_bytes(int(iroot(ptc, 3)[0])).decode())
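The solver above is a Håstad broadcast attack: three e=3 ciphertexts of the same message are combined with the CRT, after which a plain integer cube root recovers the plaintext. A self-contained toy with made-up numbers (not the challenge's keys):

from sympy.ntheory.modular import crt

m = 42
ns = [101, 103, 107]              # pairwise-coprime toy moduli
cs = [pow(m, 3, n) for n in ns]   # the same message, e = 3, three recipients
m_cubed = int(crt(ns, cs)[0])     # m**3 < 101*103*107, so CRT recovers it exactly
print(round(m_cubed ** (1 / 3)))  # 42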
23.736842
52
0.654102
947aaf98d219695563566c85c2549bd230e445ac
245
py
Python
Algorithms/Warmup/mini_max_sum.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
Algorithms/Warmup/mini_max_sum.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
Algorithms/Warmup/mini_max_sum.py
byung-u/HackerRank
4c02fefff7002b3af774b99ebf8d40f149f9d163
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
from functools import reduce
from itertools import permutations, product, combinations

arr = [1, 2, 3, 4, 5]
n = len(arr)
ret = [reduce(lambda x, y: x + y, i) for i in combinations(arr, n - 1)]
print(min(ret), max(ret))
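An equivalent approach without itertools, added for comparison: the smallest and largest (n-1)-element sums are just the total minus the largest and smallest element.

arr = [1, 2, 3, 4, 5]
total = sum(arr)
print(total - max(arr), total - min(arr))  # 10 14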
24.5
71
0.677551
947f3eed6772f22eda5ef430e9b3191c16ac374f
2,687
py
Python
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
___Python/Torsten/Python-Kurs/p05_random/m01_wuerfeln.py
uvenil/PythonKurs201806
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
[ "Apache-2.0" ]
null
null
null
import random

r = random.Random()


def wuerfeln():
    return r.randint(1, 6)


def muenzwurf():
    return r.randint(0, 1)  # 0 -> heads, 1 -> tails


def kugel():
    return r.randint(1, 49)


def lottoziehung(anzahl):
    zahl = r.randint(1, 49)
    if zahl not in ziehung:
        ziehung.append(zahl)
        anzahl += 1
    if anzahl != 6:
        lottoziehung(anzahl)


d = {}
for i in range(100000):
    augenzahl = wuerfeln()
    if augenzahl in d:
        d[augenzahl] += 1
    else:
        d[augenzahl] = 1
print("Classic die:", d)

# 1) Determine lottery numbers for '6 out of 49'

# Approach A: while loop
anzahl = 0
ziehung = []
while anzahl < 6:
    zahl = r.randint(1, 49)
    if zahl not in ziehung:
        ziehung.append(zahl)
        anzahl += 1
print("While loop:", sorted(ziehung))

# Approach B: recursive
anzahl = 0
ziehung = []
lottoziehung(anzahl)
print("Recursive:", sorted(ziehung))

# Approach C:
kugeln = 0
menge = set()  # set of balls already drawn
lottoziehung = []
while kugeln < 6:
    ziehung = kugel()
    if ziehung not in menge:  # this ball has not been drawn before
        lottoziehung.append(ziehung)
        # this ball must not be drawn again, so it goes into the set of previously drawn balls
        menge.add(ziehung)
        kugeln += 1
print(sorted(lottoziehung))

# Approach D: "lottery fairy"
urne = list(range(1, 50))  # urne = [1, 2, 3, ..., 49]
lottoziehung = []
for i in range(6):
    ziehung = urne.pop(r.randint(0, len(urne)) - 1)
    lottoziehung.append(ziehung)
print(sorted(lottoziehung))

# Approach E: use the appropriate method from random
lottoziehung = r.sample(range(1, 50), 6)
print(sorted(lottoziehung))

# 2) Write a function wuerfeln2 that rolls a fair die.
#    Only the function muenzwurf may be used in the implementation.

# rolling a die with coin flips
# binary  decimal
# 0 0 0   0
# 1 0 0   1
# 0 1 0   2
# 1 1 0   3
# 0 0 1   4
# 1 0 1   5
# 0 1 1   6
# 1 1 1   7
def wuerfeln2():
    while True:
        i_dual = 1  # binary place value
        i_augenzahl = 0
        while i_dual <= 4:
            i_augenzahl += muenzwurf() * i_dual
            i_dual *= 2  # binary place value multiplied by 2 (1, 2, 4, ...)
        if 0 < i_augenzahl < 7:
            break
    return i_augenzahl


dict_mw = {}
i_schleife = 0  # loop counter
while i_schleife < 100000:
    i_augenzahl = wuerfeln2()
    if i_augenzahl in dict_mw:
        dict_mw[i_augenzahl] += 1
    else:
        dict_mw[i_augenzahl] = 1
    i_schleife += 1
print("Coin flips: " + str(dict(sorted(dict_mw.items()))))
21.669355
139
0.596948
8469b52da50e995d6953b3f007086e6d9fc96c76
1,082
py
Python
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
frappe-bench/env/lib/python2.7/site-packages/cli_helpers/tabular_output/terminaltables_adapter.py
Semicheche/foa_frappe_docker
a186b65d5e807dd4caf049e8aeb3620a799c1225
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Format adapter for the terminaltables module.""" import terminaltables import itertools from cli_helpers.utils import filter_dict_by_key from .preprocessors import (convert_to_string, override_missing_value, style_output) supported_formats = ('ascii', 'double', 'github') preprocessors = (override_missing_value, convert_to_string, style_output) def adapter(data, headers, table_format=None, **kwargs): """Wrap terminaltables inside a function for TabularOutputFormatter.""" keys = ('title', ) table_format_handler = { 'ascii': terminaltables.AsciiTable, 'double': terminaltables.DoubleTable, 'github': terminaltables.GithubFlavoredMarkdownTable, } table = table_format_handler[table_format] t = table([headers] + list(data), **filter_dict_by_key(kwargs, keys)) dimensions = terminaltables.width_and_alignment.max_dimensions( t.table_data, t.padding_left, t.padding_right)[:3] for r in t.gen_table(*dimensions): yield u''.join(r)
30.914286
75
0.698706
b6d65f7817d4f4ae0d0fe72cc3c4ecb679fb86e4
1,010
py
Python
getData/accv.py
Dong-Ki-Lee/emotionFinder
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
[ "MIT" ]
null
null
null
getData/accv.py
Dong-Ki-Lee/emotionFinder
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
[ "MIT" ]
null
null
null
getData/accv.py
Dong-Ki-Lee/emotionFinder
54dca62993bb87f1994be3fafaf1b7c6c60d6d95
[ "MIT" ]
null
null
null
'''
Automatically combine consonants and vowels in a sentence

Author: Dongki Lee

Module purpose: the hgtk library can only compose input where everything has
been decomposed, and only the reverse for decomposing. This module uses hgtk's
decompose and compose functions so that when a sentence arrives with a detached
final consonant, e.g. 가ㄴ다 -> 간다, it can be restored to the original sentence.

Created: 2018-07-05

Change log:
    2018-07-05: initial version of the module
'''
import hgtk
import re


def combine(input_string):
    need_to_bind = re.compile('[ㄱ-ㅎ]')
    m = need_to_bind.search(input_string)
    if m is None:
        return input_string
    else:
        start = m.start()
        decompose = list(hgtk.letter.decompose(input_string[start - 1]))
        remove_key = input_string[start]
        if decompose[2] == '':
            decompose[2] = remove_key
            composed_letter = hgtk.letter.compose(decompose[0], decompose[1], decompose[2])
            input_string = input_string[0:start - 1] + composed_letter + input_string[start + 1:]
        else:
            input_string = input_string[0:start] + input_string[start + 1:]
        return input_string
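A usage sketch (added; requires the hgtk package):

print(combine("가ㄴ다"))  # -> '간다': the detached final consonant is merged back
print(combine("간다"))    # unchanged: nothing to bind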
34.827586
109
0.657426
b63a894a11177ac8639ed1aa90a4802fd5dfd07a
523
py
Python
examples/mult.py
enordquist/Go100x
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
[ "MIT" ]
null
null
null
examples/mult.py
enordquist/Go100x
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
[ "MIT" ]
null
null
null
examples/mult.py
enordquist/Go100x
327a8f767a68d7b42e6cd0aea11b2d9cc2d8b5ea
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import os

import go100x
import numpy as np

os.environ["TIMEMORY_PRECISION"] = "8"

n = int(2.0e8)
a = np.zeros([n], dtype=np.float)
b = np.zeros([n], dtype=np.float)
a += 2
b += 3

c = go100x.calculate_cpu(a, b)

go100x.set_device(0)
o = [32, 64, 128, 256, 512, 1024]
for block in o:
    for ngrid in o + [-1]:
        if ngrid < 0:
            ngrid = int((n + block - 1) / block)
        d = go100x.calculate_gpu([ngrid], [block], a, b)

print("\nResults for array of size {} ({})".format(n, float(n)))
19.37037
57
0.585086
b664a3da3d375dc5fe45b712d376cb50db9fed2a
4,702
py
Python
plugins/tff_backend/dal/node_orders.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
null
null
null
plugins/tff_backend/dal/node_orders.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
178
2017-08-02T12:58:06.000Z
2017-12-20T15:01:12.000Z
plugins/tff_backend/dal/node_orders.py
threefoldfoundation/app_backend
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
[ "Apache-2.0" ]
2
2018-01-10T10:43:12.000Z
2018-03-18T10:42:23.000Z
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
from datetime import datetime

from google.appengine.api import search
from google.appengine.api.search import SortExpression
from google.appengine.ext import ndb

from framework.bizz.job import run_job, MODE_BATCH
from mcfw.exceptions import HttpNotFoundException
from mcfw.rpc import returns, arguments
from plugins.tff_backend.consts.hoster import NODE_ORDER_SEARCH_INDEX
from plugins.tff_backend.models.hoster import NodeOrder
from plugins.tff_backend.plugin_consts import NAMESPACE
from plugins.tff_backend.utils.search import remove_all_from_index

NODE_ORDER_INDEX = search.Index(NODE_ORDER_SEARCH_INDEX, namespace=NAMESPACE)


@returns(NodeOrder)
@arguments(order_id=(int, long))
def get_node_order(order_id):
    # type: (int) -> NodeOrder
    order = NodeOrder.get_by_id(order_id)
    if not order:
        raise HttpNotFoundException('order_not_found')
    return order


def index_all_node_orders():
    remove_all_from_index(NODE_ORDER_INDEX)
    run_job(_get_all_node_orders, [], multi_index_node_order, [], mode=MODE_BATCH, batch_size=200)


def _get_all_node_orders():
    return NodeOrder.query()


def index_node_order(order):
    # type: (NodeOrder) -> list[search.PutResult]
    logging.info('Indexing node order %s', order.id)
    document = create_node_order_document(order)
    return NODE_ORDER_INDEX.put(document)


def multi_index_node_order(order_keys):
    logging.info('Indexing %s node orders', len(order_keys))
    orders = ndb.get_multi(order_keys)  # type: list[NodeOrder]
    return NODE_ORDER_INDEX.put([create_node_order_document(order) for order in orders])


def create_node_order_document(order):
    order_id_str = '%s' % order.id
    fields = [
        search.AtomField(name='id', value=order_id_str),
        search.AtomField(name='socket', value=order.socket),
        search.NumberField(name='so', value=order.odoo_sale_order_id or -1),
        search.NumberField(name='status', value=order.status),
        search.DateField(name='order_time', value=datetime.utcfromtimestamp(order.order_time)),
        search.TextField(name='username', value=order.username),
    ]
    if order.shipping_info:
        fields.extend([search.TextField(name='shipping_name', value=order.shipping_info.name),
                       search.TextField(name='shipping_email', value=order.shipping_info.email),
                       search.TextField(name='shipping_phone', value=order.shipping_info.phone),
                       search.TextField(name='shipping_address', value=order.shipping_info.address.replace('\n', ''))])
    if order.billing_info:
        fields.extend([search.TextField(name='billing_name', value=order.billing_info.name),
                       search.TextField(name='billing_email', value=order.billing_info.email),
                       search.TextField(name='billing_phone', value=order.billing_info.phone),
                       search.TextField(name='billing_address', value=order.billing_info.address.replace('\n', ''))])
    return search.Document(order_id_str, fields)


def search_node_orders(query=None, page_size=20, cursor=None):
    # type: (unicode, int, unicode) -> tuple[list[NodeOrder], search.Cursor, bool]
    options = search.QueryOptions(limit=page_size,
                                  cursor=search.Cursor(cursor),
                                  ids_only=True,
                                  sort_options=search.SortOptions(
                                      expressions=[SortExpression(expression='order_time',
                                                                  direction=SortExpression.DESCENDING)]))
    search_results = NODE_ORDER_INDEX.search(search.Query(query, options=options))  # type: search.SearchResults
    results = search_results.results  # type: list[search.ScoredDocument]
    node_orders = ndb.get_multi([NodeOrder.create_key(long(result.doc_id)) for result in results])
    return node_orders, search_results.cursor, search_results.cursor is not None


def list_node_orders_by_user(username):
    return NodeOrder.list_by_user(username)
44.358491
119
0.711825
b67a93dbdc6dae113acee0d8fa2424ce07a736e0
1,746
py
Python
checks/duplicate_content_test.py
thegreenwebfoundation/green-spider
68f22886178bbe5b476a4591a6812ee25cb5651b
[ "Apache-2.0" ]
19
2018-04-20T11:03:41.000Z
2022-01-12T20:58:56.000Z
checks/duplicate_content_test.py
thegreenwebfoundation/green-spider
68f22886178bbe5b476a4591a6812ee25cb5651b
[ "Apache-2.0" ]
160
2018-04-05T16:12:59.000Z
2022-03-01T13:01:27.000Z
checks/duplicate_content_test.py
thegreenwebfoundation/green-spider
68f22886178bbe5b476a4591a6812ee25cb5651b
[ "Apache-2.0" ]
8
2018-11-05T13:07:57.000Z
2021-06-11T11:46:43.000Z
import httpretty
from httpretty import httprettified

import unittest

from checks import duplicate_content
from checks import page_content
from checks.config import Config


@httprettified
class TestDuplicateContent(unittest.TestCase):

    def test_identical(self):
        page_body = """
        <html>
            <head>
                <title>Title</title>
            </head>
            <body>
                <h1 class="title">Headline</h1>
                <p class="intro">Second paragraph with <strong>strong words</strong></p>
                <p class="text">Third paragraph</p>
                <ul class="somelist">
                    <li>A list item</li>
                </ul>
            </body>
        </html>
        """

        url1 = 'http://example.com/'
        httpretty.register_uri(httpretty.GET, url1, body=page_body)
        url2 = 'http://www.example.com/'
        httpretty.register_uri(httpretty.GET, url2, body=page_body)

        results = {}
        config = Config(urls=[url1, url2])
        page_content_checker = page_content.Checker(config=config, previous_results={})
        results['page_content'] = page_content_checker.run()

        checker = duplicate_content.Checker(config=page_content_checker.config,
                                            previous_results=results)
        result = checker.run()
        urls_after = checker.config.urls

        self.assertEqual(result, {
            'http://example.com/ http://www.example.com/': {
                'exception': None,
                'similarity': 1.0
            }
        })
        self.assertEqual(urls_after, ['http://example.com/'])


if __name__ == '__main__':
    unittest.main()
30.631579
92
0.548683
1e5477fff3f0d8e4e575997d9206dddf56821f65
189
py
Python
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/ph-7.11-list-in-function.py
shihab4t/Books-Code
b637b6b2ad42e11faf87d29047311160fe3b2490
[ "Unlicense" ]
null
null
null
def add_numbers(numbers):
    result = 0
    for i in numbers:
        result += i
        # print("number =", i)
    return result


result = add_numbers([1, 2, 30, 4, 5, 9])
print(result)
17.181818
41
0.566138
1e8ce7e39fa909fe47e567c835da6345fb42a02c
485
py
Python
infrastructure/azwrapper.py
lizzyTheLizard/homeserver-azure
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
[ "MIT" ]
null
null
null
infrastructure/azwrapper.py
lizzyTheLizard/homeserver-azure
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
[ "MIT" ]
null
null
null
infrastructure/azwrapper.py
lizzyTheLizard/homeserver-azure
e79bd23ea09a1ce1a77afd73bb9acfd402dfdc57
[ "MIT" ]
null
null
null
from az.cli import az
import sys


# General wrapper for Azure CLI calls
def azSafe(command):
    exit_code, result_dict, logs = az(command)
    if exit_code != 0:
        print("az " + command + " failed")
        print(logs)
        sys.exit(-1)
    return result_dict


# General call to check if a resource already exists
def resourceExists(group, name):
    resourceExists = "resource list -g {} -n {}"
    # 'result' instead of the original 'list', which shadowed the builtin
    result = azSafe(resourceExists.format(group, name))
    return len(result) != 0
28.529412
53
0.659794
1ea0c610076601a406a8cb69dd2c1e21593a40b9
1,835
py
Python
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Packs/CommonScripts/Scripts/RegexGroups/RegexGroups.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
import demistomock as demisto  # noqa: F401
from CommonServerPython import *  # noqa: F401


def main():
    args = demisto.args()
    match_target = args['value']
    capture_groups = args.get('groups')
    dict_keys = args.get('keys')

    regex_flags = 0
    for flag in argToList(args.get('flags', '')):
        if flag in ('dotall', 's'):
            regex_flags |= re.DOTALL
        elif flag in ('multiline', 'm'):
            regex_flags |= re.MULTILINE
        elif flag in ('ignorecase', 'i'):
            regex_flags |= re.IGNORECASE
        elif flag in ('unicode', 'u'):
            regex_flags |= re.UNICODE
        else:
            raise ValueError(f'Unknown flag: {flag}')

    regex_pattern = re.compile(r'{}'.format(args['regex']), regex_flags)

    if capture_groups:
        capture_groups = capture_groups.split(',')
        # Validating groups input to be integers
        if not all(x.isdigit() for x in capture_groups):
            raise ValueError('Error: groups must be integers')

    if dict_keys:
        dict_keys = dict_keys.split(',')

    pattern_match = re.search(regex_pattern, match_target)
    matches = []
    if pattern_match:
        for i in pattern_match.groups():
            matches.append(i)

    if capture_groups:
        for j in capture_groups:
            if len(matches) - 1 < int(j):
                raise ValueError('Error: Regex group (' + j + ') out of range')
        matches = [matches[int(x)] for x in capture_groups]

    if dict_keys:
        if len(dict_keys) != len(matches):
            raise ValueError('Error: Number of keys does not match number of items')
        else:
            dict_matches = dict(zip(dict_keys, matches))
            demisto.results(dict_matches)
    else:
        demisto.results(matches)


if __name__ in ('__builtin__', 'builtins'):
    main()
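A plain-re illustration of the group extraction the script automates (added; no Demisto runtime needed, pattern and input are made up):

import re

m = re.search(r'(\w+)@(\w+)\.(\w+)', 'contact: alice@example.com')
print(m.groups())                                        # ('alice', 'example', 'com')
print(dict(zip(['user', 'domain', 'tld'], m.groups())))  # keyed, like the `keys` argument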
32.192982
84
0.595095
7897cac0ea4d830a3b8371de9199f19f03d22f71
2,632
py
Python
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
felixdittrich92/DeepLearning-tensorflow-keras
2880d8ed28ba87f28851affa92b6fa99d2e47be9
[ "Apache-2.0" ]
null
null
null
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
felixdittrich92/DeepLearning-tensorflow-keras
2880d8ed28ba87f28851affa92b6fa99d2e47be9
[ "Apache-2.0" ]
null
null
null
2_DeepLearning-Keras/04_Tensorboard/1-tensorboard.py
felixdittrich92/DeepLearning-tensorflow-keras
2880d8ed28ba87f28851affa92b6fa99d2e47be9
[ "Apache-2.0" ]
null
null
null
import os

import numpy as np
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
from tensorflow.keras.callbacks import *  # for TensorBoard

# Create/store the log directory (works on Linux and Windows)
dir_path = os.path.abspath("../DeepLearning/logs")

# Dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Cast to np.float32
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)

# Dataset variables
train_size = x_train.shape[0]
test_size = x_test.shape[0]
num_features = 784  # 28x28
num_classes = 10

# One-hot encode the labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Reshape the input data
x_train = x_train.reshape(train_size, num_features)
x_test = x_test.reshape(test_size, num_features)

# Model parameters
init_w = TruncatedNormal(mean=0.0, stddev=0.01)
init_b = Constant(value=0.0)
lr = 0.001
optimizer = Adam(lr=lr)
epochs = 20
batch_size = 256  # typically in [32, 1024]; how many data points are processed in parallel during training

# Define the model
model = Sequential()
model.add(Dense(units=500, kernel_initializer=init_w, bias_initializer=init_b, input_shape=(num_features, )))
model.add(Activation("relu"))
model.add(Dense(units=300, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=100, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("relu"))
model.add(Dense(units=num_classes, kernel_initializer=init_w, bias_initializer=init_b))
model.add(Activation("softmax"))
model.summary()

# Compile, train and evaluate the model
model.compile(
    loss="categorical_crossentropy",
    optimizer=optimizer,
    metrics=["accuracy"])

# TensorBoard callback
tb = TensorBoard(
    log_dir=dir_path,
    histogram_freq=1,  # every epoch; 2 = every 2 epochs, etc.
    write_graph=True)

model.fit(
    x=x_train,
    y=y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=[x_test, y_test],
    callbacks=[tb])  # the callback is required for TensorBoard

score = model.evaluate(
    x_test,
    y_test,
    verbose=0)
print("Score: ", score)

# USAGE: in a console, run `tensorboard --logdir LOG_DIRECTORY`
# To compare several models, create one subfolder per model inside the log
# directory and point TensorBoard at the parent folder.
28.301075
122
0.765957
15b74e5fe7101d3d254659d6739b6dff810a68af
489
py
Python
python/decorator/class_based_decorators_with_arguments.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
python/decorator/class_based_decorators_with_arguments.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
python/decorator/class_based_decorators_with_arguments.py
zeroam/TIL
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
[ "MIT" ]
null
null
null
import functools


class ClassDecorator(object):
    def __init__(self, arg1, arg2):
        print(f'Arguments of decorators {arg1}, {arg2}')
        self.arg1 = arg1
        self.arg2 = arg2

    def __call__(self, func):
        functools.update_wrapper(self, func)

        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper


@ClassDecorator("arg1", "arg2")
def print_args(*args):
    for arg in args:
        print(arg)


print_args(1, 2, 3)
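Note that `functools.update_wrapper(self, func)` above copies the metadata onto the decorator instance, while the object actually returned is the inner `wrapper`, whose metadata is untouched. A common variant (a sketch of my own, not part of the original file) wraps the returned function instead, so `print_args.__name__` survives decoration:

import functools

# Variant: preserve the wrapped function's metadata on the returned wrapper.
class ClassDecoratorPreservingMetadata(object):
    def __init__(self, arg1, arg2):
        self.arg1 = arg1
        self.arg2 = arg2

    def __call__(self, func):
        @functools.wraps(func)  # copies __name__, __doc__, etc. onto wrapper
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper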
20.375
56
0.607362
01be7042a6f0b68cc5fa4de5ee66956770d65fa3
794
py
Python
docs/API/Users_Guide/scripts/BAM_Slice.py
ZhenyuZ/gdc-docs
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
[ "Apache-2.0" ]
67
2016-06-09T14:11:51.000Z
2022-03-16T07:54:44.000Z
docs/API/Users_Guide/scripts/BAM_Slice.py
ZhenyuZ/gdc-docs
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
[ "Apache-2.0" ]
19
2016-06-21T15:51:11.000Z
2021-06-07T09:22:20.000Z
docs/API/Users_Guide/scripts/BAM_Slice.py
ZhenyuZ/gdc-docs
f024d5d4cd86dfa2c9e7d63850eee94d975b7948
[ "Apache-2.0" ]
32
2016-07-15T01:24:19.000Z
2019-03-25T10:42:28.000Z
import requests
import json

'''
This script will not work until $TOKEN_FILE_PATH is replaced with an actual path.
'''

token_file = "$TOKEN_FILE_PATH"
file_id = "11443f3c-9b8b-4e47-b5b7-529468fec098"

data_endpt = "https://api.gdc.cancer.gov/slicing/view/{}".format(file_id)

with open(token_file, "r") as token:
    token_string = str(token.read().strip())

params = {"gencode": ["BRCA1", "BRCA2"]}

response = requests.post(data_endpt,
                         data=json.dumps(params),
                         headers={
                             "Content-Type": "application/json",
                             "X-Auth-Token": token_string
                         })

file_name = "brca_slices.bam"

with open(file_name, "wb") as output_file:
    output_file.write(response.content)
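A more defensive variant of the download step (a sketch, not from the GDC docs): fail loudly on an HTTP error and stream the BAM slice to disk instead of buffering the whole response in memory. It reuses the names defined above:

response = requests.post(data_endpt,
                         data=json.dumps(params),
                         headers={"Content-Type": "application/json",
                                  "X-Auth-Token": token_string},
                         stream=True)
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
with open(file_name, "wb") as output_file:
    for chunk in response.iter_content(chunk_size=8192):
        output_file.write(chunk)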
26.466667
73
0.600756
da7a049f44bfb52f1ea5a044260e1bee670f6bac
1,567
py
Python
MockServer/server.py
mcteacraft/MovingSpirit
90fb85809a46f286b55ecc4e1d2adbe9579ca713
[ "MIT" ]
null
null
null
MockServer/server.py
mcteacraft/MovingSpirit
90fb85809a46f286b55ecc4e1d2adbe9579ca713
[ "MIT" ]
null
null
null
MockServer/server.py
mcteacraft/MovingSpirit
90fb85809a46f286b55ecc4e1d2adbe9579ca713
[ "MIT" ]
null
null
null
from http.server import BaseHTTPRequestHandler, HTTPServer

hostName = "localhost"
serverPort = 8080


class MyServer(BaseHTTPRequestHandler):
    state = "Stopped"

    def __init__(self, request, client_address, server):
        BaseHTTPRequestHandler.__init__(self, request, client_address, server)

    def do_GET(self):
        self.send_response(200)
        if self.path == "/minecraft/start":
            if MyServer.state == "Stopped":
                MyServer.state = "Starting"
        elif self.path == "/minecraft/stop":
            if MyServer.state == "Running":
                MyServer.state = "Stopping"
        else:
            if MyServer.state == "Starting":
                MyServer.state = "Running"
            if MyServer.state == "Stopping":
                MyServer.state = "Stopped"

        if self.path == "/minecraft/status":
            responseJson = '{"status" : "' + MyServer.state + '"}'
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(bytes(responseJson, "utf-8"))
        else:
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(bytes(MyServer.state, "utf-8"))


if __name__ == "__main__":
    webServer = HTTPServer((hostName, serverPort), MyServer)
    print("Server started http://%s:%s" % (hostName, serverPort))

    try:
        webServer.serve_forever()
    except KeyboardInterrupt:
        pass

    webServer.server_close()
    print("Server stopped.")
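A hypothetical client session against this mock, using only the standard library. Note that any request other than start/stop advances an in-flight transition, so querying the status after a start moves the state from Starting to Running:

from urllib.request import urlopen

print(urlopen("http://localhost:8080/minecraft/start").read())   # b'Starting'
print(urlopen("http://localhost:8080/minecraft/status").read())  # b'{"status" : "Running"}'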
33.340426
78
0.589024
16f23c8bc19c75ec9e1270d93c6f8d77b4b97e77
9,916
py
Python
pythonProj/FZPython/pyquant/db_models/__init__.py
iHamburg/FZQuant
86b750ec33d01badfd3f324d6f1599118b9bf8ff
[ "MIT" ]
null
null
null
pythonProj/FZPython/pyquant/db_models/__init__.py
iHamburg/FZQuant
86b750ec33d01badfd3f324d6f1599118b9bf8ff
[ "MIT" ]
null
null
null
pythonProj/FZPython/pyquant/db_models/__init__.py
iHamburg/FZQuant
86b750ec33d01badfd3f324d6f1599118b9bf8ff
[ "MIT" ]
2
2019-04-10T10:05:00.000Z
2021-11-24T17:17:23.000Z
#!/usr/bin/env python
# coding: utf8

import json
import datetime
from pprint import pprint

from sqlalchemy import Column, String, Integer, Float, DateTime
from sqlalchemy import Table, Text
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
import pandas as pd

from pyquant.libs.mysqllib import session
from pyquant.libs.mysqllib import BaseModel as Base
import pyquant.libs.utillib as utillib
from pyquant.libs.cachelib import cache
from pyquant.utils.monitor import listener, Monitor, addCache

# Many-Many Relation
symbolgroup_symbol = Table('symbolgroup_symbol', Base.metadata,
                           Column('symbol_id', ForeignKey('symbol.id'), primary_key=True),
                           Column('symbolgroup_id', ForeignKey('symbolgroup.id'), primary_key=True))

stockIndex_symbol = Table('stockIndex_symbol', Base.metadata,
                          Column('stockIndex_id', ForeignKey('stockIndex.id'), primary_key=True),
                          Column('symbol_id', ForeignKey('symbol.id'), primary_key=True))


class User(Base):
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    username = Column(String)
    # fullname = Column(String)
    password = Column(String)

    # def __repr__(self):
    #     return "<User(name='%s', fullname='%s', password='%s')>" % (
    #         self.name, self.fullname, self.password)


class Symbol(Base):
    __tablename__ = 'symbol'
    __table_args__ = {
        'mysql_engine': 'InnoDB',
        'mysql_charset': 'utf8',
    }

    id = Column(Integer, primary_key=True)
    exchange_id = Column(String)
    ticker = Column(String)
    instrument = Column(String)
    name = Column(String)
    sector = Column(String)
    symbolgroup = relationship('SymbolGroup', secondary=symbolgroup_symbol, back_populates='symbol')
    stockIndex = relationship('StockIndex', secondary=stockIndex_symbol, back_populates='symbol')

    def __repr__(self):
        return "<Symbol(id='%s', exchange_id='%s', ticker='%s', instrument='%s', name='%s', sector='%s')>" % (
            self.id, self.exchange_id, self.ticker, self.instrument, self.name, self.sector)

    @classmethod
    # @listener(Monitor)
    def get_by_ticker(cls, ticker, index=False, lock_mode=None):
        cache_key = '%s-%s-%s-%s-%s' % (cls.__name__, 'get_stock_by_ticker', ticker, index, lock_mode)
        cache_value = cache.get(cache_key)
        if cache_value:  # if cached, return the cached value directly
            return cache_value

        query = session.query(cls)
        if lock_mode:
            query = query.with_lockmode(lock_mode)
        query = query.filter(cls.ticker == ticker, cls.instrument == ('index' if index else 'stock'))
        obj = query.first()
        cache.set(cache_key, obj)
        return obj

    @staticmethod
    def get_list_by_symbolgroup_id(symbolgroup_id, limit=30, offset=0):
        return session.query(Symbol).filter(Symbol.symbolgroup.any(id=symbolgroup_id)).\
            limit(limit).offset(offset).all()

    @classmethod
    def get_index_list(cls):
        return session.query(cls).filter(Symbol.instrument == 'index').all()

    @property
    def index(self):
        return True if self.instrument == 'index' else False


def _test_get_all():
    objs = Symbol.get_all(columns='id', limit=None)
    for row in objs:
        print(row)


def _test_index():
    symbol = Symbol.get_by_id(2408)
    print('index', symbol.index)


class DailyPrice(Base):
    """Daily prices"""
    __tablename__ = 'dailyPrice'

    id = Column(Integer, primary_key=True)
    symbol_id = Column(Integer, ForeignKey('symbol.id'))
    price_date = Column(DateTime)
    open_price = Column(Float)
    high_price = Column(Float)
    low_price = Column(Float)
    close_price = Column(Float)
    volume = Column(Integer)
    symbol = relationship("Symbol", back_populates="daily_price")

    def __repr__(self):
        return "<Daily_price( symbol_id='%s', price_date='%s', o='%s', h='%s', l='%s', c='%s', v=='%s')>" % (
            self.symbol_id, self.price_date, self.open_price, self.high_price,
            self.low_price, self.close_price, self.volume)

    def to_dict(self):
        obj = super(DailyPrice, self).to_dict()
        obj['price_date'] = str(obj['price_date'])
        return obj

    @classmethod
    # @listener(Monitor)
    def get_by_symbol_id(cls, symbol_id, fromdate=None, todate=None, output='dict', isCache=True):
        """Look up daily prices by symbol_id.

        :param symbol_id:
        :param fromdate:
        :param todate:
        :param output:
        :return:
        """
        if not todate:
            todate = str(datetime.date.today())

        cache_key = '%s-%s-%s-%s-%s-%s' % (cls.__name__, 'get_by_symbol_id', symbol_id, fromdate, todate, output)
        # print('cache key', cache_key)
        if isCache:  # use the cache
            cache_value = cache.get(cache_key)
            if isinstance(cache_value, pd.DataFrame):
                if not cache_value.empty:
                    return cache_value
            if cache_value:  # if cached, return the cached value directly
                return cache_value

        where = []
        if isinstance(symbol_id, (list, tuple)):
            # Note: this branch was commented out in the flattened source,
            # which would have left an empty `if` body; restored so that
            # _test_multi_symbol (which passes a list) works.
            where.append(DailyPrice.symbol_id.in_(symbol_id))
        else:
            where.append(DailyPrice.symbol_id == symbol_id)
        if fromdate:
            where.append(DailyPrice.price_date >= fromdate)
        if todate:
            where.append(DailyPrice.price_date < todate)
        # print(where)

        if output == 'df':
            df = pd.read_sql(session.query(DailyPrice).filter(*where).statement, session.bind)
            del df['id']
            del df['symbol_id']
            # set the index
            df['price_date'] = df['price_date'].astype('datetime64[ns]')
            df = df.set_index('price_date')
            # rename the columns
            df.columns = ['open', 'high', 'low', 'close', 'volume']
            # reorder the columns
            cols = ['open', 'high', 'close', 'low', 'volume']
            objs = df.ix[:, cols]
        elif output == 'dict':
            # print('save to cache', cache_key)
            objs = [row.to_dict() for row in session.query(DailyPrice).filter(*where).all()]
        else:  # models
            objs = session.query(DailyPrice).filter(*where).all()

        cache.set(cache_key, objs)
        return objs


def _test_multi_symbol():
    print(DailyPrice.get_by_symbol_id([17, 18], fromdate='2017-01-01', output='df')[:5])


class StockIndex(Base):
    __tablename__ = 'stockIndex'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    symbol_id = Column(Integer, ForeignKey('symbol.id'))
    symbol = relationship('Symbol', secondary=stockIndex_symbol, back_populates='stockIndex')


def _test_get_symbols():
    stockindex = StockIndex.get_by_id(2)
    print(stockindex.symbol)


class SymbolGroup(Base):
    __tablename__ = 'symbolgroup'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship("User", back_populates="symbolgroup")
    symbol = relationship('Symbol', secondary=symbolgroup_symbol, back_populates='symbolgroup')

    @staticmethod
    def get_system_groups():
        return session.query(__class__).filter(SymbolGroup.user_id == 0).all()


class Strategy(Base):
    __tablename__ = 'strategy'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    user_id = Column(Integer, ForeignKey('user.id'))
    filePath = Column(String)
    desc = Column(String)


# One-Many Relations
Symbol.daily_price = relationship("DailyPrice", back_populates="symbol")
User.symbolgroup = relationship("SymbolGroup", back_populates="user")


def _query_relation():
    query = session.query(DailyPrice).filter(DailyPrice.id == '7798525').all()
    pprint.pprint([row.to_dict() for row in query])


def _query_join():
    pprint.pprint(session.query(DailyPrice).join(Symbol).filter(Symbol.id == 2433).limit(10).all())


def _symbol_find_all():
    # print(Symbol.find_all((Symbol.id > 200), 10))
    print(Symbol.find_all(limit=10))


def _add_user_symbolgroup():
    user = User(username='new user', password='123')
    print(user)
    sg = SymbolGroup(name='上证50')
    user.symbolgroup = [sg]
    session.add(user)
    session.commit()


def _test_m_m_relation1():
    # sd = SymbolGroup.get_by_id(3)
    # print(sd.symbol)
    si = StockIndex.get_by_id(2)
    print(si.symbol)


def _test_add_m_m_relation():
    sd = SymbolGroup.get(3)
    sd.symbol.append(Symbol.get(19))
    session.commit()


def _test_delete_m_m_relation():
    sd = SymbolGroup.get(3)
    sd.symbol.remove(Symbol.get(19))
    session.commit()


def _test_add_user():
    user = User(username='new user222', password='123')
    session.add(user)
    session.commit()


if __name__ == '__main__':
    """"""
    import pprint

    # _query()
    # _get()
    # print(Symbol.get_by_ticker('000001', True))
    # query_relation()
    # _query_join()
    # _symbol_find_all()
    # print(Symbol.query().limit(10).all())
    # print(User.get(1).to_dict())
    # print(Symbol.get(17).to_dict())
    # query = session.query(Symbol). \
    #     filter(Symbol.symbolgroup.any(id=3)). \
    #     all()
    # print(query)
    # _test_add_m_m_relation()
    # _test_delete_m_m_relation()
    # print(session.query(SymbolGroup).filter(SymbolGroup.user_id == 0).all())
    # _symbol_find_all()
    # print(DailyPrice.get_by_id(100).to_dict())
    # _test_m_m_relation1()
    # print(StockIndex.get_all())
    # _test_m_m_relation1()
    # print(Symbol.get_stock_by_ticker('000001', index=True))
    # print(Symbol.get_by_id(20))
    # print(DailyPrice.get_by_symbol_id(17, fromdate='2017-01-01', output='df'))
    # _test_multi_symbol()
    # arr = [12, 34]
    # s = 'key:%s' % arr
    # print(s)
    # _test_get_max_date()
    # _test_get_all()
    # _test_index()
    # _test_m_m_relation1()
    # _test_add_user()
    _test_get_symbols()
26.655914
125
0.640581
c146650f80b6d9b0345e1e6e8e2d34975d1f6326
1,149
py
Python
api/clean/sequence_num.py
Latent-Lxx/dazhou-dw
902b4b625cda4c9e4eb205017b8955b81f37a0b5
[ "MIT" ]
null
null
null
api/clean/sequence_num.py
Latent-Lxx/dazhou-dw
902b4b625cda4c9e4eb205017b8955b81f37a0b5
[ "MIT" ]
null
null
null
api/clean/sequence_num.py
Latent-Lxx/dazhou-dw
902b4b625cda4c9e4eb205017b8955b81f37a0b5
[ "MIT" ]
1
2022-02-11T04:44:37.000Z
2022-02-11T04:44:37.000Z
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2021/7/19 7:00 PM
# @Author  : Latent
# @Email   : [email protected]
# @File    : sequence_num.py
# @Software: PyCharm
# @class   : cleaning of inventory data

"""
Field notes:
    1. inventory_id ----> auto-incremented by the database
    2. num          ---> current stock
    3. num_level    ---> stock level
"""


class Sequence_Num(object):

    # 1. Stock-level mapping ------> stock 0-50 -> tight ('紧张'),
    #    50-100 -> normal ('正常'), above 100 -> ample ('充足')
    @classmethod
    def sequence_num_level(cls, data):
        platform = data['platform']
        if platform != 'pdd':
            _func_none = (lambda x: x if type(x) == int else 0)
            item_num = int(_func_none(data['public']['num']))
            if item_num <= 50:
                num_level = '紧张'
            elif 50 < item_num <= 100:
                num_level = '正常'
            else:
                num_level = '充足'
        else:
            item_num = int(data['public']['num'])
            if item_num <= 300:
                num_level = '紧张'
            elif 300 < item_num <= 999:
                num_level = '正常'
            else:
                num_level = '充足'

        num_info = {'num': item_num, 'num_level': num_level}
        return num_info
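A quick usage sketch following the input structure the method reads; the platform name and stock count below are made up for illustration:

# Hypothetical input record; only 'platform' and 'public.num' are consumed.
sample = {'platform': 'taobao', 'public': {'num': 72}}
print(Sequence_Num.sequence_num_level(sample))
# -> {'num': 72, 'num_level': '正常'}  (72 falls in the 50-100 "normal" band)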
24.978261
63
0.491732
c1e4b11d7e74da3041c02009a83ef0d1d92d36f0
1,936
py
Python
main.py
alexpod1000/TF_PoseNet
0329a16275ec974d660e99564949ca95d71389ff
[ "MIT" ]
1
2020-03-04T02:32:07.000Z
2020-03-04T02:32:07.000Z
main.py
alexpod1000/TF_PoseNet
0329a16275ec974d660e99564949ca95d71389ff
[ "MIT" ]
null
null
null
main.py
alexpod1000/TF_PoseNet
0329a16275ec974d660e99564949ca95d71389ff
[ "MIT" ]
null
null
null
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')

from utils.model_utils import perform_prediction, decode_predictions

parts = [
    "nose", "leftEye", "rightEye", "leftEar", "rightEar",
    "leftShoulder", "rightShoulder", "leftElbow", "rightElbow",
    "leftWrist", "rightWrist", "leftHip", "rightHip",
    "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"
]

min_conf_score = 0.2
model_path = 'models/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite'

# Resolution = ((InputImageSize - 1) / OutputStride) + 1
# ((513 - 1) / 32) + 1 = 17 (our case), so we are using the "worst", accuracy wise
interpreter = tf.lite.Interpreter(model_path=model_path)

image = cv2.imread('images/1.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

heatmaps, offsets, displacements_fwd, displacements_bwd, resized_image = perform_prediction(image, interpreter)
keypoints = decode_predictions(heatmaps, offsets, output_stride=32)

resize_y_ratio = image.shape[0] / resized_image.shape[0]
resize_x_ratio = image.shape[1] / resized_image.shape[1]

image_cpy = np.copy(image)
pose_conf = np.array([keypoint["confidence"] for keypoint in keypoints]).mean()
for keypoint in keypoints:
    scale = 5
    # rescale to original (not resized by model) image coordinates
    pos_y = int(keypoint["y"] * resize_y_ratio)
    pos_x = int(keypoint["x"] * resize_x_ratio)
    confidence_score = keypoint["confidence"]
    if confidence_score > min_conf_score:
        cv2.circle(image_cpy, (pos_x, pos_y), scale, (255, 0, 0), thickness=cv2.FILLED)
        cv2.putText(image_cpy, parts[keypoint["part_index"]], (pos_x, pos_y), 0, 0.4, (0, 255, 0))
        print("Confidence for {}: {}".format(parts[keypoint["part_index"]], confidence_score))

print("Confidence for pose {}".format(pose_conf))

plt.imshow(image_cpy)
plt.show()
28.470588
111
0.708678
a9b755b3d21103a71c140e3ae9bbb69470f88938
2,157
py
Python
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
7-assets/past-student-repos/LambdaSchool-master/m7/71a1/hashtable/test_hashtable_no_collisions.py
eengineergz/Lambda
1fe511f7ef550aed998b75c18a432abf6ab41c5f
[ "MIT" ]
null
null
null
""" This is the same test, but with big hash tables that are _unlikely_ to have collisions after the 3 inserts we do. Does not collide with DJB2 or FNV-1-64. But could collide with other hashes. """ import unittest from hashtable import HashTable class TestHashTable(unittest.TestCase): def test_hash_table_insertion_and_retrieval(self): ht = HashTable(0x10000) ht.put("key-0", "val-0") ht.put("key-1", "val-1") ht.put("key-2", "val-2") return_value = ht.get("key-0") self.assertTrue(return_value == "val-0") return_value = ht.get("key-1") self.assertTrue(return_value == "val-1") return_value = ht.get("key-2") self.assertTrue(return_value == "val-2") def test_hash_table_pution_overwrites_correctly(self): ht = HashTable(0x10000) ht.put("key-0", "val-0") ht.put("key-1", "val-1") ht.put("key-2", "val-2") ht.put("key-0", "new-val-0") ht.put("key-1", "new-val-1") ht.put("key-2", "new-val-2") return_value = ht.get("key-0") self.assertTrue(return_value == "new-val-0") return_value = ht.get("key-1") self.assertTrue(return_value == "new-val-1") return_value = ht.get("key-2") self.assertTrue(return_value == "new-val-2") def test_hash_table_removes_correctly(self): ht = HashTable(0x10000) ht.put("key-0", "val-0") ht.put("key-1", "val-1") ht.put("key-2", "val-2") return_value = ht.get("key-0") self.assertTrue(return_value == "val-0") return_value = ht.get("key-1") self.assertTrue(return_value == "val-1") return_value = ht.get("key-2") self.assertTrue(return_value == "val-2") ht.delete("key-2") ht.delete("key-1") ht.delete("key-0") return_value = ht.get("key-0") self.assertTrue(return_value is None) return_value = ht.get("key-1") self.assertTrue(return_value is None) return_value = ht.get("key-2") self.assertTrue(return_value is None) if __name__ == '__main__': unittest.main()
29.958333
76
0.590635
8243304ed31524da22fd730f6960d925ef517d1c
4,232
py
Python
vkapp/bot/migrations/0001_initial.py
ParuninPavel/lenta4_hack
6d3340201deadf5757e37ddd7cf5580b928d7bda
[ "MIT" ]
1
2017-11-23T13:33:13.000Z
2017-11-23T13:33:13.000Z
vkapp/bot/migrations/0001_initial.py
ParuninPavel/lenta4_hack
6d3340201deadf5757e37ddd7cf5580b928d7bda
[ "MIT" ]
null
null
null
vkapp/bot/migrations/0001_initial.py
ParuninPavel/lenta4_hack
6d3340201deadf5757e37ddd7cf5580b928d7bda
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-21 13:14
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Admin',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='AdminReview',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('rating', models.IntegerField()),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),
            ],
        ),
        migrations.CreateModel(
            name='Blogger',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('balance', models.FloatField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Income',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('type', models.CharField(choices=[('PROP', 'Предложение новости'), ('PUB', 'Опубликование новости')], default='PROP', max_length=4)),
                ('amount', models.FloatField(default=0)),
                ('date_time', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('link', models.CharField(blank=True, max_length=300, null=True)),
                ('media', models.CharField(blank=True, max_length=3000, null=True)),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('blogger', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),
            ],
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('mount', models.FloatField(default=0)),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('blogger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Blogger')),
            ],
        ),
        migrations.CreateModel(
            name='Publication',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('date_time', models.DateTimeField(auto_now_add=True)),
                ('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.Admin')),
                ('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News')),
            ],
        ),
        migrations.CreateModel(
            name='VKUser',
            fields=[
                ('vk_id', models.IntegerField(primary_key=True, serialize=False)),
            ],
        ),
        migrations.AddField(
            model_name='payment',
            name='payer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
        ),
        migrations.AddField(
            model_name='income',
            name='news',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News'),
        ),
        migrations.AddField(
            model_name='blogger',
            name='vk_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
        ),
        migrations.AddField(
            model_name='adminreview',
            name='news',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.News'),
        ),
        migrations.AddField(
            model_name='admin',
            name='vk_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bot.VKUser'),
        ),
    ]
39.185185
150
0.553639
6b7e59d167fcf735ba0f1030602e287f95102618
112
py
Python
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
1
2017-05-18T06:11:02.000Z
2017-05-18T06:11:02.000Z
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
1
2016-02-09T06:00:07.000Z
2016-02-09T07:20:13.000Z
comp/microsoft/has_solution/009_longest_semi_alernating_substr.py
cc13ny/all-in
bc0b01e44e121ea68724da16f25f7e24386c53de
[ "MIT" ]
2
2019-06-27T09:07:26.000Z
2019-07-01T04:40:13.000Z
'''
same as 005.
- 002: the substring
- 005: the length
'''


class Solution:
    def longest_substr(self, s):
        # The original file ends at the signature above; the body below is
        # reconstructed from the usual statement of the problem (length of
        # the longest substring with no three equal consecutive characters)
        # -- an assumption, not the author's code.
        if len(s) < 3:
            return len(s)
        best, start = 2, 0
        for i in range(2, len(s)):
            if s[i] == s[i - 1] == s[i - 2]:
                start = i - 1
            best = max(best, i - start + 1)
        return best
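A quick check of the reconstructed solution; the sample inputs are illustrative:

sol = Solution()
print(sol.longest_substr("baaabbabbb"))  # 7 ("aabbabb")
print(sol.longest_substr("abaaaa"))      # 4 ("abaa")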
10.181818
32
0.633929
d407e07335c75b4a785c579550380e60429aee7c
4,724
py
Python
research/cv/Pix2Pix/src/utils/config.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
1
2021-11-18T08:17:44.000Z
2021-11-18T08:17:44.000Z
research/cv/Pix2Pix/src/utils/config.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
null
null
null
research/cv/Pix2Pix/src/utils/config.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
2
2019-09-01T06:17:04.000Z
2019-10-04T08:39:45.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""
Define the common options that are used in both training and test.
"""
import argparse
import ast


def get_args():
    '''
    get args.
    '''
    parser = argparse.ArgumentParser(description='Pix2Pix Model')

    # parameters
    parser.add_argument('--device_target', type=str, default='Ascend', choices=('Ascend', 'GPU'),
                        help='device where the code will be implemented (default: Ascend)')
    parser.add_argument('--run_distribute', type=int, default=0,
                        help='distributed training, default is 0.')
    parser.add_argument('--device_num', type=int, default=1,
                        help='device num, default is 1.')
    parser.add_argument('--device_id', type=int, default=6,
                        help='device id, default is 6.')
    parser.add_argument('--save_graphs', type=ast.literal_eval, default=False,
                        help='whether save graphs, default is False.')
    parser.add_argument('--init_type', type=str, default='normal',
                        help='network initialization, default is normal.')
    parser.add_argument('--init_gain', type=float, default=0.02,
                        help='scaling factor for normal, xavier and orthogonal, default is 0.02.')
    parser.add_argument('--pad_mode', type=str, default='CONSTANT', choices=('CONSTANT', 'REFLECT', 'SYMMETRIC'),
                        help='padding mode, default is CONSTANT.')
    parser.add_argument('--load_size', type=int, default=286,
                        help='scale images to this size, default is 286.')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='batch_size, default is 1.')
    parser.add_argument('--LAMBDA_Dis', type=float, default=0.5,
                        help='weight for Discriminator Loss, default is 0.5.')
    parser.add_argument('--LAMBDA_GAN', type=int, default=1,
                        help='weight for GAN Loss, default is 1.')
    parser.add_argument('--LAMBDA_L1', type=int, default=100,
                        help='weight for L1 Loss, default is 100.')
    parser.add_argument('--beta1', type=float, default=0.5,
                        help='adam beta1, default is 0.5.')
    parser.add_argument('--beta2', type=float, default=0.999,
                        help='adam beta2, default is 0.999.')
    parser.add_argument('--lr', type=float, default=0.0002,
                        help='the initial learning rate, default is 0.0002.')
    parser.add_argument('--lr_policy', type=str, default='linear',
                        help='learning rate policy, default is linear.')
    parser.add_argument('--epoch_num', type=int, default=200,
                        help='epoch number for training, default is 200.')
    parser.add_argument('--n_epochs', type=int, default=100,
                        help='number of epochs with the initial learning rate, default is 100.')
    parser.add_argument('--n_epochs_decay', type=int, default=100,
                        help='number of epochs with the dynamic learning rate, default is 100.')
    parser.add_argument('--dataset_size', type=int, default=400, choices=(400, 1096),
                        help='for Facade_dataset, the number is 400; for Maps_dataset, the number is 1096.')

    # The location of input and output data
    parser.add_argument('--train_data_dir', type=str, default=None,
                        help='the file path of input data during training.')
    parser.add_argument('--val_data_dir', type=str, default=None,
                        help='the file path of input data during validating.')
    parser.add_argument('--train_fakeimg_dir', type=str, default='./results/fake_img/',
                        help='during training, the file path of stored fake img.')
    parser.add_argument('--loss_show_dir', type=str, default='./results/loss_show',
                        help='during training, the file path of stored loss img.')
    parser.add_argument('--ckpt_dir', type=str, default='./results/ckpt/',
                        help='during training, the file path of stored CKPT.')
    parser.add_argument('--ckpt', type=str, default=None,
                        help='during validating, the file path of the CKPT used.')
    parser.add_argument('--predict_dir', type=str, default='./results/predict/',
                        help='during validating, the file path of Generated image.')

    args = parser.parse_args()
    return args
63.837838
120
0.671253
d474d5eb551dacae95e039d18d26a387b48c2cc2
2,903
py
Python
indl/metrics.py
SachsLab/indl
531d2e0c2ee765004aedc553af40e258262f86cb
[ "Apache-2.0" ]
1
2021-02-22T01:39:50.000Z
2021-02-22T01:39:50.000Z
indl/metrics.py
SachsLab/indl
531d2e0c2ee765004aedc553af40e258262f86cb
[ "Apache-2.0" ]
null
null
null
indl/metrics.py
SachsLab/indl
531d2e0c2ee765004aedc553af40e258262f86cb
[ "Apache-2.0" ]
null
null
null
from typing import List

__all__ = ['dprime', 'quickplot_history']


def dprime(y_true, y_pred, pmarg: float = 0.01,
           outputs: List[str] = ['dprime', 'bias', 'accuracy']) -> tuple:
    """
    Calculate D-Prime for binary data. 70% for both classes is d=1.0488.
    Highest possible is 6.93, but effectively 4.65 for 99%.
    http://www.birmingham.ac.uk/Documents/college-les/psych/vision-laboratory/sdtintro.pdf
    This function is not designed to behave as a valid 'Tensorflow metric'.

    Args:
        y_true (array-like): True labels.
        y_pred (array-like): Predicted labels.
        pmarg:
        outputs: list of outputs among 'dprime', 'bias', 'accuracy'

    Returns:
        Calculated d-prime value.
    """
    import numpy as np
    from scipy.stats import norm

    # TODO: Adapt this function for tensorflow
    # y_pred = ops.convert_to_tensor(y_pred)
    # y_true = math_ops.cast(y_true, y_pred.dtype)
    # return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)

    # TODO: Check that true_y only has 2 classes, and test_y is entirely within true_y classes.
    b_true = y_pred == y_true
    b_pos = np.unique(y_true, return_inverse=True)[1].astype(bool)
    true_pos = np.sum(np.logical_and(b_true, b_pos))
    true_neg = np.sum(np.logical_and(b_true, ~b_pos))
    false_pos = np.sum(np.logical_and(~b_true, b_pos))
    false_neg = np.sum(np.logical_and(~b_true, ~b_pos))

    tpr = true_pos / (true_pos + false_neg)
    tpr = max(pmarg, min(tpr, 1 - pmarg))
    fpr = false_pos / (false_pos + true_neg)
    fpr = max(pmarg, min(fpr, 1 - pmarg))

    ztpr = norm.ppf(tpr, loc=0, scale=1)
    zfpr = norm.ppf(fpr, loc=0, scale=1)

    # Other measures of performance:
    # sens = tp ./ (tp+fp)
    # spec = tn ./ (tn+fn)
    # balAcc = (sens+spec)/2
    # informedness = sens+spec-1

    output = tuple()
    for out in outputs:
        if out == 'dprime':
            dprime = ztpr - zfpr
            output += (dprime,)
        elif out == 'bias':
            bias = -(ztpr + zfpr) / 2
            output += (bias,)
        elif out == 'accuracy':
            accuracy = 100 * (true_pos + true_neg) / (true_pos + false_pos + false_neg + true_neg)
            output += (accuracy,)
    return output


def quickplot_history(history) -> None:
    """
    A little helper function to do a quick plot of model fit results.

    Args:
        history (tf.keras History):
    """
    import matplotlib.pyplot as plt
    if hasattr(history, 'history'):
        history = history.history
    hist_metrics = [_ for _ in history.keys() if not _.startswith('val_')]
    for m_ix, m in enumerate(hist_metrics):
        plt.subplot(len(hist_metrics), 1, m_ix + 1)
        plt.plot(history[m], label='Train')
        plt.plot(history['val_' + m], label='Valid.')
        plt.xlabel('Epoch')
        plt.ylabel(m)
        plt.legend()
    plt.tight_layout()
    plt.show()
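An example call with toy data (the arrays below are illustrative, not from the package's tests):

import numpy as np

# Two classes, 6 of 8 predictions correct: tpr = 0.75, fpr = 0.25.
y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1])
y_pred = np.array([0, 0, 0, 1, 1, 1, 1, 0])
d, bias, acc = dprime(y_true, y_pred)
print(d, bias, acc)  # d' ≈ 1.35, bias = 0.0, accuracy = 75.0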
31.554348
110
0.617637
2e0c32714f6d997eca9a7323c26f7d7c44b13150
1,007
py
Python
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch02_math/ex03_perfectnumber_test.py
Kreijeck/learning
eaffee08e61f2a34e01eb8f9f04519aac633f48c
[ "MIT" ]
null
null
null
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden

import pytest

from ch02_math.solutions.ex03_perfectnumber import is_perfect_number_simple, \
    calc_perfect_numbers, is_perfect_number_based_on_proper_divisors


@pytest.mark.parametrize("n, expected",
                         [(6, True), (28, True), (496, True), (8128, True)])
def test_is_perfect_number_simple(n, expected):
    assert is_perfect_number_simple(n) == expected


@pytest.mark.parametrize("n, expected",
                         [(50, [6, 28]), (1000, [6, 28, 496]),
                          (10000, [6, 28, 496, 8128])])
def test_calc_perfect_numbers(n, expected):
    assert calc_perfect_numbers(n) == expected


@pytest.mark.parametrize("n, expected",
                         [(6, True), (28, True), (496, True), (8128, True)])
def test_is_perfect_number_based_on_proper_divisors(n, expected):
    assert is_perfect_number_based_on_proper_divisors(n) == expected
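The solutions module under test is not included in this row. Minimal sketches that would satisfy the first two tests (assumptions for illustration, not Michael Inden's code; the proper-divisors variant would factor the divisor sum into a helper):

def is_perfect_number_simple(n):
    # A number is perfect if it equals the sum of its proper divisors.
    return n == sum(i for i in range(1, n) if n % i == 0)


def calc_perfect_numbers(limit):
    return [n for n in range(2, limit + 1) if is_perfect_number_simple(n)]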
34.724138
141
0.647468
d84fb925e5f198a242b49a3625906021e8cf6205
3,412
py
Python
src/torch/npu/random.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
1
2021-12-02T03:07:35.000Z
2021-12-02T03:07:35.000Z
src/torch/npu/random.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
1
2021-11-12T07:23:03.000Z
2021-11-12T08:28:13.000Z
src/torch/npu/random.py
Ascend/pytorch
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
from . import _lazy_init, _lazy_call, device_count, current_device

__all__ = ['manual_seed', 'manual_seed_all',
           'seed', 'seed_all', 'initial_seed']


def manual_seed(seed):
    r"""Sets the seed for generating random numbers for the current NPU.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.

    .. warning::
        If you are working with a multi-NPU model, this function is insufficient
        to get determinism. To seed all NPUs, use :func:`manual_seed_all`.
    """
    seed = int(seed)

    def cb():
        idx = current_device()
        default_generator = torch.npu.default_generators[idx]
        default_generator.manual_seed(seed)

    _lazy_call(cb)


def manual_seed_all(seed):
    r"""Sets the seed for generating random numbers on all NPUs.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)

    def cb():
        for i in range(device_count()):
            default_generator = torch.npu.default_generators[i]
            default_generator.manual_seed(seed)

    _lazy_call(cb)


def seed():
    r"""Sets the seed for generating random numbers to a random number for the current NPU.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.

    .. warning::
        If you are working with a multi-NPU model, this function will only initialize
        the seed on one NPU. To initialize all NPUs, use :func:`seed_all`.
    """
    def cb():
        idx = current_device()
        default_generator = torch.npu.default_generators[idx]
        default_generator.seed()

    _lazy_call(cb)


def seed_all():
    r"""Sets the seed for generating random numbers to a random number on all NPUs.
    It's safe to call this function if NPU is not available; in that
    case, it is silently ignored.
    """
    def cb():
        random_seed = 0
        seeded = False
        for i in range(device_count()):
            default_generator = torch.npu.default_generators[i]
            if not seeded:
                default_generator.seed()
                random_seed = default_generator.initial_seed()
                seeded = True
            else:
                default_generator.manual_seed(random_seed)

    _lazy_call(cb)


def initial_seed():
    r"""Returns the current random seed of the current NPU.

    .. warning::
        This function eagerly initializes NPU.
    """
    _lazy_init()
    idx = current_device()
    default_generator = torch.npu.default_generators[idx]
    return default_generator.initial_seed()
30.738739
91
0.668816
d8b8db492830d698e6b17933d3730996e12d5da3
4,114
py
Python
Scarky2/builder/views.py
kopringo/Scarky2
93c59cd31113749045caff68274f779a61360167
[ "MIT" ]
null
null
null
Scarky2/builder/views.py
kopringo/Scarky2
93c59cd31113749045caff68274f779a61360167
[ "MIT" ]
null
null
null
Scarky2/builder/views.py
kopringo/Scarky2
93c59cd31113749045caff68274f779a61360167
[ "MIT" ]
null
null
null
#-*- coding: utf-8 -*-

from django.shortcuts import render
from django.http.response import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.core.urlresolvers import reverse

import uuid
import json

from models import Problem, Language

# Create your views here.

def home(request):
    return HttpResponseRedirect(reverse('builder', args=['new', ]))


def builder(request, pid):
    params = {}
    secret = ''
    problem = None

    if pid == 'new':
        params['new'] = True

        if request.POST:
            user = None
            if request.user.is_authenticated():
                user = request.user

            problem = Problem.create_problem(user)
            problem.name = request.POST.get('name', '')
            problem.content = request.POST.get('content', '')
            problem.save()

            if request.is_ajax():
                return HttpResponse(json.dumps({'pid': problem.code, 'secret': problem.secret}),
                                    content_type='application/json')
            else:
                return HttpResponseRedirect('%s?secret=%s' % (reverse('problem', args=[problem.code]),
                                                              problem.secret))
    else:
        secret = request.GET.get('secret', request.POST.get('secret', ''))

        try:
            problem = Problem.objects.get(code=pid)
            if (not request.user.is_authenticated() and secret != problem.secret) or \
               (request.user.is_authenticated() and problem.user != request.user):
                raise Exception('access-denied')
        except Problem.DoesNotExist as e:
            return HttpResponseRedirect('/?not-found')
        except Exception as e:
            return HttpResponseRedirect('/?access-denied')

        if request.POST:
            name = request.POST.get('name', '')
            content = request.POST.get('content', '')
            input = request.POST.get('input', '')
            output = request.POST.get('output', '')

            problem.name = name
            problem.content = content
            problem.input = input
            problem.output = output
            problem.save()

            return HttpResponseRedirect('/builder/%s?secret=%s' % (pid, problem.secret))

    languages = Language.objects.all().filter(visible=True)
    if len(languages) == 0:
        Language.sync_languages()
        languages = Language.objects.all().filter(visible=True)

    params['languages'] = languages
    params['problem'] = problem
    params['problem_code'] = pid
    params['problem_secret'] = secret
    return render(request, 'builder/home.html', params)


def builder_upload(request):
    file = 'sdf'
    return HttpResponse(json.dumps({'file': file}), content_type='application/json')


def problem(request, pid):
    params = {}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        return HttpResponseRedirect(reverse('problems'))

    # if problem.secret != request.GET.get('secret', '~!@#$%^#@#$@#!@!...'):
    #     pass
    # if a secret is given: editing and statistics

    params['problem'] = problem
    params['host'] = request.META['HTTP_HOST']
    return render(request, 'builder/problem.html', params)


def widget_js(request, pid):
    params = {pid: pid}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        pass

    params['host'] = request.META['HTTP_HOST']
    params['problem'] = problem
    return render(request, 'builder/widget_js.html', params)


def widget(request, pid):
    params = {}

    try:
        problem = Problem.objects.get(code=pid)
    except Problem.DoesNotExist:
        pass

    return render(request, 'builder/widget.html', params)


def problems(request):
    params = {}
    return render(request, 'builder/problems.html', params)


def api_1_problems(request):
    pass


def api_1_problem(request, pid):
    pass


def api_1_submissions(request, pid):
    pass


def api_1_submission(request, pid, sid):
    pass
28.769231
129
0.59893
993259bb8f1d2c008f6332049af9e6a0cef4bdf4
10,355
py
Python
paddlenlp/transformers/ppminilm/tokenizer.py
mukaiu/PaddleNLP
0315365dbafa6e3b1c7147121ba85e05884125a5
[ "Apache-2.0" ]
null
null
null
paddlenlp/transformers/ppminilm/tokenizer.py
mukaiu/PaddleNLP
0315365dbafa6e3b1c7147121ba85e05884125a5
[ "Apache-2.0" ]
null
null
null
paddlenlp/transformers/ppminilm/tokenizer.py
mukaiu/PaddleNLP
0315365dbafa6e3b1c7147121ba85e05884125a5
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
import six
import shutil

from paddle.utils import try_import
from paddlenlp.utils.env import MODEL_HOME

from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer

__all__ = ['PPMiniLMTokenizer']


class PPMiniLMTokenizer(PretrainedTokenizer):
    r"""
    Constructs a PPMiniLM tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.

    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
    which contains most of the main methods. For more information regarding those
    methods, please refer to this superclass.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        do_lower_case (str, optional):
            Whether or not to lowercase the input when tokenizing.
            Defaults to `True`.
        unk_token (str, optional):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "[UNK]".
        sep_token (str, optional):
            A special token separating two different sentences in the same input.
            Defaults to "[SEP]".
        pad_token (str, optional):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "[PAD]".
        cls_token (str, optional):
            A special token used for sequence classification. It is the last token
            of the sequence when built with special tokens. Defaults to "[CLS]".
        mask_token (str, optional):
            A special token representing a masked token. This is the token used
            in the masked language modeling task which the model tries to predict
            the original unmasked ones. Defaults to "[MASK]".

    Examples:
        .. code-block::

            from paddlenlp.transformers import PPMiniLMTokenizer
            tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')

            encoded_inputs = tokenizer('He was a puppeteer')
            # encoded_inputs:
            # { 'input_ids': [1, 4444, 4385, 1545, 6712, 10062, 9568, 9756, 9500, 2],
            #   'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}

    """
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "ppminilm-6l-768h":
            "https://bj.bcebos.com/paddlenlp/models/transformers/ppminilm-6l-768h/vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "ppminilm-6l-768h": {
            "do_lower_case": True
        },
    }

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]",
                 **kwargs):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = PPMiniLMTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.do_lower_case = do_lower_case
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
                                                      unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        r"""
        End-to-end tokenization for PPMiniLM models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            List[str]: A list of string representing converted tokens.
        """
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_string(self, tokens):
        r"""
        Converts a sequence of tokens (list of string) to a single string. Since
        the usage of WordPiece introduces `##` to concat subwords, this also
        removes `##` when converting.

        Args:
            tokens (List[str]): A list of string representing tokens to be converted.

        Returns:
            str: Converted string from tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import PPMiniLMTokenizer
                tokenizer = PPMiniLMTokenizer.from_pretrained('ppminilm-6l-768h')

                tokens = tokenizer.tokenize('He was a puppeteer')
                strings = tokenizer.convert_tokens_to_string(tokens)
                # he was a puppeteer

        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        r"""
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is
            therefore not efficient. Do not put this inside your training loop.

        Args:
            pair (bool, optional):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False` and the input is a single sequence.

        Returns:
            int: Number of tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(
                token_ids_0, token_ids_1 if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        r"""
        Build model inputs from a sequence or a pair of sequences for sequence
        classification tasks by concatenating and adding special tokens.

        A sequence has the following format:

        - single sequence:      ``[CLS] X [SEP]``
        - pair of sequences:    ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to `None`.

        Returns:
            List[int]: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        r"""
        Build offset map from a pair of offset maps by concatenating and adding
        offsets of special tokens.

        An offset_mapping has the following format:

        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:    ``(0,0) A (0,0) B (0,0)``

        Args:
            offset_mapping_0 (List[tuple]):
                List of char offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs.
                Defaults to `None`.

        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0)] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        r"""
        Create a mask from the two sequences passed to be used in a sequence-pair
        classification task.

        A sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion
        of the mask (0s).

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to `None`.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + _sep) * [1]
38.494424
119
0.592854
cfd871f28f6c1b0bbbc8ffe42207aa261daa1915
549
py
Python
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
mxsph/Data-Analytics
c82ff54b78f50b6660d7640bfee96ea68bef598f
[ "MIT" ]
3
2020-08-24T19:02:09.000Z
2021-05-27T20:22:41.000Z
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
mxsph/Data-Analytics
c82ff54b78f50b6660d7640bfee96ea68bef598f
[ "MIT" ]
342
2020-08-13T10:24:23.000Z
2021-08-12T14:01:52.000Z
Organisation/Recherche/Datenvisualisierung/matplotlib_plot/Wetterplots_Giessen.py
visuanalytics/visuanalytics
f9cce7bc9e3227568939648ddd1dd6df02eac752
[ "MIT" ]
8
2020-09-01T07:11:18.000Z
2021-04-09T09:02:11.000Z
import matplotlib.pyplot as plt

# Data to plot: hoch = daily highs, tief = daily lows, tage = days
# (German variable names and plot labels kept as in the original;
# the labels read "Day in April 2020" / "Temperature in degrees Celsius")
hoch = [11, 11, 12, 14, 18, 22, 20, 23, 23, 21, 20, 23, 11, 11, 16,
        23, 24, 23, 15, 17, 18, 21, 22, 22, 16, 20, 22, 17, 18, 16]
tief = [-5, -3, -2, 1, 1, 4, 6, 5, 6, 2, 5, 2, -2, -2, 1,
        6, 8, 8, 5, 6, 7, 5, 3, 4, 2, 3, 6, 8, 7, 5]
tage = list(range(1, 31))

plt.plot(tage, hoch, ":r", tage, hoch, "or",
         tage, tief, ":b", tage, tief, "ob")
plt.xlabel("Tag im April 2020")
plt.ylabel("Temperatur in Grad Celsius")
plt.title("Temperaturen im April 2020")
plt.grid()
plt.show()
36.6
120
0.54827
cfe40386467f5a82bc05967bf49df0d025067384
10,250
py
Python
src/onegov/election_day/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/election_day/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/election_day/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
""" Contains upgrade tasks that are executed when the application is being upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`. """ from onegov.ballot import Election from onegov.ballot import Vote from onegov.core.orm.types import JSON from onegov.core.orm.types import UTCDateTime from onegov.core.upgrade import upgrade_task from onegov.election_day.collections import ArchivedResultCollection from onegov.election_day.models import ArchivedResult from onegov.election_day.models import Subscriber from sqlalchemy import Column from sqlalchemy import Enum from sqlalchemy import Text @upgrade_task('Create archived results') def create_archived_results(context): """ Create an initial archived result entry for all existing votes and elections. Because we don't have a real request here, the generated URL are wrong! To fix the links, login after the update and call the 'update-results' view. """ ArchivedResultCollection(context.session).update_all(context.request) @upgrade_task('Add ID to archived results') def add_id_to_archived_results(context): """ Add the IDs of the elections/votes as meta information to the results. Normally, the right election and vote should be found. To be sure, you call the 'update-results' view to ensure that everything is right. """ session = context.session results = session.query(ArchivedResult) results = results.filter(ArchivedResult.schema == context.app.schema) for result in results: if result.type == 'vote': vote = session.query(Vote).filter( Vote.date == result.date, Vote.domain == result.domain, Vote.shortcode == result.shortcode, Vote.title_translations == result.title_translations ).first() if vote and vote.id in result.url: result.external_id = vote.id if result.type == 'election': election = session.query(Election).filter( Election.date == result.date, Election.domain == result.domain, Election.shortcode == result.shortcode, Election.title_translations == result.title_translations, Election.counted_entities == result.counted_entities, Election.total_entities == result.total_entities, ).first() if election and election.id in result.url: result.external_id = election.id @upgrade_task('Update vote progress') def update_vote_progress(context): """ Recalculate the vote progress for the archived results. 
""" session = context.session results = session.query(ArchivedResult) results = results.filter( ArchivedResult.schema == context.app.schema, ArchivedResult.type == 'vote' ) for result in results: vote = session.query(Vote).filter_by(id=result.external_id) vote = vote.first() if vote: result.counted_entities, result.total_entities = vote.progress @upgrade_task('Add elected candidates to archived results') def add_elected_candidates(context): """ Adds the elected candidates to the archived results, """ session = context.session results = session.query(ArchivedResult) results = results.filter( ArchivedResult.schema == context.app.schema, ArchivedResult.type == 'election' ) for result in results: election = session.query(Election).filter_by(id=result.external_id) election = election.first() if election: result.elected_candidates = election.elected_candidates @upgrade_task('Add content columns to archived results') def add_content_columns_to_archived_results(context): if not context.has_column('archived_results', 'content'): context.operations.add_column( 'archived_results', Column('content', JSON) ) @upgrade_task('Change last change columns') def change_last_change_columns(context): if not context.has_column('archived_results', 'last_modified'): context.operations.add_column( 'archived_results', Column('last_modified', UTCDateTime, nullable=True) ) if context.has_column('archived_results', 'last_result_change'): context.operations.execute( 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format( 'archived_results', 'last_result_change' ) ) if ( context.has_column('notifications', 'last_change') and not context.has_column('notifications', 'last_modified') ): context.operations.execute( 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format( 'notifications', 'last_change', 'last_modified' ) ) if context.has_column('notifications', 'last_modified'): context.operations.execute( 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format( 'notifications', 'last_modified' ) ) @upgrade_task('Make subscriber polymorphic') def make_subscriber_polymorphic(context): if not context.has_column('subscribers', 'type'): context.operations.add_column( 'subscribers', Column('type', Text, nullable=True) ) if ( context.has_column('subscribers', 'phone_number') and not context.has_column('subscribers', 'address') ): context.operations.execute( 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format( 'subscribers', 'phone_number', 'address' ) ) if context.has_column('subscribers', 'type'): susbscribers = context.session.query(Subscriber) susbscribers = susbscribers.filter(Subscriber.type.is_(None)) for subscriber in susbscribers: subscriber.type = 'sms' @upgrade_task('Make notifications polymorphic') def make_notifications_polymorphic(context): if ( context.has_column('notifications', 'action') and not context.has_column('notifications', 'type') ): context.operations.execute( 'ALTER TABLE {} RENAME COLUMN {} TO {};'.format( 'notifications', 'action', 'type' ) ) context.operations.execute( 'ALTER TABLE {} ALTER COLUMN {} DROP NOT NULL;'.format( 'notifications', 'type' ) ) @upgrade_task( 'Apply static data', requires='onegov.ballot:Replaces results group with name and district' ) def apply_static_data(context): principal = getattr(context.app, 'principal', None) if not principal: return for vote in context.session.query(Vote): for ballot in vote.ballots: assert vote.date and vote.date.year in principal.entities for result in ballot.results: assert ( result.entity_id in principal.entities[vote.date.year] or result.entity_id == 0 ) 
result.name = principal.entities.\ get(vote.date.year, {}).\ get(result.entity_id, {}).\ get('name', '') result.district = principal.entities.\ get(vote.date.year, {}).\ get(result.entity_id, {}).\ get('district', '') for election in context.session.query(Election): assert election.date and election.date.year in principal.entities for result in election.results: assert ( result.entity_id in principal.entities[election.date.year] or result.entity_id == 0 ) result.name = principal.entities.\ get(election.date.year, {}).\ get(result.entity_id, {}).\ get('name', '') result.district = principal.entities.\ get(election.date.year, {}).\ get(result.entity_id, {}).\ get('district', '') @upgrade_task('Add election compound to archive') def add_election_compound_to_archive(context): old_type = Enum('election', 'vote', name='type_of_result') new_type = Enum( 'election', 'election_compound', 'vote', name='type_of_result' ) tmp_type = Enum( 'election', 'election_compound', 'vote', name='_type_of_result' ) tmp_type.create(context.operations.get_bind(), checkfirst=False) context.operations.execute( 'ALTER TABLE archived_results ALTER COLUMN type ' 'TYPE _type_of_result USING type::text::_type_of_result' ) old_type.drop(context.operations.get_bind(), checkfirst=False) new_type.create(context.operations.get_bind(), checkfirst=False) context.operations.execute( 'ALTER TABLE archived_results ALTER COLUMN type ' 'TYPE type_of_result USING type::text::type_of_result' ) tmp_type.drop(context.operations.get_bind(), checkfirst=False) @upgrade_task('Add contraints to notifications and sources') def add_contraints_to_notifications_and_sources(context): # We use SQL (rather than operations.xxx) so that we can drop and add # the constraints in one statement for ref in ('election', 'vote'): for table in ('notifications', 'upload_data_source_item'): context.operations.execute( f'ALTER TABLE {table} ' f'DROP CONSTRAINT {table}_{ref}_id_fkey, ' f'ADD CONSTRAINT {table}_{ref}_id_fkey' f' FOREIGN KEY ({ref}_id) REFERENCES {ref}s (id)' f' ON UPDATE CASCADE' ) @upgrade_task('Enable expats on votes and elections') def enable_expats(context): principal = getattr(context.app, 'principal', None) if not principal: return for vote in context.session.query(Vote): ballot = vote.ballots.first() if ballot: if ballot.results.filter_by(entity_id=0).first(): vote.expats = True for election in context.session.query(Election): if election.results.filter_by(entity_id=0).first(): election.expats = True
# ====== file: timetable.py | repo: jerluebke/SOWAS | license: MIT ======
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable = C, R

from collections import namedtuple
import matplotlib as mpl
from matplotlib.patches import Patch
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np

mpl.rcParams["font.size"] = 12

# colors
cmap = plt.get_cmap("Set1")
color_mapping = {
    "th": cmap(0),  # theory
    "ex": cmap(1),  # experiment
    "ev": cmap(2)   # evaluation
}


class item(namedtuple("item", ["description", "start", "duration", "domain"])):
    """item in the timetable containing startpoint (starting with 0),
    duration, domain (theory, experiment, evaluation) and description
    (as it appears in the plot)"""
    __slots__ = ()

    @property
    def color(self):
        return color_mapping[self.domain]


#############
#   data    #
#############

items = [
    # item: description, start, duration, domain
    item("Einarbeitung\n in Python", 0, 2, "th"),
    item("Erstellen der\n Simulation", 1, 4, "th"),
    # item("Ergänzung der\n Simulation", 5, 2, "th"),
    item("Bau der Mess-\n vorrichtung", 0, 2, "ex"),
    item("Aufnahme der\n Messreihen", 2, 4, "ex"),
    item("Anpassung des\n Aufbaus", 6, 3, "ex"),
    item("Auswertung der\nDaten", 7, 3, "ev"),
    item("Erstellung des\nPosters", 8, 11, "ev")
]

data_as_array = np.array([[item.start, item.duration] for item in items])
y_values = np.arange(len(items))
starting_points = data_as_array[:, 0]
durations = data_as_array[:, 1]
y_labels = [item.description for item in items]
colors = [item.color for item in items]
kwargs = {
    "height": .4,
    "align": "center"
}


#################
#   plotting    #
#################

fig = plt.figure(figsize=(8, 6))

# make two subplots - actual plot and legend
gs = GridSpec(2, 1, height_ratios=[11, 1])

# make timetable
ax = plt.subplot(gs[0])
ax.barh(y_values, durations, left=starting_points, color=colors, **kwargs)

# adjust yaxis
ax.invert_yaxis()
ax.set_yticks(y_values)
ax.set_yticklabels(y_labels)
# hide yticks
ax.tick_params(axis='y', length=0)

# xaxis: set ticks, label and limits
ax.set_xticks(np.arange(12))
ax.set_xlabel("Wochen (09.04.2018 - 22.06.2018)")
ax.set_xlim((0, 11))

# turn grid off to avoid conflicts with local settings
ax.grid(False)
# place grid below elements in plot
ax.set_axisbelow(True)
ax.grid(axis='x')

# make legend in second subplot
legend_ax = plt.subplot(gs[1])
# remove ticks and boundary box
legend_ax.set(xticks=[], yticks=[])
legend_ax.set_axis_off()
# make and map proxy artists to legend
plt.legend(handles=[Patch(color=c, label=l) for c, l in
                    zip(color_mapping.values(),
                        ("Theorie", "Experiment", "Auswertung"))],
           loc="lower center", ncol=3)

# align subplots
plt.tight_layout()

plt.savefig("timetable.png", format="png", dpi=300)
plt.savefig("timetable.eps", format="eps", dpi=1000)
# ====== file: 01_class/transfer_parent_class_method.py | repo: wuyueCreator/python-test | license: MIT ======
class A:
    def spam(self):
        print('A.spam')


class B(A):
    def spam(self):
        print('B.spam')
        super().spam()  # Call parent spam()


class C:
    def __init__(self):
        self.x = 0


class D(C):
    def __init__(self):
        super().__init__()
        self.y = 1


# Another common use of super() appears in code that overrides one of
# Python's special methods, for example:
class Proxy:
    def __init__(self, obj):
        self._obj = obj

    # Delegate attribute lookup to internal obj
    def __getattr__(self, name):
        return getattr(self._obj, name)

    # Delegate attribute assignment
    def __setattr__(self, name, value):
        if name.startswith('_'):
            super().__setattr__(name, value)  # Call original __setattr__
        else:
            setattr(self._obj, name, value)
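# Usage sketch (not part of the original file): a quick demo of the classes
# above. The Point class is made up purely for illustration.
if __name__ == '__main__':
    b = B()
    b.spam()            # prints 'B.spam', then 'A.spam' via super()

    d = D()
    print(d.x, d.y)     # 0 1 -- both initializers ran

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    p = Proxy(Point(2, 3))
    print(p.x)          # 2  -- lookup is delegated to the wrapped Point
    p.x = 10            # public names are assigned on the wrapped Point
    print(p._obj.x)     # 10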
# ====== file: quant/api/kkex.py | repo: doubleDragon/QuantBot | license: MIT ======
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from urllib import urlencode
from urlparse import urljoin

import requests
from hashlib import md5

BASE_URL = 'https://kkex.com/api/v1'
TIMEOUT = 5


class PublicClient(object):
    def __init__(self):
        super(PublicClient, self).__init__()

    @classmethod
    def _build_parameters(cls, parameters):
        # sort the keys so we can test easily in Python 3.3 (dicts are not
        # ordered)
        keys = list(parameters.keys())
        keys.sort()
        return '&'.join(["%s=%s" % (k, parameters[k]) for k in keys])

    def url_for(self, path, path_arg=None, parameters=None):
        # build the basic url
        url = "%s/%s" % (BASE_URL, path)

        # If there is a path_arg, interpolate it into the URL.
        # In this case the path that was provided will need to have string
        # interpolation characters in it, such as PATH_TICKER
        if path_arg:
            url = url % (path_arg)

        # Append any parameters to the URL.
        if parameters:
            url = "%s?%s" % (url, self._build_parameters(parameters))

        return url

    @classmethod
    def _get(cls, url, params=None):
        try:
            resp = requests.get(url, timeout=TIMEOUT, params=params)
        except requests.exceptions.RequestException as e:
            raise e
        else:
            if resp.status_code == requests.codes.ok:
                return resp.json()

    def depth(self, symbol):
        url = self.url_for('depth')
        params = {
            'symbol': symbol
        }
        return self._get(url, params)


class PrivateClient(PublicClient):
    def __init__(self, api_key, api_secret):
        super(PrivateClient, self).__init__()
        self._key = api_key
        self._secret = api_secret
        self.api_root = 'https://kkex.com'

    def _sign(self, params):
        sign = list(sorted(params.items()) + [('secret_key', self._secret)])
        signer = md5()
        signer.update(urlencode(sign).encode('utf-8'))
        return signer.hexdigest().upper()

    def _post(self, path, params=None):
        if params is None:
            params = {}
        params['api_key'] = self._key
        sign = self._sign(params)
        params['sign'] = sign
        url = urljoin(self.api_root, path)
        try:
            resp = requests.post(url, data=params, timeout=5)
        except requests.exceptions.RequestException as e:
            raise e
        else:
            if resp.status_code == requests.codes.ok:
                return resp.json()

    def profile(self):
        return self._post('/api/v1/profile')

    def balance(self):
        return self._post('/api/v1/userinfo')

    def buy_limit(self, symbol, amount, price):
        params = {
            'symbol': symbol,
            'type': 'buy',
            'price': price,
            'amount': amount
        }
        return self._post('/api/v1/trade', params)

    def sell_limit(self, symbol, amount, price):
        params = {
            'symbol': symbol,
            'type': 'sell',
            'price': price,
            'amount': amount
        }
        return self._post('/api/v1/trade', params)

    def cancel_order(self, symbol, order_id):
        params = {'symbol': symbol, 'order_id': order_id}
        return self._post('/api/v1/cancel_order', params)

    def cancel_all_orders(self, symbol):
        params = {
            'symbol': symbol
        }
        return self._post('/api/v1/cancel_all_orders', params)

    def order_info(self, symbol, order_id):
        params = {
            'symbol': symbol,
            'order_id': order_id
        }
        return self._post('/api/v1/order_info', params)

    def orders_info(self, symbol, order_ids):
        order_id_p = ','.join(order_ids)
        params = {
            'symbol': symbol,
            'order_id': order_id_p
        }
        return self._post('/api/v1/orders_info', params)

    def _get_orders_history(self, symbol, status=0, page=1, pagesize=10):
        params = {
            'symbol': symbol,
            'status': status,
            'current_page': page,
            'page_length': pagesize
        }
        return self._post('/api/v1/order_history', params)
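# Usage sketch (not part of the original file): how these clients are meant
# to be called. The symbol string and the credentials are made-up
# placeholders, and the calls need network access to kkex.com.
if __name__ == '__main__':
    public = PublicClient()
    order_book = public.depth('BTCUSDT')    # placeholder symbol name
    if order_book:
        print(order_book)

    private = PrivateClient(api_key='YOUR_KEY', api_secret='YOUR_SECRET')
    # _sign() md5-hashes the sorted, urlencoded params plus the secret key,
    # so any authenticated endpoint works the same way:
    print(private.balance())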
# ====== file: initial-settings.py | repo: gifted-nguvu/darkstar-dts-converter | license: MIT ======
from conans import ConanFile, CMake, tools
import os.path
import sys


class LocalConanFile(ConanFile):
    settings = "arch_build"
    generator = []

    def requirements(self):
        profile = "default"
        if "--profile" in sys.argv:
            profile = sys.argv[sys.argv.index("--profile") + 1]
            profile = os.path.abspath(profile) if os.path.exists(profile) else profile
        print(f"Configuring CMake arch to {self.settings.arch_build} for {profile} profile. Helps cross-compiling.")
        self.run(f"conan profile update settings.cmake:arch={self.settings.arch_build} {profile}")
        print(f"Adding the bincrafters remote @ https://bincrafters.jfrog.io/artifactory/api/conan/public-conan.")
        self.run(f"conan remote add bincrafters https://bincrafters.jfrog.io/artifactory/api/conan/public-conan --force")
        print("Configuring settings to work correctly with bincrafters.")
        self.run(f"conan config set general.revisions_enabled=1")
# ====== file: zencad/examples/1.GeomPrim/2.prim2d/textshape.py | repo: Spiritdude/zencad | license: MIT ======
#!/usr/bin/env python3
"""
ZenCad API example: textshape

date: 04.10.2019
"""

import os
import zencad  # needed for zencad.moduledir below; the star import does not bind the package name
from zencad import *

zencad_example_directory = zencad.moduledir + "/examples"
testfont = os.path.join(zencad_example_directory, "fonts/testfont.ttf")
mandarinc = os.path.join(zencad_example_directory, "fonts/mandarinc.ttf")

register_font(testfont)
register_font(mandarinc)

m0 = textshape(text="ZenCad", fontname="Ubuntu Mono", size=100)
m1 = textshape(text="ZenCad", fontname="Mandarinc", size=100)

disp(m0, color.white)
disp(m0.rotateX(deg(90)).translate(0, 70, 0))
disp(m1.translate(0, 200, 0), color.green)
disp(m1.rotateX(deg(90)).translate(0, 270, 0), color.yellow)

######################### Advanced Example ########################################
x = 400
y = 100
z = 50
deep = 10

# find the geometric center of the textshape
m1center = m1.center()

m2 = (
    box(x, y, z)
    - m1.extrude(deep).up(z - deep).translate(x/2 - m1center.x, y/2 - m1center.y, 0)
)

disp(m2.forw(400))
###################################################################################

show()
# ====== file: toolsparty-master/information-gathering/ip-extender.py | repo: Zusyaku/Termux-And-Lali-Linux-V2 | license: Apache-2.0 ======
#!/usr/bin/env python
# coding: utf-8
# -**- Author: LandGrey -**-

import os
import sys


def ip_extender(ips=None, files=None, switch=3, extend=5, is_format=False):
    results = []
    cidr_groups = []
    none_cidr_groups = []
    cidr_dict = {}
    if not ips and files:
        ips = []
        with open(files, 'r') as f:
            for line in f.readlines():
                if line.strip():
                    ips.append(line.strip())
    ips = list(set(ips))
    for ip in ips:
        prefix = ".".join(ip.split(".")[:3])
        if prefix not in cidr_dict.keys():
            cidr_dict[prefix] = [1, ip]
        else:
            cidr_dict[prefix][0] += 1
            cidr_dict[prefix].append(ip)
    for k, v in cidr_dict.items():
        if v[0] >= switch:
            cidr_groups.append(k)
        else:
            for _ in v[1:]:
                none_cidr_groups.append(_)
    if not is_format:
        for _ in cidr_groups:
            results.extend(extend_ips([_ + ".128"]))
        results.extend(extend_ips(none_cidr_groups, extend=extend))
    else:
        for _ in cidr_groups:
            results.append(_ + ".0/24")
        for _ in none_cidr_groups:
            r = extend_ips([_], extend=extend)
            results.append(r[0] + "-" + r[-1])
    return results


def extend_ips(ips, extend=128):
    results = ips
    var0 = []
    for ip in ips:
        ip_chunk = ip.split(".")
        for chunk in range(min(int(ip_chunk[3]) - int(extend), int(ip_chunk[3]) - 1)
                           if int(ip_chunk[3]) - int(extend) > 0 else 1,
                           min(int(ip_chunk[3]) + int(extend) + 1, 256)):
            var0.append("{0}.{1}.{2}.{3}".format(ip_chunk[0], ip_chunk[1], ip_chunk[2], str(chunk)))
    results.extend(var0)
    return sorted(list(set(results)), key=lambda x: (len(x), str(x)))


if __name__ == "__main__":
    if len(sys.argv) != 2 or not os.path.isfile(sys.argv[1]):
        exit("[*] Usage: python ip-extender.py single_ip_list.txt")
    for ip in ip_extender(files=sys.argv[1], is_format=True):
        print(ip)
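# Usage sketch (not part of the original file): calling ip_extender() with an
# in-memory list instead of a file; the addresses are made-up placeholders.
# Three hosts share the 10.0.0.x prefix, which reaches the default switch=3
# threshold and is collapsed to a /24; the lone 192.168.1.3 host is instead
# expanded by +/- extend=5 into a range (clamped to valid last octets).
#
#     >>> ip_extender(ips=["10.0.0.5", "10.0.0.9", "10.0.0.77", "192.168.1.3"],
#     ...             is_format=True)
#     ['10.0.0.0/24', '192.168.1.1-192.168.1.8']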
# ====== file: frappe-bench/env/lib/python2.7/site-packages/bleach_whitelist/__init__.py | repo: Semicheche/foa_frappe_docker | license: MIT ======
from bleach_whitelist import *
# ====== file: etl/io_config/server_protocol.py | repo: cloud-cds/cds-stack | license: Apache-2.0 ======
import asyncio
import json
import logging
import socket, errno

from etl.io_config.core import get_environment_var

SRV_LOG_FMT = '%(asctime)s|%(name)s|%(process)s-%(thread)s|%(levelname)s|%(message)s'
logging.basicConfig(level=logging.INFO, format=SRV_LOG_FMT)

MAGIC_NUMBER = b'trews_magic_number'
CONNECTION_CLOSED = 'Connection Closed'

LMC_ALERT_SERVER_IP = get_environment_var(
    'LMC_ALERT_SERVER_IP', 'alerts.default.svc.cluster.local')
LMC_ALERT_SERVER_PORT = 31000

TREWS_ALERT_SERVER_IP = get_environment_var(
    'TREWS_ALERT_SERVER_IP', 'trews-alerts.default.svc.cluster.local')
TREWS_ALERT_SERVER_PORT = 31000


async def read_message(reader, writer):
    try:
        data = await reader.readuntil(MAGIC_NUMBER)
    except asyncio.streams.IncompleteReadError:
        return CONNECTION_CLOSED

    # Decode and return message
    EOM = -1 * len(MAGIC_NUMBER)
    data = data[:EOM]
    logging.debug('Receiving from {}: {}'.format(
        writer.get_extra_info('peername'), data))
    return json.loads(data.decode())


async def write_message(writer, message):
    logging.debug('Sending to {}: {}'.format(
        writer.get_extra_info('sockname'), message))
    if type(message) != dict:
        raise ValueError('write_message takes a dictionary as the second argument')
    try:
        writer.write(json.dumps(message).encode() + MAGIC_NUMBER)
        await writer.drain()
        return True
    except (socket.error, IOError) as e:
        if e.errno == errno.EPIPE:
            logging.error(e)
        else:
            logging.error("Other error: {}".format(e))
        writer.close()
        return False
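# Usage sketch (not part of the original file): a minimal asyncio echo server
# built on the two framing helpers above. Host and port are made-up; both
# peers must delimit messages with the same MAGIC_NUMBER.

async def handle_connection(reader, writer):
    # Read one MAGIC_NUMBER-delimited JSON message and echo it back.
    message = await read_message(reader, writer)
    if message != CONNECTION_CLOSED:
        await write_message(writer, {'echo': message})
    writer.close()


async def serve_echo():
    server = await asyncio.start_server(handle_connection, '127.0.0.1', 31000)
    async with server:
        await server.serve_forever()

# asyncio.run(serve_echo())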
# ====== file: packages/watchmen-meta/src/watchmen_meta/analysis/__init__.py | repo: Indexical-Metrics-Measure-Advisory/watchmen | license: MIT ======
from .pipeline_index_service import PipelineIndexService
from .topic_index_service import TopicIndexService
# ====== file: INBa/2015/Chinkirov_V_V/task_4_29.py | repo: YukkaSarasti/pythonintask | license: Apache-2.0 ======
# Task 4. Variant 28.
# Write a program that prints the name under which Emil Herzog is better
# known. Additionally, print the person's field of interest, place of birth,
# and years of birth and death (if the person has died), and compute the age
# at the current moment (or at the moment of death).
# All required data must be stored in variables.
# After printing the information, the program must wait until the user
# presses Enter to exit.
# Chinkirov V.V.
# 28.03.2016

name = "Эмиль Эрзог"
city = "Эльбёф,Франция"
rod = int(1895)
dead = int(1934)
age = int(dead - rod)
interest = "Писатель"

print(name + " наиболее известен как Андреа Моруа - Французский писатель и член Французской академии. ")
print("Место рождения: " + city)
print("Год рождения: " + str(rod))
print("Год смерти: " + str(dead))
print("Возраст смерти: " + str(age))
print("Область интересов: " + interest)

input("Нажмите Enter для закрытия")
# ====== file: profiles/urls.py | repo: Thames1990/BadBatBets | license: MIT ======
from django.conf.urls import url

from . import views

app_name = 'profiles'

urlpatterns = [
    url(
        r'^$',
        views.profile,
        name='profile'
    ),
    # Login mechanism
    url(
        r'^login/$',
        views.login_user,
        name='login'
    ),
    # Logout mechanism
    url(
        r'^logout/$',
        views.logout_user,
        name='logout'
    ),
    # Signup mechanism
    url(
        r'^signup/$',
        views.signup,
        name='signup'
    ),
    # Change Password
    url(
        r'^change_password/$',
        views.change_password,
        name='change_password'
    ),
    # General terms and conditions
    url(
        r'^general_terms_and_conditions/$',
        views.general_terms_and_conditions_view,
        name='general_terms_and_conditions'
    ),
    # Privacy policy
    url(
        r'^privacy_policy/$',
        views.privacy_policy_view,
        name='privacy_policy'
    ),
    # Provide feedback
    url(
        r'^feedback/$',
        views.feedback,
        name='feedback'
    ),
    # Resolve Feedback
    url(
        r'^feedback/(?P<id>[0-9]+)/resolve/$',
        views.resolve_feedback,
        name='resolve_feedback'
    ),
    # Deposit funds in account
    url(
        r'^payment/$',
        views.payment,
        name='payment'
    ),
]
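# Usage sketch (not part of the original file): because app_name is set,
# these routes are reversed through the 'profiles' namespace, e.g. from a
# view or test. The id value is a made-up example.
#
#     from django.urls import reverse
#     reverse('profiles:login')                               # '/login/' under the include prefix
#     reverse('profiles:resolve_feedback', kwargs={'id': 7})  # '/feedback/7/resolve/'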
# ====== file: pymantic/tests/test_primitives.py | repo: dnswd/blazegraph-python | license: BSD-3-Clause ======
from nose.tools import *
from pymantic.primitives import *
import random


def en(s):
    return Literal(s, "en")


def test_to_curie_multi_match():
    """Test that the longest match for prefix is used"""
    namespaces = {'short': "aa", 'long': "aaa"}
    curie = to_curie("aaab", namespaces)
    print curie
    assert curie == 'long:b'


def test_simple_add():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    assert t in g


def test_simple_remove():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    g.remove(t)
    assert t not in g


def test_match_VVV_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(None, None, None)
    assert t in matches


def test_match_sVV_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(NamedNode("http://example.com"), None, None)
    assert t in matches


def test_match_sVo_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(NamedNode("http://example.com"), None, en("Never!"))
    assert t in matches


def test_match_spV_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(NamedNode("http://example.com"),
                      NamedNode("http://purl.org/dc/terms/issued"), None)
    assert t in matches


def test_match_Vpo_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(None, NamedNode("http://purl.org/dc/terms/issued"),
                      en("Never!"))
    assert t in matches


def test_match_VVo_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(None, None, en("Never!"))
    assert t in matches


def test_match_VpV_pattern():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), en("Never!"))
    g = Graph()
    g.add(t)
    matches = g.match(None, NamedNode("http://purl.org/dc/terms/issued"), None)
    assert t in matches


def generate_triples(n=10):
    for i in range(1, n):
        yield Triple(NamedNode("http://example/" + str(random.randint(1, 1000))),
                     NamedNode("http://example/terms/" + str(random.randint(1, 1000))),
                     Literal(random.randint(1, 1000)))


def test_10000_triples():
    n = 10000
    g = Graph()
    for t in generate_triples(n):
        g.add(t)
    assert len(g) > n * .9
    matches = g.match(NamedNode("http://example.com/42"), None, None)
    matches = g.match(None, NamedNode("http://example/terms/42"), None)
    matches = g.match(None, None, Literal(42))


def test_iter_10000_triples():
    n = 10000
    g = Graph()
    triples = set()
    for t in generate_triples(n):
        g.add(t)
        triples.add(t)
    assert len(g) > n * .9
    for t in g:
        triples.remove(t)
    assert len(triples) == 0


# Dataset Tests

def test_add_quad():
    q = Quad(NamedNode("http://example.com/graph"), NamedNode("http://example.com"),
             NamedNode("http://purl.org/dc/terms/issued"), Literal("Never!"))
    ds = Dataset()
    ds.add(q)
    assert q in ds


def test_remove_quad():
    q = Quad(NamedNode("http://example.com/graph"), NamedNode("http://example.com"),
             NamedNode("http://purl.org/dc/terms/issued"), Literal("Never!"))
    ds = Dataset()
    ds.add(q)
    ds.remove(q)
    assert q not in ds


def test_ds_len():
    n = 10
    ds = Dataset()
    for q in generate_quads(n):
        ds.add(q)
    assert len(ds) == 10


def test_match_ds_sVV_pattern():
    q = Quad(NamedNode("http://example.com"),
             NamedNode("http://purl.org/dc/terms/issued"), Literal("Never!"),
             NamedNode("http://example.com/graph"))
    ds = Dataset()
    ds.add(q)
    matches = ds.match(subject=NamedNode("http://example.com"))
    assert q in matches


def test_match_ds_quad_pattern():
    q = Quad(NamedNode("http://example.com"),
             NamedNode("http://purl.org/dc/terms/issued"), Literal("Never!"),
             NamedNode("http://example.com/graph"))
    ds = Dataset()
    ds.add(q)
    matches = ds.match(graph="http://example.com/graph")
    assert q in matches


def test_add_graph():
    t = Triple(NamedNode("http://example.com"),
               NamedNode("http://purl.org/dc/terms/issued"), Literal("Never!"))
    g = Graph("http://example.com/graph")
    g.add(t)
    ds = Dataset()
    ds.add_graph(g)
    assert t in ds


def generate_quads(n):
    for i in range(n):
        yield Quad(NamedNode("http://example/" + str(random.randint(1, 1000))),
                   NamedNode("http://purl.org/dc/terms/" + str(random.randint(1, 100))),
                   Literal(random.randint(1, 1000)),
                   NamedNode("http://example/graph/" + str(random.randint(1, 1000))))


def test_10000_quads():
    n = 10000
    ds = Dataset()
    for q in generate_quads(n):
        ds.add(q)
    assert len(ds) > n * .9
    matches = ds.match(subject=NamedNode("http://example.com/42"),
                       graph=NamedNode("http://example/graph/42"))


def test_iter_10000_quads():
    n = 10000
    ds = Dataset()
    quads = set()
    for q in generate_quads(n):
        ds.add(q)
        quads.add(q)
    assert len(ds) > n * .9
    for quad in ds:
        quads.remove(quad)
    assert len(quads) == 0


def test_interfaceName():
    assert Literal("Bob", "en").interfaceName == "Literal"
    assert NamedNode().interfaceName == "NamedNode"


def test_BlankNode_id():
    b1 = BlankNode()
    b2 = BlankNode()
    assert b1.value != b2.value
# ====== file: wz/ui/gridbase.py | repo: gradgrind/WZ | license: Apache-2.0 ======
# -*- coding: utf-8 -*-
"""
ui/gridbase.py

Last updated:  2021-10-10

Widget with tiles on grid layout (QGraphicsScene/QGraphicsView).

=+LICENCE=============================
Copyright 2021 Michael Towers

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=-LICENCE========================================
"""

##### Configuration #####################
FONT_DEFAULT = 'Droid Sans'
FONT_SIZE_DEFAULT = 11
FONT_COLOUR = '442222'      # rrggbb
BORDER_COLOUR = '000088'    # rrggbb
MARK_COLOUR = 'E00000'      # rrggbb

# Line width for borders
UNDERLINE_WIDTH = 3.0
BORDER_WIDTH = 1.0

SCENE_MARGIN = 10.0  # Margin around content in GraphicsView widgets

#####################

### Messages
_TILE_OUT_OF_BOUNDS = ("Kachel außerhalb Tabellenbereich:\n"
                       " Zeile {row}, Höhe {rspan}, Spalte {col}, Breite {cspan}")
_NOTSTRING = "In <grid::Tile>: Zeichenkette erwartet: {val}"

#####################################################

import sys, os, copy

from PySide6.QtWidgets import QGraphicsView, QGraphicsScene, \
        QGraphicsRectItem, QGraphicsSimpleTextItem, QGraphicsLineItem
from PySide6.QtGui import (QFont, QPen, QColor, QBrush, QTransform,
        QPainter, QPdfWriter, QPageLayout)
from PySide6.QtCore import Qt, QMarginsF, QRectF, QBuffer, QByteArray


class GridError(Exception):
    pass

### ---

class GridView(QGraphicsView):
    """This is the "view" widget for the grid.
    The actual grid is implemented as a "scene".
    """
    def __init__(self):
        self._scale = 1.0
        super().__init__()
        # Change update mode: The default, MinimalViewportUpdate, seems
        # to cause artefacts to be left, i.e. it updates too little.
        # Also BoundingRectViewportUpdate seems not to be 100% effective.
        #self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
        self.setViewportUpdateMode(self.FullViewportUpdate)
        self.ldpi = self.logicalDpiX()
        if self.logicalDpiY() != self.ldpi:
            REPORT('WARNING', "LOGICAL DPI different for x and y")
        self.MM2PT = self.ldpi / 25.4
#
    def set_scene(self, scene):
        """Set the QGraphicsScene for this view. The size will be fixed
        to that of the initial <sceneRect> (to prevent it from being
        altered by pop-ups).
        <scene> may be <None>, to remove the current scene.
        """
        self.setScene(scene)
        if scene:
            self.setSceneRect(scene._sceneRect)
#
    def mousePressEvent(self, event):
        point = event.pos()
#        print("POS:", point, self.mapToGlobal(point), self.itemAt(point))
        # The Tile may not be the top item.
        items = self.items(point)
        button = event.button()
        if items:
            for item in items:
                # Give all items at this point a chance to react, starting
                # with the topmost. An item can break the chain by
                # returning a false value.
                try:
                    if button == Qt.LeftButton:
                        if not item.leftclick():
                            return
                    elif button == Qt.RightButton:
                        if not item.rightclick():
                            return
                except AttributeError:
                    pass
#
    ### View scaling
    def scaleUp(self):
        self.scale(1)
#
    def scaleDn(self):
        self.scale(-1)
#
    def scale(self, delta):
        t = QTransform()
        self._scale += self._scale * delta / 10
        t.scale(self._scale, self._scale)
        self.setTransform(t)

### ---------------

###

class GridViewRescaling(GridView):
    """A QGraphicsView that automatically adjusts the scaling of its
    scene to fill the viewing window.
    """
    def __init__(self):
        super().__init__()
        # Apparently it is a good idea to disable scrollbars when using
        # this resizing scheme. With this resizing scheme they would not
        # appear anyway, so this doesn't lose any features!
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

    def resizeEvent(self, event):
        self.resize()
        return super().resizeEvent(event)

    def resize(self, qrect=None):
        if qrect == None:
            qrect = self.sceneRect()
        self.fitInView(qrect, Qt.KeepAspectRatio)

###

class GridBase(QGraphicsScene):
    def __init__(self, gview, rowheights, columnwidths):
        """Set the grid size.
            <columnwidths>: a list of column widths (mm)
            <rowheights>: a list of row heights (mm)
        Rows and columns are 0-indexed.
        """
        super().__init__()
        self._gview = gview
        self._styles = {'*': CellStyle(FONT_DEFAULT, FONT_SIZE_DEFAULT,
                align='c', border=1, mark=MARK_COLOUR)}
        self.xmarks = [0.0]
        x = 0.0
        for c in columnwidths:
            x += c * self._gview.MM2PT
            self.xmarks.append(x)
        self.ymarks = [0.0]
        y = 0.0
        for r in rowheights:
            y += r * self._gview.MM2PT
            self.ymarks.append(y)
        # Allow a little margin
        self._sceneRect = QRectF(-SCENE_MARGIN, -SCENE_MARGIN,
                x + 2 * SCENE_MARGIN, y + 2 * SCENE_MARGIN)
#
    def style(self, name):
        return self._styles[name]
#
    def new_style(self, name, base=None, **params):
        if base:
            style0 = self._styles[base]
            self._styles[name] = style0.copy(**params)
        else:
            self._styles[name] = CellStyle(params.pop('font', None),
                    params.pop('size', None), **params)
#
    def ncols(self):
        return len(self.xmarks) - 1
#
    def nrows(self):
        return len(self.ymarks) - 1
#
    def screen_coordinates(self, x, y):
        """Return the screen coordinates of the given scene point.
        """
        viewp = self._gview.mapFromScene(x, y)
        return self._gview.mapToGlobal(viewp)
#
    def basic_tile(self, row, col, tag, text, style, cspan=1, rspan=1):
        """Add a basic tile to the grid, checking coordinates and
        converting row + col to x + y point-coordinates for the
        <Tile> class.
        """
        # Check bounds
        if (row < 0 or col < 0
                or (row + rspan) >= len(self.ymarks)
                or (col + cspan) >= len(self.xmarks)):
            raise GridError(_TILE_OUT_OF_BOUNDS.format(
                    row=row, col=col, cspan=cspan, rspan=rspan))
        x = self.xmarks[col]
        y = self.ymarks[row]
        w = self.xmarks[col + cspan] - x
        h = self.ymarks[row + rspan] - y
        t = Tile(self, tag, x, y, w, h, text, self._styles[style])
        self.addItem(t)
        return t
#
    ### pdf output
    def setPdfMargins(self, left=15, top=15, right=15, bottom=15):
        self._pdfmargins = (left, top, right, bottom)
        return self._pdfmargins
#
    def pdfMargins(self):
        try:
            return self._pdfmargins
        except AttributeError:
            return self.setPdfMargins()
#
    def to_pdf(self, filepath):
        """Produce and save a pdf of the table.
        The output orientation is selected according to the aspect ratio
        of the table. If the table is too big for the page area, it will
        be shrunk to fit.
        """
        if not filepath.endswith('.pdf'):
            filepath += '.pdf'
        printer = QPdfWriter(filepath)
        printer.setPageSize(printer.A4)
        printer.setPageMargins(QMarginsF(*self.pdfMargins()),
                QPageLayout.Millimeter)
        sceneRect = self._sceneRect
        sw = sceneRect.width()
        sh = sceneRect.height()
        if sw > sh:
            printer.setPageOrientation(QPageLayout.Orientation.Landscape)
        painter = QPainter()
        painter.begin(printer)
        scaling = printer.logicalDpiX() / self._gview.ldpi
        # Do drawing with painter
        page_layout = printer.pageLayout()
        pdf_rect = page_layout.paintRect(QPageLayout.Point)
        pdf_w = pdf_rect.width()
        pdf_h = pdf_rect.height()
        if sw > pdf_w or sh > pdf_h:
            # Shrink to fit page
            self.render(painter)
        else:
            # Scale resolution to keep size
            pdf_rect.setWidth(sw * scaling)
            pdf_rect.setHeight(sh * scaling)
            self.render(painter, pdf_rect)
        painter.end()
        return filepath
#
    # An earlier, alternative implementation of the pdf writer:
    def to_pdf0(self, filepath):
        """Produce and save a pdf of the table.
        The output orientation is selected according to the aspect ratio
        of the table. If the table is too big for the page area, it will
        be shrunk to fit.
        """
        qbytes = QByteArray()
        qbuf = QBuffer(qbytes)
        qbuf.open(qbuf.WriteOnly)
        printer = QPdfWriter(qbuf)
        printer.setPageSize(printer.A4)
        printer.setPageMargins(QMarginsF(*self.pdfMargins()),
                QPageLayout.Millimeter)
        sceneRect = self._sceneRect
        sw = sceneRect.width()
        sh = sceneRect.height()
        if sw > sh:
            printer.setPageOrientation(QPageLayout.Orientation.Landscape)
        pdf_dpmm = printer.resolution() / 25.4  # pdf resolution, dots per mm
        scene_dpmm = self._gview.MM2PT          # scene resolution, dots per mm
        natural_scale = pdf_dpmm / scene_dpmm
        page_layout = printer.pageLayout()
        pdf_rect = page_layout.paintRect(QPageLayout.Millimeter)
        swmm = sw / self._gview.MM2PT
        shmm = sh / self._gview.MM2PT
        painter = QPainter(printer)
        pdf_wmm = pdf_rect.width()
        pdf_hmm = pdf_rect.height()
        if swmm > pdf_wmm or shmm > pdf_hmm:
            # Shrink to fit page
            self.render(painter)
        else:
            # Scale resolution to keep size
            pdf_rect.setWidth(sw * natural_scale)
            pdf_rect.setHeight(sh * natural_scale)
            self.render(painter, pdf_rect)
        painter.end()
        qbuf.close()
        # Write resulting file
        if not filepath.endswith('.pdf'):
            filepath += '.pdf'
        with open(filepath, 'wb') as fh:
            fh.write(bytes(qbytes))
        return filepath

###

class CellStyle:
    """Handle various aspects of cell styling.
    Also manage caches for fonts, pens and brushes.
    """
    _fonts = {}
    _brushes = {}
    _pens = {}
#
    @classmethod
    def getFont(cls, fontFamily, fontSize, fontBold, fontItalic):
        ftag = (fontFamily, fontSize, fontBold, fontItalic)
        try:
            return cls._fonts[ftag]
        except:
            pass
        font = QFont()
        if fontFamily:
            font.setFamily(fontFamily)
        if fontSize:
            font.setPointSizeF(fontSize)
        if fontBold:
            font.setBold(True)
        if fontItalic:
            font.setItalic(True)
        cls._fonts[ftag] = font
        return font
#
    @classmethod
    def getPen(cls, width, colour=None):
        """Manage a cache for pens of different width and colour.
        """
        if width:
            wc = (width, colour or BORDER_COLOUR)
            try:
                return cls._pens[wc]
            except AttributeError:
                cls._pens = {}
            except KeyError:
                pass
            pen = QPen('#FF' + wc[1])
            pen.setWidthF(wc[0])
            cls._pens[wc] = pen
            return pen
        else:
            try:
                return cls._noPen
            except AttributeError:
                cls._noPen = QPen()
                cls._noPen.setStyle(Qt.NoPen)
                return cls._noPen
#
    @classmethod
    def getBrush(cls, colour):
        """Manage a cache for brushes of different colour.
        <colour> is a colour in the form 'RRGGBB'.
        """
        try:
            return cls._brushes[colour or FONT_COLOUR]
        except:
            pass
        brush = QBrush(QColor('#FF' + (colour or FONT_COLOUR)))
        cls._brushes[colour] = brush
        return brush
#
    def __init__(self, font, size, align='c', highlight=None, bg=None,
            border=1, border_colour=None, mark=None):
        """
        <font> is the name of the font (<None> => default, not
            recommended, unless the cell is to contain no text).
        <size> is the size of the font (<None> => default, not
            recommended, unless the cell is to contain no text).
        <align> is the horizontal (l, c or r) OR vertical (b, m, t)
            alignment. Vertical alignment is for rotated text (-90° only).
        <highlight> can set bold, italic and font colour: 'bi:RRGGBB'.
            All bits are optional, but the colon must be present if a
            colour is given.
        <bg> can set the background colour ('RRGGBB').
        <border>: Only three border types are supported here:
            0: none
            1: all sides
            2: (thicker) underline
        <border_colour>: 'RRGGBB', default is <BORDER_COLOUR>.
        <mark> is a colour ('RRGGBB') which can be selected as an
            "alternative" font colour.
        """
        # Font
        self.setFont(font, size, highlight)
        self.colour_marked = mark
        # Alignment
        self.setAlign(align)
        # Background colour
        self.bgColour = self.getBrush(bg) if bg else None
        # Border
        self.border = border
        self.border_colour = border_colour
#
    def setFont(self, font, size, highlight):
        self._font, self._size, self._highlight = font, size, highlight
        try:
            emph, clr = highlight.split(':')
        except:
            emph, clr = highlight or '', None
        self.fontColour = self.getBrush(clr)
        self.font = self.getFont(font, size, 'b' in emph, 'i' in emph)
#
    def setAlign(self, align):
        if align in 'bmt':
            # Vertical
            self.alignment = ('c', align, True)
        else:
            self.alignment = (align, 'm', False)
#
    def copy(self, font=None, size=None, align=None, highlight=None,
            mark=None, bg=None, border=None):
        """Make a copy of this style, but with changes specified by the
        parameters.
        Note that a change to a 'None' parameter value is not possible.
        """
        newstyle = copy.copy(self)
        if font or size or highlight:
            newstyle.setFont(font or self._font,
                    size or self._size,
                    highlight or self._highlight)
        if mark:
            newstyle.colour_marked = mark
        if align:
            newstyle.setAlign(align)
        if bg:
            newstyle.bgColour = self.getBrush(bg)
        if border != None:
            newstyle.border = border
        return newstyle

###

class Tile(QGraphicsRectItem):
    """The graphical representation of a table cell.
    This cell can span rows and columns. It contains a simple text element.
    Both cell and text can be styled to a limited extent (see <CellStyle>).
    """
    def __init__(self, grid, tag, x, y, w, h, text, style):
        self._style = style
        self._grid = grid
        self.tag = tag
        self.height0 = h
        self.width0 = w
        super().__init__(0, 0, w, h)
        self.setFlag(self.ItemClipsChildrenToShape, True)
        self.setPos(x, y)
        # Background colour
        if style.bgColour != None:
            self.setBrush(style.bgColour)
        # Border
        if style.border == 1:
            # Set the pen for the rectangle boundary
            pen0 = CellStyle.getPen(BORDER_WIDTH, style.border_colour)
        else:
            # No border for the rectangle
            pen0 = CellStyle.getPen(None)
            if style.border != 0:
                # Thick underline
                line = QGraphicsLineItem(self)
                line.setPen(CellStyle.getPen(UNDERLINE_WIDTH,
                        style.border_colour))
                line.setLine(0, h, w, h)
        self.setPen(pen0)
        # Alignment and rotation
        self.halign, self.valign, self.rotation = style.alignment
        # Text
        self.textItem = QGraphicsSimpleTextItem(self)
        self.textItem.setFont(style.font)
        self.textItem.setBrush(style.fontColour)
        self.setText(text or '')
#
    def mark(self):
        if self._style.colour_marked:
            self.textItem.setBrush(
                    self._style.getBrush(self._style.colour_marked))
#
    def unmark(self):
        self.textItem.setBrush(self._style.fontColour)
#
    def margin(self):
        return 0.4 * self._grid._gview.MM2PT
#
    def value(self):
        return self._text
#
    def setText(self, text):
        if type(text) != str:
            raise GridError(_NOTSTRING.format(val=repr(text)))
        self._text = text
        self.textItem.setText(text)
        self.textItem.setScale(1)
        w = self.textItem.boundingRect().width()
        h = self.textItem.boundingRect().height()
        if text:
            scale = 1
            maxw = self.width0 - self.margin() * 2
            maxh = self.height0 - self.margin() * 2
            if self.rotation:
                maxh -= self.margin() * 4
                if w > maxh:
                    scale = maxh / w
                if h > maxw:
                    _scale = maxw / h
                    if _scale < scale:
                        scale = _scale
                if scale < 0.6:
                    self.textItem.setText('###')
                    scale = (maxh / self.textItem.boundingRect().width())
                if scale < 1:
                    self.textItem.setScale(scale)
                trf = QTransform().rotate(-90)
                self.textItem.setTransform(trf)
            else:
                maxw -= self.margin() * 4
                if w > maxw:
                    scale = maxw / w
                if h > maxh:
                    _scale = maxh / h
                    if _scale < scale:
                        scale = _scale
                if scale < 0.6:
                    self.textItem.setText('###')
                    scale = (maxw / self.textItem.boundingRect().width())
                if scale < 1:
                    self.textItem.setScale(scale)
            # This print line can help find box size problems:
            # print("BOX-SCALE: %5.3f (%s) *** w: %6.2f / %6.2f *** h: %6.2f / %6.2f"
            #         % (scale, text, w, maxw, h, maxh))
        bdrect = self.textItem.mapRectToParent(self.textItem.boundingRect())
        yshift = - bdrect.top() if self.rotation else 0.0
        w = bdrect.width()
        h = bdrect.height()
        xshift = 0.0
        if self.halign == 'l':
            xshift += self.margin()
        elif self.halign == 'r':
            xshift += self.width0 - self.margin() - w
        else:
            xshift += (self.width0 - w) / 2
        if self.valign == 't':
            yshift += self.margin()
        elif self.valign == 'b':
            yshift += self.height0 - self.margin() - h
        else:
            yshift += (self.height0 - h) / 2
        self.textItem.setPos(xshift, yshift)
#
    def leftclick(self):
        return self._grid.tile_left_clicked(self)
#
    def rightclick(self):
        return self._grid.tile_right_clicked(self)

#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#

#TODO ...
# ====== file: Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/bladebios_lib.py | repo: opencomputeproject/Rack-Manager | license: MIT ======
# Copyright (C) Microsoft Corporation. All rights reserved.

# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

#!/usr/bin/python
# -*- coding: utf-8 -*-

from ipmicmd_library import *


def get_server_bios_config(serverid):
    try:
        if serverid < 1 or serverid > 48:
            return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
        else:
            interface = get_ipmi_interface(serverid)
            if "Failed:" in interface:
                return set_failure_dict(interface, completion_code.failure)

            ipmi_cmd = 'ocsoem biosconfig'  # IPMI command to get server bios config details
            cmdinterface = interface + ' ' + ipmi_cmd

            bios_config = parse_get_bios_config(cmdinterface, "getserverbiosconfig")

            if bios_config is None or not bios_config:  # Check empty or none
                #return set_failure_dict("Empty data for biosconfig", "-1")
                return set_failure_dict("Empty data for biosconfig", completion_code.failure)
    except Exception, e:
        #Log_Error("Failed Exception:",e)
        return set_failure_dict(("getbiosconfig Exception: ", e), completion_code.failure)

    return bios_config


def set_server_bios_config(serverid, majorconfig, minorconfig):
    try:
        if serverid < 1 or serverid > 48:
            return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
        else:
            interface = get_ipmi_interface(serverid)
            if "Failed:" in interface:
                return set_failure_dict(interface, completion_code.failure)

            # IPMI command to set server bios config details
            ipmi_cmd = 'ocsoem setbiosconfig' + ' ' + str(majorconfig) + ' ' + str(minorconfig)
            cmdinterface = interface + ' ' + ipmi_cmd

            bios_config = parse_set_bios_config(cmdinterface, "setserverbiosconfig")

            if bios_config is None or not bios_config:  # Check empty or none
                return set_failure_dict("Empty data for setbiosconfig", completion_code.failure)
    except Exception, e:
        #Log_Error("Failed Exception:",e)
        return set_failure_dict(("setbiosconfig Exception: ", e), completion_code.failure)

    return bios_config


def get_bios_code(serverid, version):
    try:
        if serverid < 1 or serverid > 48:
            return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
        else:
            interface = get_ipmi_interface(serverid)
            if "Failed:" in interface:
                return set_failure_dict(interface, completion_code.failure)

            ipmi_cmd = 'ocsoem bioscode' + ' ' + version  # IPMI command to get server bios code details
            cmdinterface = interface + ' ' + ipmi_cmd

            bios_code = parse_bioscode(cmdinterface, "getserverbioscode")

            if bios_code is None or not bios_code:  # Check empty or none
                return set_failure_dict("Empty data for getserverbioscode", completion_code.failure)
    except Exception, e:
        #Log_Error("Failed Exception:",e)
        return set_failure_dict(("getbioscode Exception: ", e), completion_code.failure)

    return bios_code


def parse_get_bios_config(interface, command):
    try:
        completionstate = True
        output = call_ipmi(interface, command)

        if "ErrorCode" in output:
            return output

        biosconfigrsp = {}
        biosconfigrsp["AvailableConfigurations"] = {}

        if output['status_code'] == 0:
            biosdata = output['stdout'].split('\n\n')

            # Gets current and chosen config details from output
            current_config_details = biosdata.pop(0)
            currentconfig = current_config_details.split('\n')

            for cfgval in currentconfig:
                if "Current BIOS Configuration" in cfgval:
                    biosconfigrsp["Current BIOS Configuration"] = cfgval.split(":")[-1]
                elif "Chosen BIOS Configuration" in cfgval:
                    biosconfigrsp["Chosen BIOS Configuration"] = cfgval.split(":")[-1]
                elif "Available Configuration Name" in cfgval:
                    biosconfigrsp["AvailableConfigName"] = cfgval.split(":")[-1]

            # Gets all available configuration details
            for availablecfg in biosdata:
                configdata = availablecfg.split('\n')
                config_value = filter(None, configdata)
                # Skipping empty lists if any
                if len(config_value) == 0:
                    break
                else:
                    if config_value[0].lower().strip('-').strip() == "Available Configurations".lower():
                        available_config_data = availablecfg.split('*')
                        available_config_value = filter(None, available_config_data)
                        config_info = get_config_data(available_config_value)
                        if completion_code.cc_key in config_info.keys():
                            completionstate &= False
                            biosconfigrsp["AvailableConfigurations"] = None
                        else:
                            biosconfigrsp["AvailableConfigurations"] = config_info

            if completionstate:
                biosconfigrsp[completion_code.cc_key] = completion_code.success
            else:
                biosconfigrsp[completion_code.cc_key] = completion_code.failure

            return biosconfigrsp
        else:
            error_data = output['stderr'].split('\n')
            biosconfigrsp[completion_code.cc_key] = completion_code.failure
            for data in error_data:
                if "Error" in data:
                    biosconfigrsp[completion_code.desc] = data.split(":")[-1]
                elif "Completion Code" in data:
                    biosconfigrsp[completion_code.ipmi_code] = data.split(":")[-1]
            return biosconfigrsp
    except Exception, e:
        #log.exception("GetserverBiosConfig: Exception error:" ,e)
        return set_failure_dict(("parse_get_bios_config() Exception: ", e), completion_code.failure)


def get_config_data(configdata):
    try:
        config_rsp = {}
        config_id = 1
        for value in configdata:
            config_data = value.split('\n')
            config_info = filter(None, config_data)  # Removes empty strings
            # Skipping empty lists if any
            if len(config_info) == 0:
                break
            config_rsp[config_id] = {}
            for value in config_info:
                if "ConfigName" in value:
                    config_rsp[config_id]["Config Name"] = value.split(":")[-1].strip()
                elif "ConfigValue" in value:
                    config_rsp[config_id]["Config Value"] = value.split(":")[-1].strip()
            config_id = config_id + 1
    except Exception, e:
        config_rsp[completion_code.cc_key] = completion_code.failure
        config_rsp[completion_code.desc] = "Get available config data, Exception: ", e

    return config_rsp


def parse_set_bios_config(interface, command):
    try:
        output = call_ipmi(interface, command)

        if "ErrorCode" in output:
            return output

        setbiosconfigrsp = {}

        if output['status_code'] == 0:
            sdata = output['stdout'].split('\n')
            completionstate = sdata.pop(0)
            if "Completion Status" in completionstate:
                setbiosconfigrsp[completion_code.cc_key] = completionstate.split(":")[-1]
            return setbiosconfigrsp
        else:
            error_data = output['stderr'].split('\n')
            setbiosconfigrsp[completion_code.cc_key] = completion_code.failure
            for data in error_data:
                if "Error" in data:
                    setbiosconfigrsp[completion_code.desc] = data.split(":")[-1]
                elif "Completion Code" in data:
                    setbiosconfigrsp[completion_code.ipmi_code] = data.split(":")[-1]
            return setbiosconfigrsp
    except Exception, e:
        #log.exception("Exception error is: ",e)
        return set_failure_dict(("parse_set_bios_config() Exception ", e), completion_code.failure)


def parse_bioscode(interface, command):
    try:
        output = call_ipmi(interface, command)

        if "ErrorCode" in output:
            return output

        biosrsp = {}

        if output['status_code'] == 0:
            sdata = output['stdout'].split('\n')
            biosrsp["Bios Code"] = str(sdata[0])
            biosrsp[completion_code.cc_key] = completion_code.success
            return biosrsp
        else:
            error_data = output['stderr'].split('\n')
            biosrsp[completion_code.cc_key] = completion_code.failure
            for data in error_data:
                if "Error" in data:
                    biosrsp[completion_code.desc] = data.split(":")[-1]
                elif "Completion Code" in data:
                    biosrsp[completion_code.ipmi_code] = data.split(":")[-1]
            return biosrsp
    except Exception, e:
        #log.exception("Exception error is: %s " %e)
        #print "Exception: ", e
        return set_failure_dict(("ParseGetBiosCodeResult() Exception: ", e), completion_code.failure)
# ====== file: official/cv/unet/src/unet_nested/unet_model.py | repo: leelige/mindspore | license: Apache-2.0 ======
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Model of UnetPlusPlus
import mindspore.nn as nn
import mindspore.ops as P

from .unet_parts import UnetConv2d, UnetUp


class NestedUNet(nn.Cell):
    """
    Nested unet
    """
    def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True, use_ds=True):
        super(NestedUNet, self).__init__()
        self.in_channel = in_channel
        self.n_class = n_class
        self.feature_scale = feature_scale
        self.use_deconv = use_deconv
        self.use_bn = use_bn
        self.use_ds = use_ds

        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # Down Sample
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
        self.conv00 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
        self.conv10 = UnetConv2d(filters[0], filters[1], self.use_bn)
        self.conv20 = UnetConv2d(filters[1], filters[2], self.use_bn)
        self.conv30 = UnetConv2d(filters[2], filters[3], self.use_bn)
        self.conv40 = UnetConv2d(filters[3], filters[4], self.use_bn)

        # Up Sample
        self.up_concat01 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
        self.up_concat11 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
        self.up_concat21 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
        self.up_concat31 = UnetUp(filters[4], filters[3], self.use_deconv, 2)

        self.up_concat02 = UnetUp(filters[1], filters[0], self.use_deconv, 3)
        self.up_concat12 = UnetUp(filters[2], filters[1], self.use_deconv, 3)
        self.up_concat22 = UnetUp(filters[3], filters[2], self.use_deconv, 3)

        self.up_concat03 = UnetUp(filters[1], filters[0], self.use_deconv, 4)
        self.up_concat13 = UnetUp(filters[2], filters[1], self.use_deconv, 4)

        self.up_concat04 = UnetUp(filters[1], filters[0], self.use_deconv, 5)

        # Final Convolution
        self.final1 = nn.Conv2d(filters[0], n_class, 1)
        self.final2 = nn.Conv2d(filters[0], n_class, 1)
        self.final3 = nn.Conv2d(filters[0], n_class, 1)
        self.final4 = nn.Conv2d(filters[0], n_class, 1)
        self.stack = P.Stack(axis=0)

    def construct(self, inputs):
        x00 = self.conv00(inputs)                        # channel = filters[0]
        x10 = self.conv10(self.maxpool(x00))             # channel = filters[1]
        x20 = self.conv20(self.maxpool(x10))             # channel = filters[2]
        x30 = self.conv30(self.maxpool(x20))             # channel = filters[3]
        x40 = self.conv40(self.maxpool(x30))             # channel = filters[4]

        x01 = self.up_concat01(x10, x00)                 # channel = filters[0]
        x11 = self.up_concat11(x20, x10)                 # channel = filters[1]
        x21 = self.up_concat21(x30, x20)                 # channel = filters[2]
        x31 = self.up_concat31(x40, x30)                 # channel = filters[3]

        x02 = self.up_concat02(x11, x00, x01)            # channel = filters[0]
        x12 = self.up_concat12(x21, x10, x11)            # channel = filters[1]
        x22 = self.up_concat22(x31, x20, x21)            # channel = filters[2]

        x03 = self.up_concat03(x12, x00, x01, x02)       # channel = filters[0]
        x13 = self.up_concat13(x22, x10, x11, x12)       # channel = filters[1]

        x04 = self.up_concat04(x13, x00, x01, x02, x03)  # channel = filters[0]

        final1 = self.final1(x01)
        final2 = self.final2(x02)
        final3 = self.final3(x03)
        final4 = self.final4(x04)

        if self.use_ds:
            final = self.stack((final1, final2, final3, final4))
            return final
        return final4


class UNet(nn.Cell):
    """
    Simple UNet with skip connection
    """
    def __init__(self, in_channel, n_class=2, feature_scale=2, use_deconv=True, use_bn=True):
        super(UNet, self).__init__()
        self.in_channel = in_channel
        self.n_class = n_class
        self.feature_scale = feature_scale
        self.use_deconv = use_deconv
        self.use_bn = use_bn

        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # Down Sample
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
        self.conv0 = UnetConv2d(self.in_channel, filters[0], self.use_bn)
        self.conv1 = UnetConv2d(filters[0], filters[1], self.use_bn)
        self.conv2 = UnetConv2d(filters[1], filters[2], self.use_bn)
        self.conv3 = UnetConv2d(filters[2], filters[3], self.use_bn)
        self.conv4 = UnetConv2d(filters[3], filters[4], self.use_bn)

        # Up Sample
        self.up_concat1 = UnetUp(filters[1], filters[0], self.use_deconv, 2)
        self.up_concat2 = UnetUp(filters[2], filters[1], self.use_deconv, 2)
        self.up_concat3 = UnetUp(filters[3], filters[2], self.use_deconv, 2)
        self.up_concat4 = UnetUp(filters[4], filters[3], self.use_deconv, 2)

        # Final Convolution
        self.final = nn.Conv2d(filters[0], n_class, 1)

    def construct(self, inputs):
        x0 = self.conv0(inputs)              # channel = filters[0]
        x1 = self.conv1(self.maxpool(x0))    # channel = filters[1]
        x2 = self.conv2(self.maxpool(x1))    # channel = filters[2]
        x3 = self.conv3(self.maxpool(x2))    # channel = filters[3]
        x4 = self.conv4(self.maxpool(x3))    # channel = filters[4]

        up4 = self.up_concat4(x4, x3)
        up3 = self.up_concat3(up4, x2)
        up2 = self.up_concat2(up3, x1)
        up1 = self.up_concat1(up2, x0)

        final = self.final(up1)

        return final
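# Usage sketch (not part of the original file): a shape-level smoke test.
# It assumes a working MindSpore install and that unet_parts is importable;
# the input size is arbitrary but must be divisible by 16 (four 2x2 poolings).
#
#     import numpy as np
#     import mindspore as ms
#
#     net = NestedUNet(in_channel=1, n_class=2, use_ds=True)
#     x = ms.Tensor(np.random.randn(1, 1, 96, 96).astype(np.float32))
#     out = net(x)
#     # use_ds=True stacks the four side outputs: (4, 1, 2, 96, 96);
#     # with use_ds=False only the last head is returned: (1, 2, 96, 96)
#     print(out.shape)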
42.378378
106
0.615912
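A hedged usage sketch for the NestedUNet file above: the input size and constructor arguments are arbitrary choices, and it assumes MindSpore is installed and that the class is importable together with its .unet_parts helpers (which are not shown in this record).

# Hypothetical usage, not from the repo; NestedUNet is the class defined above.
import numpy as np
import mindspore as ms

net = NestedUNet(in_channel=1, n_class=2, feature_scale=2, use_ds=True)
x = ms.Tensor(np.random.rand(1, 1, 96, 96).astype(np.float32))  # NCHW; 96 divides cleanly by 2**4
out = net(x)
# use_ds=True stacks the four deep-supervision heads on a new leading axis:
#   out.shape == (4, 1, 2, 96, 96)   (final1..final4)
# use_ds=False would return only final4 with shape (1, 2, 96, 96).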
ee3d6d3e804f9ef1a9f8dd5e3ba587f570138ac4
90
py
Python
examples/helloWorld.py
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
7fcae0e43de51843565c2403fa66da26cb79a04b
[ "MIT" ]
3
2016-02-29T09:22:05.000Z
2018-05-16T23:10:38.000Z
examples/helloWorld.py
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
7fcae0e43de51843565c2403fa66da26cb79a04b
[ "MIT" ]
3
2016-01-20T20:58:28.000Z
2017-02-06T08:28:30.000Z
examples/helloWorld.py
Devoxx4KidsDE/workshop-minecraft-modding-raspberry-pi
7fcae0e43de51843565c2403fa66da26cb79a04b
[ "MIT" ]
3
2016-01-20T20:02:57.000Z
2021-03-10T20:21:59.000Z
from mcpi import minecraft

mc = minecraft.Minecraft.create()
mc.postToChat("Hello World")
22.5
33
0.788889
c99b1c8bbe063bb3a62611598e7d312d2aee21ba
298
py
Python
spht/urls.py
consbio/spht
96ec6a0931851b33eace08720d4d18ab34775a52
[ "BSD-2-Clause" ]
1
2019-08-04T21:13:41.000Z
2019-08-04T21:13:41.000Z
spht/urls.py
consbio/spht
96ec6a0931851b33eace08720d4d18ab34775a52
[ "BSD-2-Clause" ]
23
2018-04-12T20:43:15.000Z
2022-02-10T12:10:53.000Z
spht/urls.py
consbio/spht
96ec6a0931851b33eace08720d4d18ab34775a52
[ "BSD-2-Clause" ]
null
null
null
from django.conf.urls import url
from django.views.generic import TemplateView

from spht.views import IntersectView

urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='spht/tool.html')),
    url(r'^intersect/tiles/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+).png$', IntersectView.as_view())
]
29.8
91
0.697987
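A small hedged illustration of how the tile pattern above dispatches; resolve() is standard Django, but running it requires a configured project whose root URLconf includes spht/urls.py (that setup is assumed, not shown):

# Assumes DJANGO_SETTINGS_MODULE points at a project including spht/urls.py.
from django.urls import resolve

match = resolve('/intersect/tiles/4/8/5.png')
print(match.func)    # the IntersectView.as_view() wrapper
print(match.kwargs)  # {'z': '4', 'x': '8', 'y': '5'}; named groups arrive as strings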
094ea321a49ca2dc60c8a40c13f143c9e2cd5be6
5,306
py
Python
kts/ui/static.py
konodyuk/kts
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
[ "MIT" ]
18
2019-02-14T13:10:07.000Z
2021-11-26T07:10:13.000Z
kts/ui/static.py
konodyuk/kts
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
[ "MIT" ]
2
2019-02-17T14:06:42.000Z
2019-09-15T18:05:54.000Z
kts/ui/static.py
konodyuk/kts
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
[ "MIT" ]
2
2019-09-15T13:12:42.000Z
2020-04-15T14:05:54.000Z
CSS_STYLE = """ .kts {{ line-height: 1.6; }} .kts * {{ box-sizing: content-box; }} .kts-wrapper {{ display: inline-flex; flex-direction: column; background-color: {first}; padding: 10px; border-radius: 20px; }} .kts-wrapper-border {{ border: 0px solid {second}; }} .kts-pool {{ display: flex; flex-wrap: wrap; background-color: {second}; padding: 5px; border-radius: 20px; margin: 5px; }} .kts-field {{ text-align: left; border-radius: 15px; padding: 5px 15px; margin: 5px; display: inline-block; }} .kts-field-bg {{ background-color: {second}; }} .kts-field-bold {{ font-weight: bold; }} .kts-field-third {{ color: {third}; }} .kts-field-accent {{ color: {accent}; }} .kts-field-bg:hover {{ background-color: {fourth}; }} .kts-annotation {{ text-align: left; margin-left: 20px; margin-bottom: -5px; display: inline-block; color: {third}; }} .kts-title {{ text-align: center; display: inline-block; font-weight: bold; color: {third}; }} .kts-code {{ background-color: {second}; text-align: left; border-radius: 15px; padding: 0.5em 15px; margin: 5px; color: white; display: inline-block; }} .kts-code:hover {{ background-color: {fourth}; }} .kts-code > pre {{ background-color: {second}; overflow: auto; white-space: pre-wrap; }} .kts-code:hover > pre {{ background-color: {fourth}; }} .kts-output {{ background-color: {second}; text-align: left; border-radius: 15px; padding: 5px 15px; margin: 5px; font-weight: bold; font-family: monospace; color: {accent}; overflow: auto; max-height: 4.8em; display: flex; flex-direction: column-reverse; }} .kts-df {{ background-color: {second}; text-align: left; border-radius: 15px; padding: 5px 15px; margin: 5px; display: inline-block; color: {accent}; }} .kts-title-with-cross {{ display: grid; grid-template-columns: 1em auto 1em; margin-left: 5px; margin-right: 5px; }} .kts-cross-circle {{ background-color: {second}; width: 1em; height: 1em; position: relative; border-radius: 50%; cursor: pointer; z-index: 2; margin-top: 2px; max-width: none; }} .kts-cross-before, .kts-cross-after {{ background-color: {third}; content: ''; position: absolute; width: 0.75em; height: 2px; border-radius: 0; top: calc((1em - 2px) / 2); z-index: 0; }} .kts-cross-before {{ -webkit-transform: rotate(-45deg); -moz-transform: rotate(-45deg); transform: rotate(-45deg); left: calc(1em / 8); }} .kts-cross-after {{ -webkit-transform: rotate(-135deg); -moz-transform: rotate(-135deg); transform: rotate(-135deg); right: calc(1em / 8); }} #kts-hidden {{ display: none }} .kts-thumbnail {{ margin: 0; cursor: pointer; }} .kts-thumbnail-first {{ background-color: {first}; }} .kts-thumbnail-second {{ background-color: {second}; }} #kts-collapsible {{ -webkit-transition: max-height {anim_height}, padding {anim_padding}; -moz-transition: max-height {anim_height}, padding {anim_padding}; -ms-transition: max-height {anim_height}, padding {anim_padding}; -o-transition: max-height {anim_height}, padding {anim_padding}; transition: max-height {anim_height}, padding {anim_padding}; padding: 0; margin: 2px; align-self: flex-start; max-height: 100px; overflow: hidden; }} .kts-check {{ display: none; }} .kts-check:checked + #kts-collapsible {{ padding: 10px; max-height: {max_height_expanded}; }} .kts-check:checked + #kts-collapsible > #kts-hidden {{ display: inline-flex; }} .kts-check:checked + #kts-collapsible > .kts-thumbnail {{ display: none; }} .kts-check:checked + .kts-wrapper-border {{ border: 2px solid {second}; }} .kts-check-outer {{ display: none; }} .kts-check-outer:checked + #kts-collapsible {{ padding: 10px; 
max-height: {max_height_expanded}; }} .kts-check-outer:checked + #kts-collapsible > #kts-hidden {{ display: inline-flex; }} .kts-check-outer:checked + #kts-collapsible > .kts-thumbnail {{ display: none; }} .kts-check-outer:checked + .kts-wrapper-border {{ border: 2px solid {second}; }} .kts-inner-wrapper {{ flex-direction: column; }} .kts-progressbar-wrapper {{ display: flex; flex-direction: row; align-items: center; height: 1.6em; }} .kts-progressbar-outer {{ box-sizing: padding-box; display: flex; flex-direction: row; background-color: {second}; align-items: center; padding: 3px; border-radius: 15px; width: 100%; }} .kts-progressbar-inner {{ background-color: {third}; height: 0.7em; border-radius: 15px; }} .kts-hbar-container {{ display: block; position: relative; height: min(calc(100% - 3px), 1.5em); margin: 2px; }} .kts-hbar {{ position: absolute; display: inline-block; background-color: {third}; text-align: left; height: 100%; border-radius: 15px; }} .kts-hbar-line {{ position: absolute; display: inline-block; background-color: {accent}; text-align: left; height: 1px; top: 50%; }} .kts-inner-column {{ display: flex; flex-direction: column; padding: auto; }} .kts-row {{ display: flex; flex-direction: row; }} .kts-hoverable-line, .kts-hoverable-line * {{ pointer-events: all; transition: all 0.1s ease-out; }} .kts-hoverable-line:hover * {{ stroke: {second_accent}; stroke-width: 10; }} """
19.224638
72
0.650773
0959f644ddbc06b75da28e53ec3916db243825e1
2,087
py
Python
ecrire_json.py
Maxim01/Programmes
dbe5b83b3c65776ccc00049793fa85313fb76065
[ "Apache-2.0" ]
null
null
null
ecrire_json.py
Maxim01/Programmes
dbe5b83b3c65776ccc00049793fa85313fb76065
[ "Apache-2.0" ]
null
null
null
ecrire_json.py
Maxim01/Programmes
dbe5b83b3c65776ccc00049793fa85313fb76065
[ "Apache-2.0" ]
null
null
null
# NOTE: legacy Python 2 code (print statements).
import json
import time
import subprocess
import sys

MAC_ARG = "VIDE"
ACTION_ARG = "VIDE"
ADD_ARG = "VIDE"
MDP_ARG = "VIDE"
DEST_ARG = "VIDE"
MDP_SERR_ARG = "VIDE"


def mdp_serrure():
    # update the password of the selected lock
    global MAC_ARG
    global MDP_SERR_ARG
    with open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json') as f:
        dataa = json.load(f)
    dataa['Password']['Pass'] = MDP_SERR_ARG
    with open('/home/Devismes_Bridge/Equipements/' + MAC_ARG + '/Pass.json', 'w') as f:
        json.dump(dataa, f, indent=2)


def mail_dest():
    # update the destination e-mail address
    global DEST_ARG
    with open('/home/Devismes_Bridge/JSON_List/mail.json') as f:
        dataa = json.load(f)
    print "OK: ", DEST_ARG
    dataa['mail']['Dest'] = DEST_ARG
    with open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:
        json.dump(dataa, f, indent=2)


def mail_origine():  # TODO: move this to the SQL database!!!!
    # update the sender e-mail address (and password)
    global ADD_ARG
    global MDP_ARG
    with open('/home/Devismes_Bridge/JSON_List/mail.json') as f:
        dataa = json.load(f)
    dataa['mail']['adresse'] = ADD_ARG
    with open('/home/Devismes_Bridge/JSON_List/mail.json', 'w') as f:
        json.dump(dataa, f, indent=2)


def Arguments():
    global MAC_ARG
    global ADD_ARG
    global MDP_ARG
    global DEST_ARG
    global MDP_SERR_ARG
    print "Arguments: ", sys.argv
    print "NB d'arguments: ", len(sys.argv)
    if (len(sys.argv) == 4) and (sys.argv[1] == '1'):
        # mode 1: change the password of the selected lock
        print "modifie mot de passe serrure"
        MAC_ARG = sys.argv[2]
        MDP_SERR_ARG = sys.argv[3]
        mdp_serrure()
    if (len(sys.argv) == 3) and (sys.argv[1] == '2'):
        # mode 2: change the destination e-mail
        print "modifie mail destination"
        DEST_ARG = sys.argv[2]
        mail_dest()
    if (len(sys.argv) == 3) and (sys.argv[1] == '3'):
        # mode 3: change the sender e-mail and password
        print "modifie mail d'origine et mot de passe"
        ADD_ARG = sys.argv[2]
        mail_origine()


def main():
    print "MAIN"
    Arguments()


if __name__ == "__main__":
    main()
22.202128
106
0.615716
1195d97ce7b7fc191ee2c37ce674f6e799bdf4b2
107
py
Python
main.ru.py
vv31415926/python_lessons_01_4
f5e67d008a5401335c7b5589d9dacc125856560d
[ "MIT" ]
null
null
null
main.ru.py
vv31415926/python_lessons_01_4
f5e67d008a5401335c7b5589d9dacc125856560d
[ "MIT" ]
null
null
null
main.ru.py
vv31415926/python_lessons_01_4
f5e67d008a5401335c7b5589d9dacc125856560d
[ "MIT" ]
null
null
null
s = input('Введите ФИО через пробел:')  # prompt: "Enter your full name, separated by spaces"
lst = s.split()
print('Привет, ')  # "Hello,"
for si in lst:
    print(si)
17.833333
38
0.598131
e115b2e949a11782a5bb56a5b2bb6a3795f7d276
330
py
Python
menucard/admin.py
baniasbaabe/happy-qr
bf44ac19306ea6405cc7c9a100e6f83afca125b4
[ "MIT" ]
1
2021-01-23T21:42:10.000Z
2021-01-23T21:42:10.000Z
menucard/admin.py
baniasbaabe/happy-qr
bf44ac19306ea6405cc7c9a100e6f83afca125b4
[ "MIT" ]
null
null
null
menucard/admin.py
baniasbaabe/happy-qr
bf44ac19306ea6405cc7c9a100e6f83afca125b4
[ "MIT" ]
null
null
null
from django.contrib import admin

# Register your models here.
from menucard.models import *

admin.site.register(Vorspeise)
admin.site.register(Hauptspeise)
admin.site.register(Nachspeise)
admin.site.register(Snacks)
admin.site.register(AlkoholfreieDrinks)
admin.site.register(AlkoholhaltigeDrinks)
admin.site.register(Besucher)
25.384615
41
0.833333
0183624fde61b9b8bb023787016c964c88412b6b
170
py
Python
flask/app/views.py
hou2zi0/flask-app-docker
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
[ "MIT" ]
null
null
null
flask/app/views.py
hou2zi0/flask-app-docker
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
[ "MIT" ]
null
null
null
flask/app/views.py
hou2zi0/flask-app-docker
0e51b1f00201fc6eb46a62d0d8f2701bc02d4031
[ "MIT" ]
null
null
null
from app import app


@app.route('/')
def index():
    return "Hello from Flask! 🐵"


@app.route('/affe')
def affe():
    return "Hello from Flask! Affe sagt Hallo! 🐵"
17
49
0.611765
0968faab53e0aa82c8b7c026041088ebbd25206c
2,274
py
Python
python/my_sql_conn.py
EstherLacan/jiangfw
a449b1925742873c76dc1b3284aedb359204bc76
[ "Apache-2.0" ]
1
2020-07-29T16:43:46.000Z
2020-07-29T16:43:46.000Z
python/my_sql_conn.py
EstherLacan/jiangfw
a449b1925742873c76dc1b3284aedb359204bc76
[ "Apache-2.0" ]
null
null
null
python/my_sql_conn.py
EstherLacan/jiangfw
a449b1925742873c76dc1b3284aedb359204bc76
[ "Apache-2.0" ]
null
null
null
# -*- coding: UTF-8 -*-
# NOTE: legacy Python 2 code (print statements, `except ..., e` syntax).
import MySQLdb


class DbFunctions(object):
    """
    Database connection helper.
    """

    def __init__(self, server, username, password, dbname):
        self.server = server
        self.username = username
        self.password = password
        self.dbname = dbname
        self.db = None
        self.cur = None

    def connection_open(self):
        self.db = MySQLdb.connect(host=self.server, user=self.username,
                                  passwd=self.password, db=self.dbname)
        self.cur = self.db.cursor()

    def connection_close(self):
        self.db.close()

    def mysql_qry(self, sql, bool):  # 1 for select and 0 for insert/update/delete
        self.connection_open()
        try:
            self.cur.execute(sql)
            if bool:
                return self.cur.fetchall()
            else:
                self.db.commit()
                return True
        except MySQLdb.Error, e:
            try:
                print "Mysql Error:- " + str(e)
            except IndexError:
                print "Mysql Error:- " + str(e)
        self.connection_close()

    def mysql_insert(self, table, fields, values):
        sql = "INSERT INTO " + table + " (" + fields + ") VALUES (" + values + ")"
        return self.mysql_qry(sql, 0)

    def mysql_update(self, table, values, conditions):
        sql = "UPDATE " + table + " SET " + values + " WHERE " + conditions
        return self.mysql_qry(sql, 0)

    def mysql_delete(self, table, conditions):
        sql = "DELETE FROM " + table + " WHERE " + conditions
        return self.mysql_qry(sql, 0)

    def mysql_select(self, table):
        sql = "SELECT * FROM " + table
        return self.mysql_qry(sql, 1)

    def insert_by_many(self, tablname, rows):
        try:
            # batch insert: sql = 'INSERT INTO table values(%s,%s,%s)'
            sql = 'INSERT INTO ' + tablname + ' values(%s,%s,%s)'
            self.connection_open()
            self.cur.executemany(sql, rows)
            self.db.commit()
        except Exception as e:
            print e
            self.db.rollback()
        self.connection_close()
        print '[insert_by_many executemany] total:', len(rows)


# db = DbFunctions("localhost", "root", "Root@123", "db")
# result = db.mysql_qry("", 1)
29.921053
109
0.547054
61e5a5b59a9e4dbc5ab5ac9e9ca08a0245f2a295
3,267
py
Python
BigData_exp/exp3/exp3/my_html.py
DolorHunter/hfut-exp-archived
c67e26c1f4fba550c8974eaba10dfa302b928868
[ "BSD-2-Clause" ]
12
2020-12-07T05:49:05.000Z
2022-03-25T09:09:36.000Z
BigData_exp/exp3/exp3/my_html.py
DolorHunter/hfut-exp
c67e26c1f4fba550c8974eaba10dfa302b928868
[ "BSD-2-Clause" ]
null
null
null
BigData_exp/exp3/exp3/my_html.py
DolorHunter/hfut-exp
c67e26c1f4fba550c8974eaba10dfa302b928868
[ "BSD-2-Clause" ]
1
2021-01-08T08:53:53.000Z
2021-01-08T08:53:53.000Z
import requests
# filename cannot be 'html.py' which will lead to conflict to bs!!
from bs4 import BeautifulSoup
import csv


# Get raw html info
def get_html_info(url):
    try:
        print('url:' + url)
        re = requests.get(url, timeout=30)
        re.raise_for_status()
        re.encoding = 'utf-8'
        print("Get raw info.")
        return re.text
    except:
        print("[Error] Failed to get HTML info!")


# Save raw html info to the file
def save_html_info(html_info):
    file = open('raw_html_info.txt', 'w')
    for info in html_info:
        file.write(info)
    file.close()
    print("Saved raw info to the file.")


# Re raw html info from the list
def re_html_info(raw_html_info):
    # Using bs4 to extract raw info
    soup = BeautifulSoup(raw_html_info, 'html.parser')
    data = soup.find_all('tr')
    # print(data)  # test
    ready_info = []
    i = 0
    for info in data:
        if i < 2:  # escape the title
            i += 1
            continue
        else:
            info = str(info)
            re_info = {}
            # Save serial numbers to list
            serial_num_start = info.find('; height: 18.75pt">')
            serial_num_end = info.find('</td>')
            serial_num = info[serial_num_start+19: serial_num_end]
            re_info['序号'] = serial_num  # " ".join(serial_num.split())
            # Save student names to list
            student_name_start = info.find('left: medium none">', serial_num_end, len(info))
            student_name_end = info.find('</td>', student_name_start, len(info))
            student_name = info[student_name_start+19: student_name_end]
            re_info['姓名'] = student_name  # " ".join(student_name.split())
            # Save school names to list
            school_name_start = info.find('left: medium none">', student_name_end, len(info))
            school_name_end = info.find('</td>', school_name_start, len(info))
            school_name = info[school_name_start+19: school_name_end]
            re_info['录取学校'] = school_name  # " ".join(school_name.split())
            # Save school types to list
            school_types_start = info.find('left: medium none">', school_name_end, len(info))
            school_types_end = info.find('</td>', school_types_start, len(info))
            school_types = info[school_types_start+19: school_types_end]
            re_info['大学类型'] = school_types  # " ".join(school_types.split())
            ready_info.append(re_info)
    print("Info is ready.")
    return ready_info


def save_to_csv(ready_info):
    with open('re_html_info.csv', 'wt', encoding='utf-16') as csvfile:
        csvout = csv.DictWriter(csvfile, ['序号', '姓名', '录取学校', '大学类型'])
        csvout.writeheader()
        csvout.writerows(ready_info)
    print("Save to csv.")


def main():
    url = 'http://www.sszx.cn/jxjy/xkjs/201802/t20180205_8967.htm'
    raw_html_info = get_html_info(url)        # Get raw html info
    save_html_info(raw_html_info)             # Save raw html info to file
    ready_info = re_html_info(raw_html_info)  # re raw html info from file
    print(ready_info)
    save_to_csv(ready_info)                   # Save ready info to csv


if __name__ == '__main__':
    main()
33.680412
80
0.603612
28b0d798dce294187b09ea0093ced120bf38031b
2,144
py
Python
Algorithms/Implementation/Bomberman.py
baby5/HackerRank
1e68a85f40499adb9b52a4da16936f85ac231233
[ "MIT" ]
null
null
null
Algorithms/Implementation/Bomberman.py
baby5/HackerRank
1e68a85f40499adb9b52a4da16936f85ac231233
[ "MIT" ]
null
null
null
Algorithms/Implementation/Bomberman.py
baby5/HackerRank
1e68a85f40499adb9b52a4da16936f85ac231233
[ "MIT" ]
null
null
null
#coding:utf-8
# NOTE: legacy Python 2 code (raw_input, xrange, print statements).
R, C, N = map(int, raw_input().split())

grid_1 = [[] for i in xrange(R)]
bomb_list = []

for i in xrange(R):
    row_list = list(raw_input())
    grid_1[i] = row_list
    for j in xrange(len(row_list)):
        if row_list[j] == 'O':
            bomb_list.append((i, j))


def is_valid(x, y):
    return x >= 0 and x < R and y >= 0 and y < C
    # and (x, y) not in bomb_list  (author's note: 愚蠢至极! "utterly stupid", a linear scan per cell)


def have_bomb(x, y, grid):
    if x+1 < R and grid[x+1][y] == 'O':
        return 1
    elif x-1 >= 0 and grid[x-1][y] == 'O':
        return 1
    elif y+1 < C and grid[x][y+1] == 'O':
        return 1
    elif y-1 >= 0 and grid[x][y-1] == 'O':
        return 1
    else:
        return 0


if N % 2 == 0:
    for _ in xrange(R):
        print ''.join(['O'] * C)
else:
    if N == 1:
        for row in grid_1:
            print ''.join(row)
    else:
        while 1:
            grid_3 = [['O']*C for _ in xrange(R)]
            for bomb in bomb_list:
                # print bomb  # leftover debug print; it would corrupt the expected output
                # middle
                x, y = bomb
                grid_3[x][y] = '.'
                # up
                i, j = x-1, y
                if is_valid(i, j):
                    grid_3[i][j] = '.'
                # down
                i, j = x+1, y
                if is_valid(i, j):
                    grid_3[i][j] = '.'
                # left
                i, j = x, y-1
                if is_valid(i, j):
                    grid_3[i][j] = '.'
                # right
                i, j = x, y+1
                if is_valid(i, j):
                    grid_3[i][j] = '.'
            if N/2 % 2 != 0:
                for row in grid_3:
                    print ''.join(row)
                break
            for i in xrange(R):
                row = []
                for j in xrange(C):
                    if grid_3[i][j] == 'O':
                        row.append('.')
                    elif have_bomb(i, j, grid_3):
                        row.append('.')
                    else:
                        row.append('O')
                print ''.join(row)
            break
25.831325
83
0.35028
3a7d6a374822bd399b7fc85dc349d7dbd4212ce5
1,228
py
Python
pytestDemo/common/read_data.py
lthinktime/testdemo
509656d62535ed06e222c08671db11e31d9b3162
[ "Apache-2.0" ]
null
null
null
pytestDemo/common/read_data.py
lthinktime/testdemo
509656d62535ed06e222c08671db11e31d9b3162
[ "Apache-2.0" ]
null
null
null
pytestDemo/common/read_data.py
lthinktime/testdemo
509656d62535ed06e222c08671db11e31d9b3162
[ "Apache-2.0" ]
null
null
null
import yaml
import json
from configparser import ConfigParser

from common.logger import logger


class MyConfigParser(ConfigParser):
    # Override ConfigParser.optionxform so that option keys in .ini files
    # keep their case instead of being lower-cased automatically.
    def __init__(self, defaults=None):
        ConfigParser.__init__(self, defaults=defaults)

    def optionxform(self, optionstr):
        return optionstr


class ReadFileData():

    def __init__(self):
        pass

    def load_yaml(self, file_path):
        logger.info("加载 {} 文件......".format(file_path))
        with open(file_path, encoding='utf-8') as f:
            data = yaml.safe_load(f)
        logger.info("读到数据 ==>> {} ".format(data))
        return data

    def load_json(self, file_path):
        logger.info("加载 {} 文件......".format(file_path))
        with open(file_path, encoding='utf-8') as f:
            data = json.load(f)
        logger.info("读到数据 ==>> {} ".format(data))
        return data

    def load_ini(self, file_path):
        logger.info("加载 {} 文件......".format(file_path))
        config = MyConfigParser()
        config.read(file_path, encoding="UTF-8")
        data = dict(config._sections)
        # print("读到数据 ==>> {} ".format(data))
        return data


data = ReadFileData()
28.55814
69
0.614821
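A hedged usage sketch for the ReadFileData module above; the module-level `data` instance is the file's own export, but the paths below are placeholders, not files from the repo:

# Hypothetical call sites; the three paths are placeholder file names.
from common.read_data import data

cfg = data.load_yaml("data/env.yml")      # dict parsed by yaml.safe_load
case = data.load_json("data/case.json")   # dict parsed by json.load
ini = data.load_ini("config/env.ini")     # dict of sections; option-key case preserved
print(ini)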
a33ece80edcfc6ff8944ba4518931f91f0b1ccbe
2,094
py
Python
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
1
2021-07-03T06:52:20.000Z
2021-07-03T06:52:20.000Z
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
null
null
null
research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
2
2019-09-01T06:17:04.000Z
2019-10-04T08:39:45.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
RandomCrop operator.
"""

from mindspore.dataset.vision import py_transforms
from mindspore.dataset.vision import py_transforms_util
from mindspore.dataset.vision import utils


class RandomCrop(py_transforms.RandomCrop):
    """
    RandomCrop inherits from py_transforms.RandomCrop but derives/uses the
    original image size as the output size.

    Please refer to py_transforms.RandomCrop for argument specifications.
    """
    def __init__(self, padding=4, pad_if_needed=False, fill_value=0,
                 padding_mode=utils.Border.CONSTANT):
        # Note the `1` for the size argument is only set for passing the check.
        super(RandomCrop, self).__init__(
            1,
            padding=padding,
            pad_if_needed=pad_if_needed,
            fill_value=fill_value,
            padding_mode=padding_mode,
        )

    def __call__(self, img):
        """
        Call method.

        Args:
            img (PIL image): Image to be padded and then randomly cropped
                back to the same size.

        Returns:
            img (PIL image), Randomly cropped image.
        """
        if not py_transforms_util.is_pil(img):
            raise TypeError(
                py_transforms_util.augment_error_message.format(type(img)))
        return py_transforms_util.random_crop(
            img, img.size, self.padding, self.pad_if_needed,
            self.fill_value, self.padding_mode,
        )
36.736842
90
0.658548
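A hedged sketch of applying the RandomCrop operator above to a PIL image: pad by `padding` on each side, then crop back to the original size. The 32x32 input is an arbitrary choice, and it assumes MindSpore and Pillow are installed:

# Hypothetical usage; RandomCrop is the subclass defined above.
from PIL import Image

op = RandomCrop(padding=4)       # output size is derived from the input image
img = Image.new("RGB", (32, 32))
out = op(img)
assert out.size == (32, 32)      # padded to 40x40 internally, cropped back to 32x32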
a37c14fdb39ad32cb41eecf0cb0e42257fe19ade
617
py
Python
quant/common/log.py
doubleDragon/QuantBot
53a1d6c62ecece47bf777da0c0754430b706b7fd
[ "MIT" ]
7
2017-10-22T15:00:09.000Z
2019-09-19T11:45:43.000Z
quant/common/log.py
doubleDragon/QuantBot
53a1d6c62ecece47bf777da0c0754430b706b7fd
[ "MIT" ]
1
2018-01-19T16:19:40.000Z
2018-01-19T16:19:40.000Z
quant/common/log.py
doubleDragon/QuantBot
53a1d6c62ecece47bf777da0c0754430b706b7fd
[ "MIT" ]
5
2017-12-11T15:10:29.000Z
2018-12-21T17:40:58.000Z
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import logging
from logging.handlers import RotatingFileHandler


def get_logger(log_name, level=logging.DEBUG):
    """
    Logging wrapper.

    :param level: log level
    :param log_name: logger name, also used as the log file name
    :return: configured logger object
    """
    logger = logging.getLogger(log_name)
    logger.setLevel(level)
    rt_handler = RotatingFileHandler(log_name, maxBytes=100 * 1024 * 1024, backupCount=10)
    rt_handler.setLevel(level)
    formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
    rt_handler.setFormatter(formatter)
    logger.addHandler(rt_handler)
    return logger
25.708333
90
0.698541
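A hedged usage note for get_logger above: log_name doubles as both the logger name and the file the RotatingFileHandler writes to, so a path-like name is the intended argument. The name below is a placeholder:

# Hypothetical call; 'quant.log' is a placeholder file name.
import logging

logger = get_logger('quant.log', level=logging.INFO)
logger.info('rotating file handler: 100 MB per file, 10 backups')
# Caveat: calling get_logger twice with the same name attaches a second
# handler to the same logger and duplicates every line; callers should
# cache the returned logger.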
6e9f8f97f56424abab696c62b47c86b133739767
784
py
Python
06 APIs, Scraping I/kommetarezaehlen.py
manuelapaganini/20_21_Workfile
5ec3637d18cbd73256b56682d9b99547e21a24d9
[ "MIT" ]
6
2019-08-06T14:53:34.000Z
2020-10-16T19:44:16.000Z
06 APIs, Scraping I/kommetarezaehlen.py
manuelapaganini/20_21_Workfile
5ec3637d18cbd73256b56682d9b99547e21a24d9
[ "MIT" ]
1
2020-06-25T09:46:58.000Z
2020-06-25T09:46:58.000Z
06 APIs, Scraping I/kommetarezaehlen.py
manuelapaganini/20_21_Workfile
5ec3637d18cbd73256b56682d9b99547e21a24d9
[ "MIT" ]
2
2019-09-16T13:05:51.000Z
2019-09-27T09:07:49.000Z
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import sys


def kommentarezaehlen(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'xml')
    storybox = soup.find_all('div', {'class': 'text'})
    lst = []
    for elem in storybox:
        try:
            t = elem.find('h2').text
        except:
            t = 'Kein Titel'
        try:
            k = elem.find('a', {'class': 'standard comments'}).text.replace("\n", "")
        except:
            k = 'Keine Kommentare'
        mini_dict = {'Titel': t, 'Kommentar': k}
        lst.append(mini_dict)
    now = datetime.datetime.now()
    return pd.DataFrame(lst).to_csv(str(now)+'-watson.csv')


if __name__ == "__main__":
    kommentarezaehlen(sys.argv[1])
23.757576
84
0.567602
287b71be0094ff9c0a8a646a19e81197612eb1f3
3,673
py
Python
zplus_scraper/pipelines.py
tstaec/ZplusScraper
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
[ "MIT" ]
null
null
null
zplus_scraper/pipelines.py
tstaec/ZplusScraper
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
[ "MIT" ]
1
2021-07-04T12:02:57.000Z
2021-07-04T12:02:57.000Z
zplus_scraper/pipelines.py
tstaec/ZplusScraper
38c4f9534b8ee3822a80b48a6827ef4f52793c0b
[ "MIT" ]
null
null
null
from datetime import datetime

import mysql.connector  # a bare `import mysql` would not expose the connector submodule
from scrapy.exceptions import NotConfigured

from database import create_database


class ZplusscraperPipeline:
    def process_item(self, item, spider):
        return item


class DatabasePipeline(object):

    def __init__(self, db, user, passwd, host):
        self.db = db
        self.user = user
        self.passwd = passwd
        self.host = host

    @classmethod
    def from_crawler(cls, crawler):
        db_settings = crawler.settings.getdict("DB_SETTINGS")
        if not db_settings:  # if we don't define db config in settings
            raise NotConfigured  # then raise error
        db = db_settings['db']
        user = db_settings['user']
        passwd = db_settings['passwd']
        host = db_settings['host']
        return cls(db, user, passwd, host)  # returning pipeline instance

    def open_spider(self, spider):
        print('open spider was called. Initializing database')
        self.context = mysql.connector.connect(
            user=self.user,
            passwd=self.passwd,
            host=self.host,
            charset='utf8mb4',
            use_unicode=True)
        create_database(self.context, self.db)

    def close_spider(self, spider):
        print('closing spider')
        self.context.close()

    def process_item(self, item, spider):
        existing_article = self.get_existing_article(item)
        if existing_article is None:
            article_id = self.save_article(item)
        else:
            article_id = existing_article[0]
            if item['datazplus'] is not None and item['article_html'] is not None:
                self.update_article(item)
        self.save_scrape_run(item, article_id)
        return item

    def get_existing_article(self, article):
        href = article['href']
        if href is None:
            return None
        cursor = self.context.cursor(buffered=True)
        sql_command = "SELECT id, title FROM articles WHERE href = %s"
        returned_rows = cursor.execute(sql_command, (href,))
        result = cursor.fetchone()
        cursor.close()
        return result

    def save_article(self, article):
        cursor = self.context.cursor(buffered=True)
        sql_command = """INSERT INTO articles (created, last_modified, title, href, article_html)
                         VALUES (%s, %s, %s, %s, %s)"""
        str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        cursor.execute(sql_command, (str_now, str_now, article['title'], article['href'], article['article_html']))
        self.context.commit()
        row_id = cursor.lastrowid
        cursor.close()
        return row_id

    def update_article(self, article):
        cursor = self.context.cursor(buffered=True)
        # NOTE: no WHERE clause; as written this updates every row in `articles`.
        sql_command = """UPDATE articles SET last_modified = %s, article_html = %s """
        str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        cursor.execute(sql_command, (str_now, article['article_html']))
        self.context.commit()
        cursor.close()
        return None

    def save_scrape_run(self, article, article_id):
        cursor = self.context.cursor(buffered=True)
        sql_command = """INSERT INTO scrape_run (created, datazplus, article_id)
                         VALUES (%s, %s, %s)"""
        str_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        cursor.execute(sql_command, (str_now, article['datazplus'], article_id))
        self.context.commit()
        cursor.close()
        return None
34.327103
115
0.586986
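The DatabasePipeline above reads its connection parameters via crawler.settings.getdict('DB_SETTINGS') and raises NotConfigured when the key is missing. A hedged settings.py sketch (all values are placeholders; only the key names are taken from the code):

# settings.py (hypothetical values); keys match what from_crawler() reads.
DB_SETTINGS = {
    'db': 'zplus',        # placeholder database name
    'user': 'scraper',    # placeholder user
    'passwd': 'secret',   # placeholder password
    'host': 'localhost',
}
ITEM_PIPELINES = {
    'zplus_scraper.pipelines.DatabasePipeline': 300,  # module path from this repo's layout
}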
95f916a0d2f23768a5613ddf564dc02ba2c599ac
478
py
Python
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
1
2019-02-27T21:20:54.000Z
2019-02-27T21:20:54.000Z
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
null
null
null
ggit_platform/admin.py
girlsgoit/GirlsGoIT
447cd15c44ebee4af9e942a079d681be8683239f
[ "MIT" ]
null
null
null
from django.contrib import admin
from markdownx.admin import MarkdownxModelAdmin

from .models import Event
from .models import Member
from .models import MemberRole
from .models import Region
from .models import Story
from .models import Track

admin.site.register(Track, MarkdownxModelAdmin)
admin.site.register(Region)
admin.site.register(Member)
admin.site.register(MemberRole)
admin.site.register(Event, MarkdownxModelAdmin)
admin.site.register(Story, MarkdownxModelAdmin)
28.117647
47
0.83682
66ad35acb4d63dc9937ca5276e6b238cb6b79da1
10,058
py
Python
spider/got/manager/TweetManager.py
iecasszyjy/tweet_search-master
e4978521a39964c22ae46bf35d6ff17710e8e6c6
[ "MIT" ]
null
null
null
spider/got/manager/TweetManager.py
iecasszyjy/tweet_search-master
e4978521a39964c22ae46bf35d6ff17710e8e6c6
[ "MIT" ]
2
2021-03-31T18:54:16.000Z
2021-12-13T19:49:08.000Z
spider/got/manager/TweetManager.py
iecasszyjy/tweet_search-master
e4978521a39964c22ae46bf35d6ff17710e8e6c6
[ "MIT" ]
null
null
null
# NOTE: legacy Python 2 code (urllib2, cookielib, has_key, print statements).
import urllib, urllib2, json, re, datetime, sys, cookielib

from .. import models
from pyquery import PyQuery
import requests
import random

random.seed(1)


def fetch_activities(tweet_id):
    retusers = []
    favorusers = []
    re_url = 'https://twitter.com/i/activity/retweeted_popup?id=%s' % (tweet_id)
    favor_url = 'https://twitter.com/i/activity/favorited_popup?id=%s' % (tweet_id)
    headers = {
        'Host': "twitter.com",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s' % (random.randint(0, 999)),
        'Accept': "application/json, text/javascript, */*; q=0.01",
        'Accept-Language': "de,en-US;q=0.7,en;q=0.3",
        'X-Requested-With': "XMLHttpRequest",
        'Referer': 'https://twitter.com/',
        'Connection': "keep-alive",
    }
    re_users = PyQuery(requests.get(re_url, headers=headers).json()['htmlUsers'])('ol.activity-popup-users')
    for re_user in re_users('div.account'):
        userPQ = PyQuery(re_user)
        userd = {
            'screen_name': userPQ.attr('data-screen-name'),
            'user_id': userPQ.attr('data-user-id'),
            'data_name': userPQ.attr('data-name'),
            'avatar_src': userPQ('img.avatar').attr('src'),
            'userbadges': userPQ('span.UserBadges').text(),
            'bio': userPQ('p.bio').text(),
        }
        retusers.append({userd['screen_name']: userd})
    favor_users = PyQuery(requests.get(favor_url, headers=headers).json()['htmlUsers'])('ol.activity-popup-users')
    for favor_user in favor_users('div.account'):
        userPQ = PyQuery(favor_user)
        userd = {
            'screen_name': userPQ.attr('data-screen-name'),
            'user_id': userPQ.attr('data-user-id'),
            'data_name': userPQ.attr('data-name'),
            'avatar_src': userPQ('img.avatar').attr('src'),
            'userbadges': userPQ('span.UserBadges').text(),
            'bio': userPQ('p.bio').text(),
        }
        favorusers.append({userd['screen_name']: userd})
    return retusers, favorusers


def fetch_entities(tweetPQ):
    hashtags = []
    urls = []
    for url in tweetPQ('p.js-tweet-text a'):
        d = dict(url.items())
        if d.has_key('data-expanded-url'):  # d['class'] == 'twitter-timeline-link'
            urls.append({'href': d['href'], 'expanded_url': d['data-expanded-url']})
        if d['href'].startswith('/hashtag/'):
            hashtags.append(d['href'].split('?')[0].split('/')[-1])
    tweetPQ('p.js-tweet-text a.twitter-timeline-link').remove()
    return hashtags, urls


def getTweet(tweetHTML):
    tweetPQ = PyQuery(tweetHTML)
    tweet = models.Tweet()

    # base info
    id = tweetPQ.attr("data-tweet-id")
    conversation_id = tweetPQ.attr('data-conversation-id')
    dateSec = int(tweetPQ("small.time span.js-short-timestamp").attr("data-time"))
    # permalink = tweetPQ.attr("data-permalink-path")

    # user
    screen_name = tweetPQ.attr('data-screen-name')
    user_id = tweetPQ.attr('data-user-id')
    data_name = tweetPQ.attr('data-name')
    avatar_src = tweetPQ('img.avatar').attr('src')
    userbadges = tweetPQ('span.UserBadges').text()

    # text
    hashtags, urls = fetch_entities(tweetPQ)
    mentions = tweetPQ.attr("data-mentions")
    lang = tweetPQ("p.js-tweet-text").attr('lang')
    raw_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
    standard_text = re.sub(r"\s+", " ", tweetPQ("p.js-tweet-text").text().replace('# ', '').replace('@ ', ''))
    tweetPQ('p.js-tweet-text')('a').remove()
    tweetPQ('p.js-tweet-text')('img').remove()
    clean_text = tweetPQ("p.js-tweet-text").text()

    # media
    quote_id = tweetPQ('div.QuoteTweet a.QuoteTweet-link').attr('data-conversation-id')
    has_cards = tweetPQ.attr('data-has-cards')
    card_url = tweetPQ('div.js-macaw-cards-iframe-container').attr('data-card-url')
    img_src = tweetPQ('div.AdaptiveMedia-container img').attr('src')
    video_src = tweetPQ('div.AdaptiveMedia-container video').attr('src')
    geo = ''
    geoSpan = tweetPQ('span.Tweet-geo')
    if len(geoSpan) > 0:
        geo = geoSpan.attr('title')

    # action
    retweet_id = tweetPQ.attr('data-retweet-id')
    retweeter = tweetPQ.attr('data-retweeter')
    # retusers, favorusers = fetch_activities(id)
    replies = int(tweetPQ("span.ProfileTweet-action--reply span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
    retweets = int(tweetPQ("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))
    favorites = int(tweetPQ("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount").attr("data-tweet-stat-count").replace(",", ""))

    ## tweet model
    tweet.id = id
    tweet.conversation_id = conversation_id
    tweet.is_reply = tweet.id != tweet.conversation_id
    tweet.created_at = datetime.datetime.fromtimestamp(dateSec)
    # tweet.permalink = 'https://twitter.com' + permalink

    # user
    tweet.user = {
        'screen_name': screen_name,
        'user_id': user_id,
        'data_name': data_name,
        'avatar_src': avatar_src,
        'userbadges': userbadges,
    }

    # media
    tweet.media = {
        'quote_id': quote_id,
        'has_cards': has_cards,
        'card_url': card_url,
        'img_src': img_src,
        'video_src': video_src,
        'geo': geo,
    }

    # text
    tweet.hashtags = hashtags
    tweet.urls = urls
    tweet.mentions = mentions.split(' ') if mentions != None else None
    tweet.lang = lang
    tweet.raw_text = raw_text
    tweet.standard_text = standard_text
    # tweet.clean_text = clean_text

    # action
    tweet.action = {
        # 'retusers': retusers,
        # 'favorusers': favorusers,
        'replies': replies,
        'retweets': retweets,
        'favorites': favorites,
        'retweet_id': retweet_id,
        'retweeter': retweeter,
        'is_retweet': True if retweet_id != None else False,
    }
    return tweet


class TweetManager:

    def __init__(self):
        pass

    @staticmethod
    def getTweetsById(tweet_id):
        url = 'https://twitter.com/xxx/status/%s' % (tweet_id)
        headers = {
            'Host': "twitter.com",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s' % (random.randint(0, 999)),
            'Accept': "application/json, text/javascript, */*; q=0.01",
            'Accept-Language': "de,en-US;q=0.7,en;q=0.3",
            'X-Requested-With': "XMLHttpRequest",
            'Referer': 'https://twitter.com/',
            'Connection': "keep-alive",
        }
        tweets = PyQuery(requests.get(url, headers=headers).content)('div.js-original-tweet')
        for tweetHTML in tweets:
            return getTweet(tweetHTML)

    @staticmethod
    def getTweets(tweetCriteria, refreshCursor='', receiveBuffer=None, bufferLength=100, proxy=None):
        results = []
        resultsAux = []
        cookieJar = cookielib.CookieJar()

        if hasattr(tweetCriteria, 'username') and (tweetCriteria.username.startswith("\'") or tweetCriteria.username.startswith("\"")) and (tweetCriteria.username.endswith("\'") or tweetCriteria.username.endswith("\"")):
            tweetCriteria.username = tweetCriteria.username[1:-1]

        active = True
        while active:
            json = TweetManager.getJsonReponse(tweetCriteria, refreshCursor, cookieJar, proxy)
            if len(json['items_html'].strip()) == 0:
                break
            if not json.has_key('min_position'):
                break
            refreshCursor = json['min_position']
            if refreshCursor == None:
                break
            tweets = PyQuery(json['items_html'])('div.js-stream-tweet')
            if len(tweets) == 0:
                break
            for tweetHTML in tweets:
                tweet = getTweet(tweetHTML)
                if hasattr(tweetCriteria, 'sinceTimeStamp'):
                    if tweet.created_at < tweetCriteria.sinceTimeStamp:
                        active = False
                        break
                if hasattr(tweetCriteria, 'untilTimeStamp'):
                    if tweet.created_at <= tweetCriteria.untilTimeStamp:
                        results.append(tweet.__dict__)
                else:
                    results.append(tweet.__dict__)
                    # resultsAux.append(tweet)
                if receiveBuffer and len(resultsAux) >= bufferLength:
                    receiveBuffer(resultsAux)
                    resultsAux = []
                if tweetCriteria.maxTweets > 0 and len(results) >= tweetCriteria.maxTweets:
                    active = False
                    break

        if receiveBuffer and len(resultsAux) > 0:
            receiveBuffer(resultsAux)

        return results

    @staticmethod
    def getJsonReponse(tweetCriteria, refreshCursor, cookieJar, proxy):
        url = "https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s"

        urlGetData = ''
        if hasattr(tweetCriteria, 'username'):
            urlGetData += ' from:' + tweetCriteria.username
        if hasattr(tweetCriteria, 'querySearch'):
            urlGetData += ' ' + tweetCriteria.querySearch
        if hasattr(tweetCriteria, 'near'):
            urlGetData += "&near:" + tweetCriteria.near + " within:" + tweetCriteria.within
        if hasattr(tweetCriteria, 'since'):
            urlGetData += ' since:' + tweetCriteria.since
        if hasattr(tweetCriteria, 'until'):
            urlGetData += ' until:' + tweetCriteria.until
        if hasattr(tweetCriteria, 'topTweets'):
            if tweetCriteria.topTweets:
                url = "https://twitter.com/i/search/timeline?q=%s&src=typd&max_position=%s"
        if hasattr(tweetCriteria, 'tweetType'):
            url = url + tweetCriteria.tweetType

        url = url % (urllib.quote(urlGetData), refreshCursor)

        ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.%s' % (random.randint(0, 999))
        headers = [
            ('Host', "twitter.com"),
            ('User-Agent', ua),
            # Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36
            # Mozilla/5.0 (Windows NT 6.1; Win64; x64)
            ('Accept', "application/json, text/javascript, */*; q=0.01"),
            ('Accept-Language', "de,en-US;q=0.7,en;q=0.3"),
            ('X-Requested-With', "XMLHttpRequest"),
            ('Referer', url),
            ('Connection', "keep-alive")
        ]

        if proxy:
            opener = urllib2.build_opener(urllib2.ProxyHandler({'http': proxy, 'https': proxy}), urllib2.HTTPCookieProcessor(cookieJar))
        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
        opener.addheaders = headers

        try:
            response = opener.open(url)
            jsonResponse = response.read()
        except Exception, e:
            print "Twitter weird response. Try to see on browser: https://twitter.com/search?q=%s&src=typd" % urllib.quote(urlGetData)
            raise Exception(e.message)
            # sys.exit()
            # return None

        dataJson = json.loads(jsonResponse)
        return dataJson
34.094915
214
0.680155
06d235db1b1f699e83bac4c384e8ab53dcbb9ca9
103
py
Python
dtdocker/containers/__init__.py
oxsoftdev/dt-docker
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
[ "MIT" ]
null
null
null
dtdocker/containers/__init__.py
oxsoftdev/dt-docker
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
[ "MIT" ]
null
null
null
dtdocker/containers/__init__.py
oxsoftdev/dt-docker
8ba30bf5ca2a7b89f3c7658a7768b16dbf8514a5
[ "MIT" ]
null
null
null
from MssqlContainer import MssqlContainer
from RedisContainers import RedisContainers, RedisContainer
25.75
59
0.893204
06d61d6128d3d24a44e5f423459d0349beadddf4
834
py
Python
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
ckuehnel/MicroPython
c57d0df744fe5301e755bd139b6cc56d69c442fd
[ "MIT" ]
1
2021-03-22T18:38:43.000Z
2021-03-22T18:38:43.000Z
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
ckuehnel/MicroPython
c57d0df744fe5301e755bd139b6cc56d69c442fd
[ "MIT" ]
null
null
null
Raspberry Pi Pico/Pi_Pico_TrafficLight.py
ckuehnel/MicroPython
c57d0df744fe5301e755bd139b6cc56d69c442fd
[ "MIT" ]
1
2021-02-06T10:07:36.000Z
2021-02-06T10:07:36.000Z
# Pi_Pico_TrafficLight.py
# Controlling Neopixel by PIO to simulate a traffic light
# using ws2812b library by benevpi
# https://github.com/benevpi/pico_python_ws2812b
import time
import ws2812b

NUM_PIX = 3  # this is for M5Stack RGB LED
PIN_NUM = 16
light = ws2812b.ws2812b(NUM_PIX, 0, PIN_NUM)

RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0)
COLORS = (RED, YELLOW, GREEN, BLACK)


def lights(L0, L1, L2, t):
    color = L0
    light.set_pixel(0, color[0], color[1], color[2])
    color = L1
    light.set_pixel(1, color[0], color[1], color[2])
    color = L2
    light.set_pixel(2, color[0], color[1], color[2])
    light.show()
    time.sleep(t)


while True:
    lights(RED, BLACK, BLACK, 2)
    lights(RED, YELLOW, BLACK, 1)
    lights(BLACK, BLACK, GREEN, 3)
    lights(BLACK, YELLOW, BLACK, 2)
24.529412
57
0.655875
b081a38729addd1e06a8d879010ab8225a044073
9,011
py
Python
research/nlp/tprr/src/reader_downstream.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/nlp/tprr/src/reader_downstream.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/nlp/tprr/src/reader_downstream.py
leelige/mindspore
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""downstream Model for reader"""

import numpy as np
from mindspore import nn, ops
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
from mindspore import dtype as mstype

dst_type = mstype.float16
dst_type2 = mstype.float32


class Linear(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_weight_shape, linear_bias_shape):
        """init function"""
        super(Linear, self).__init__()
        self.matmul = nn.MatMul()
        self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, linear_weight_shape).astype(np.float32)),
                                  name=None)
        self.add = P.Add()
        self.add_bias = Parameter(Tensor(np.random.uniform(0, 1, linear_bias_shape).astype(np.float32)),
                                  name=None)
        self.relu = nn.ReLU()

    def construct(self, hidden_state):
        """construct function"""
        output = self.matmul(ops.Cast()(hidden_state, dst_type), ops.Cast()(self.matmul_w, dst_type))
        output = self.add(ops.Cast()(output, dst_type2), self.add_bias)
        output = self.relu(output)
        return output


class BertLayerNorm(nn.Cell):
    """Normalization module of reader downstream"""
    def __init__(self, bert_layer_norm_weight_shape, bert_layer_norm_bias_shape, eps=1e-12):
        """init function"""
        super(BertLayerNorm, self).__init__()
        self.reducemean = P.ReduceMean(keep_dims=True)
        self.sub = P.Sub()
        self.pow = P.Pow()
        self.add = P.Add()
        self.sqrt = P.Sqrt()
        self.div = P.Div()
        self.mul = P.Mul()
        self.variance_epsilon = eps
        self.bert_layer_norm_weight = Parameter(
            Tensor(np.random.uniform(0, 1, bert_layer_norm_weight_shape).astype(np.float32)), name=None)
        self.bert_layer_norm_bias = Parameter(
            Tensor(np.random.uniform(0, 1, bert_layer_norm_bias_shape).astype(np.float32)), name=None)

    def construct(self, x):
        """construct function"""
        u = self.reducemean(x, -1)
        s = self.reducemean(self.pow(self.sub(x, u), 2), -1)
        x = self.div(self.sub(x, u), self.sqrt(self.add(s, self.variance_epsilon)))
        output = self.mul(self.bert_layer_norm_weight, x)
        output = self.add(output, self.bert_layer_norm_bias)
        return output


class SupportingOutputLayer(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_1_weight_shape, linear_1_bias_shape,
                 bert_layer_norm_weight_shape, bert_layer_norm_bias_shape):
        """init function"""
        super(SupportingOutputLayer, self).__init__()
        self.linear_1 = Linear(linear_weight_shape=linear_1_weight_shape,
                               linear_bias_shape=linear_1_bias_shape)
        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
                                             bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
        self.matmul = nn.MatMul()
        self.matmul_w = Parameter(Tensor(np.random.uniform(0, 1, (8192, 1)).astype(np.float32)), name=None)

    def construct(self, x):
        """construct function"""
        output = self.linear_1(x)
        output = self.bert_layer_norm(output)
        output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.matmul_w, dst_type))
        return ops.Cast()(output, dst_type2)


class PosOutputLayer(nn.Cell):
    """module of reader downstream"""
    def __init__(self, linear_weight_shape, linear_bias_shape,
                 bert_layer_norm_weight_shape, bert_layer_norm_bias_shape):
        """init function"""
        super(PosOutputLayer, self).__init__()
        self.linear_1 = Linear(linear_weight_shape=linear_weight_shape,
                               linear_bias_shape=linear_bias_shape)
        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=bert_layer_norm_weight_shape,
                                             bert_layer_norm_bias_shape=bert_layer_norm_bias_shape)
        self.matmul = nn.MatMul()
        self.linear_2_weight = Parameter(Tensor(np.random.uniform(0, 1, (4096, 1)).astype(np.float32)), name=None)
        self.add = P.Add()
        self.linear_2_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)

    def construct(self, state):
        """construct function"""
        output = self.linear_1(state)
        output = self.bert_layer_norm(output)
        output = self.matmul(ops.Cast()(output, dst_type), ops.Cast()(self.linear_2_weight, dst_type))
        output = self.add(ops.Cast()(output, dst_type2), self.linear_2_bias)
        return output


class MaskInvalidPos(nn.Cell):
    """module of reader downstream"""
    def __init__(self):
        """init function"""
        super(MaskInvalidPos, self).__init__()
        self.squeeze = P.Squeeze(2)
        self.sub = P.Sub()
        self.mul = P.Mul()

    def construct(self, pos_pred, context_mask):
        """construct function"""
        output = self.squeeze(pos_pred)
        invalid_pos_mask = self.mul(self.sub(1.0, context_mask), 1e30)
        output = self.sub(output, invalid_pos_mask)
        return output


class Reader_Downstream(nn.Cell):
    """Downstream model for reader"""
    def __init__(self):
        """init function"""
        super(Reader_Downstream, self).__init__()
        self.add = P.Add()

        self.para_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
        self.para_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
                                                       linear_1_bias_shape=(8192,),
                                                       bert_layer_norm_weight_shape=(8192,),
                                                       bert_layer_norm_bias_shape=(8192,))

        self.sent_bias = Parameter(Tensor(np.random.uniform(0, 1, (1,)).astype(np.float32)), name=None)
        self.sent_output_layer = SupportingOutputLayer(linear_1_weight_shape=(4096, 8192),
                                                       linear_1_bias_shape=(8192,),
                                                       bert_layer_norm_weight_shape=(8192,),
                                                       bert_layer_norm_bias_shape=(8192,))

        self.start_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
                                                 linear_bias_shape=(4096,),
                                                 bert_layer_norm_weight_shape=(4096,),
                                                 bert_layer_norm_bias_shape=(4096,))

        self.end_output_layer = PosOutputLayer(linear_weight_shape=(4096, 4096),
                                               linear_bias_shape=(4096,),
                                               bert_layer_norm_weight_shape=(4096,),
                                               bert_layer_norm_bias_shape=(4096,))

        self.mask_invalid_pos = MaskInvalidPos()

        self.gather_input_weight = Tensor(np.array(0))
        self.gather = P.Gather()

        self.type_linear_1 = nn.Dense(in_channels=4096, out_channels=4096, has_bias=True)
        self.relu = nn.ReLU()
        self.bert_layer_norm = BertLayerNorm(bert_layer_norm_weight_shape=(4096,),
                                             bert_layer_norm_bias_shape=(4096,))
        self.type_linear_2 = nn.Dense(in_channels=4096, out_channels=3, has_bias=True)

    def construct(self, para_state, sent_state, state, context_mask):
        """construct function"""
        para_logit = self.para_output_layer(para_state)
        para_logit = self.add(para_logit, self.para_bias)

        sent_logit = self.sent_output_layer(sent_state)
        sent_logit = self.add(sent_logit, self.sent_bias)

        start = self.start_output_layer(state)
        start = self.mask_invalid_pos(start, context_mask)

        end = self.end_output_layer(state)
        end = self.mask_invalid_pos(end, context_mask)

        cls_emb = self.gather(state, self.gather_input_weight, 1)
        q_type = self.type_linear_1(cls_emb)
        q_type = self.relu(q_type)
        q_type = self.bert_layer_norm(q_type)
        q_type = self.type_linear_2(q_type)

        return q_type, start, end, para_logit, sent_logit
46.932292
118
0.619021
7c08672a1f4b3a7a17149a2b57e2b2a120ca8857
1,046
py
Python
src/onegov/wtfs/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/wtfs/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
src/onegov/wtfs/upgrade.py
politbuero-kampagnen/onegov-cloud
20148bf321b71f617b64376fe7249b2b9b9c4aa9
[ "MIT" ]
null
null
null
""" Contains upgrade tasks that are executed when the application is being upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`. """ from onegov.core.upgrade import upgrade_task @upgrade_task('Add payment types') def add_payment_types(context): session = context.session if context.has_table('wtfs_payment_type'): query = session.execute('SELECT count(*) FROM wtfs_payment_type') if not query.scalar(): session.execute(""" INSERT INTO wtfs_payment_type ("name", "price_per_quantity") VALUES ('normal', 700), ('spezial', 850); """) query = session.execute(""" UPDATE groups SET meta = CASE WHEN meta @> '{"_price_per_quantity"\\:850}'::jsonb THEN jsonb_set(meta, '{payment_type}', '"spezial"') ELSE jsonb_set(meta, '{payment_type}', '"normal"') END WHERE groups.meta ? '_price_per_quantity'; """)
36.068966
76
0.578394
b01c8f325df0368355928771fb0cdac6ea1b83fc
553
py
Python
exercises/es/test_01_04.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/es/test_01_04.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/es/test_01_04.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
def test():
    assert (
        "if token.like_num" in __solution__
    ), "¿Estás revisando el atributo del token like_num?"
    assert (
        'next_token.text == "%"' in __solution__
    ), "¿Estás revisando si el texto del siguiente token es un símbolo de porcentaje?"
    assert (
        next_token.text == "%"
    ), "¿Estás revisando si el texto del siguiente token es un símbolo de porcentaje?"

    __msg__.good(
        "¡Bien hecho! Como puedes ver hay muchos análisis poderosos que puedes hacer "
        "usando los tokens y sus atributos."
    )
36.866667
120
0.65642
05a8c4fd95b6efca543f543621dd2cffe6ce31a4
1,092
py
Python
DataProcess/my_utils/myplot.py
ZhangQiHang-98/RFID_Scirpt
0f74087add4cf16e2d201ad4f31cc1abd287db7e
[ "MIT" ]
3
2021-12-24T04:52:03.000Z
2021-12-27T02:14:49.000Z
DataProcess/my_utils/myplot.py
ZhangQiHang-98/RFID_Scirpt
0f74087add4cf16e2d201ad4f31cc1abd287db7e
[ "MIT" ]
null
null
null
DataProcess/my_utils/myplot.py
ZhangQiHang-98/RFID_Scirpt
0f74087add4cf16e2d201ad4f31cc1abd287db7e
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project :DataProcess
@File    :myplot.py
@Author  :Zhang Qihang
@Date    :2021/11/8 14:41
"""
import seaborn as sns
import matplotlib.pyplot as plt
import os
import pandas as pd
import glob
import config
import myunwrap
import numpy as np
import scipy.constants as C
from sklearn.preprocessing import scale


def phase_heatmap(phase_mat):
    sns.set_context({"figure.figsize": (8, 8)})
    sns.heatmap(phase_mat)
    plt.show()


def normalization(data):
    _range = np.max(data) - np.min(data)
    return (data - np.min(data)) / _range


def phase_scatter(df):
    phases = df["phase"].values
    times = df["time"].values
    plt.plot(times, phases)
    plt.show()


if __name__ == '__main__':
    test_path = glob.glob(os.path.join(config.PEN_PATH, '*.csv'))
    file_path = "../20220105104342normal.csv"
    df = pd.read_csv(file_path, header=None)
    df.columns = config.COMMON_COLUMNS
    df["phase"] = 2 * C.pi - df["phase"]
    phase_scatter(df)
    # print(test_path)
    # for path in test_path:
    #     phase_scatter(path)
22.285714
65
0.667582
bbd9406456d0020c9cdbba07ff1aed2650cbfe2a
1,631
py
Python
barcode.py
T9C5F/packstation-barcode
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
[ "MIT" ]
45
2018-11-22T10:18:11.000Z
2021-06-17T07:20:25.000Z
barcode.py
T9C5F/packstation-barcode
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
[ "MIT" ]
2
2018-11-18T14:55:43.000Z
2020-10-14T15:15:08.000Z
barcode.py
T9C5F/packstation-barcode
3d7e719d6931d2d9e4834e7aaef2a892bd564aa1
[ "MIT" ]
4
2018-12-27T22:02:13.000Z
2020-10-18T14:29:57.000Z
#!/usr/bin/python3
# coding: utf-8

import os
import luhn  # sudo apt install python3-pip && sudo pip3 install luhn

# Deutsche Post DHL simply removed the card readers from Packstation lockers
# and replaced them with barcode readers, without automatically sending
# Goldcard owners a new card in time. You can still pick up parcels at such
# stations, but you now have to type in your PostNummer manually.

# With this script we generate the matching barcode ourselves, since a new
# card has to be ordered from DHL, the old card is blocked immediately, and
# the new one can take up to two weeks to arrive. That is about as
# customer-unfriendly as it gets.

# Security-wise nothing is circumvented: you can already pick up parcels by
# typing in the PostNummer, and the mapping from PostNummer to barcode has
# been described many times on the net.

# The 16-digit ITF barcode is fairly simple:
# "3" + [enough '0's to pad the number to 16 digits in total]
#     + [Postnummer*631] + [Luhn check digit over 'Postnummer*631']
# http://www.frei-tag.com/index.php?/archives/445-DHL-Packstation-ohne-Goldcard.html


def generate(number):
    postnummer = int(number)
    number = postnummer * 631
    luhnnr = luhn.generate(str(number))
    number = "3" + (str(number) + str(luhnnr)).zfill(15)
    return(str(number))


# Example with a randomly generated number:
# 20281557 yields 3000127976624677
assert generate("20281557") == "3000127976624677"

# Interleaved 2 of 5 ITF barcode
os.system("xdg-open https://barcode.tec-it.com/de/Code25IL?data=" + generate(20281557))
41.820513
87
0.774985
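The script above delegates the check digit to the luhn package. As a hedged illustration of what that digit is, here is a minimal standalone Luhn sketch (the standard algorithm, not the package's own code; the function name is made up):

# Illustrative Luhn check-digit computation; barcode.py itself uses `luhn.generate`.
def luhn_digit(number: str) -> int:
    total = 0
    for i, ch in enumerate(reversed(number)):
        d = int(ch)
        if i % 2 == 0:  # double every second digit, starting from the rightmost
            d *= 2
            if d > 9:
                d -= 9  # same as summing the two digits of the product
        total += d
    return (10 - total % 10) % 10

assert luhn_digit(str(20281557 * 631)) == 7  # the trailing 7 in 3000127976624677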
a548d4044babcf769d72c98f4c36e053030cd2ea
214
py
Python
src/bo4e/enum/landescode.py
bo4e/BO4E-python
28b12f853c8a496d14b133759b7aa2d6661f79a0
[ "MIT" ]
1
2022-03-02T12:49:44.000Z
2022-03-02T12:49:44.000Z
src/bo4e/enum/landescode.py
bo4e/BO4E-python
28b12f853c8a496d14b133759b7aa2d6661f79a0
[ "MIT" ]
21
2022-02-04T07:38:46.000Z
2022-03-28T14:01:53.000Z
src/bo4e/enum/landescode.py
bo4e/BO4E-python
28b12f853c8a496d14b133759b7aa2d6661f79a0
[ "MIT" ]
null
null
null
""" Der ISO-Landescode als Enumeration. """ from enum import Enum from iso3166 import countries alpha2codes = {c.alpha2: c.alpha2 for c in countries} Landescode = Enum("Landescode", alpha2codes) # type: ignore
19.454545
60
0.742991
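A hedged usage sketch for the dynamically built enum above: the functional Enum API turns each ISO 3166-1 alpha-2 code into a member whose name and value are both the code string (assumes the iso3166 package is installed):

# Hypothetical usage of the Landescode enum defined above.
assert Landescode.DE.value == "DE"
assert Landescode["CH"] is Landescode.CH  # lookup by string code also works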
a553923330965bb513b635f568c86575b16db188
1,410
py
Python
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
jonasrdt/Wirtschaftsinformatik2
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
[ "MIT" ]
1
2022-03-23T09:40:39.000Z
2022-03-23T09:40:39.000Z
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
jonasrdt/Wirtschaftsinformatik2
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
[ "MIT" ]
null
null
null
WiSe-2122/Wiederholung/Gruppe-C/Online-Banking.py
jonasrdt/Wirtschaftsinformatik2
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
[ "MIT" ]
null
null
null
ueberweisungslimit = 50_000
kontostand = 3_500
ungueltiger_betrag = True


# Funktionsdefinition
def trenner(anzahl):
    for zaehler in range(anzahl):
        print("-", end="")
    print()


# Funktionsaufruf
trenner(50)
print("Willkommen beim Online-Banking der DKB")
print("Ihr Überweisungslimit beträgt", ueberweisungslimit, "€")
print("Ihr aktueller Kontostand beträgt", kontostand, "€")
trenner(50)

while ungueltiger_betrag:
    try:
        betrag = round(float(input("Bitte geben Sie einen Überweisungsbetrag in € ein: ")), 2)
        if betrag > ueberweisungslimit:
            print("Ihr Betrag liegt über dem Überweisungslimit von", ueberweisungslimit, "€.")
        elif betrag < 0:
            print("Bitte geben Sie nur positive Zahlen für eine Überweisung ein.")
        elif betrag > kontostand:
            print("Leider reicht Ihr Kontostand i.H.v.", kontostand, "€ nicht für die Überweisung i.H.v.", betrag, "€ aus.")
            entscheidung = input("Wollen Sie einen niedrigeren Betrag überweisen (ja/nein): ")
            if entscheidung.lower() == "nein":
                ungueltiger_betrag = False
        else:
            print("Ihre Überweisung i.H.v.", betrag, "€ wurde durchgeführt.")
            kontostand -= betrag
            print("Ihr neuer Kontostand beträgt:", kontostand, "€.")
            ungueltiger_betrag = False
    except:
        print("Bitte geben Sie nur Zahlen ein.")
38.108108
122
0.647518
a5916d10fd080c93d3ed806b1372e236b4a374e6
1,078
py
Python
Vergleich-Display/upy.py
aboehm/CLT2019
51b9b5674b5ed18297c5ee7e825888d632d96a0e
[ "BSD-2-Clause" ]
1
2019-07-01T11:59:06.000Z
2019-07-01T11:59:06.000Z
Vergleich-Display/upy.py
aboehm/CLT2019
51b9b5674b5ed18297c5ee7e825888d632d96a0e
[ "BSD-2-Clause" ]
null
null
null
Vergleich-Display/upy.py
aboehm/CLT2019
51b9b5674b5ed18297c5ee7e825888d632d96a0e
[ "BSD-2-Clause" ]
null
null
null
import machine
from display import ssd1306

display = None
run = 0


def setup():
    global display
    from machine import I2C, Pin
    import uos
    _, nodename, _, _, _ = uos.uname()
    if nodename == 'esp32':
        i2c = I2C(freq=400000, scl=machine.Pin(22), sda=machine.Pin(21))
    elif nodename == 'pyboard':
        i2c = I2C(freq=400000, scl=machine.Pin('X9'), sda=machine.Pin('X10'))
    else:
        raise Exception('No compatible board found')
    display = ssd1306.SSD1306_I2C(128, 64, i2c, addr=0x3c)


def stress(show=True):
    global display, run
    display.fill(0)
    run += 1
    display.text('CLT2019 %i uPython' % (run), 0, 0)
    i = 0
    for y in range(8, 56, 8):
        for x in range(0, 119, 8):
            display.text('%c' % (ord('0') + (((run + i) * 17) % 36)), x, y)
            i += 1
    if show:
        display.show()


def loop():
    loops = 100
    print('Stressing library and io ...')
    for i in range(loops):
        stress(show=True)
    print('Done')


try:
    setup()
    while True:
        loop()
except:
    pass
19.25
77
0.558442
3c8ae15a51f7ed8aa4a4793f2527828936db7e90
294
py
Python
FUNDASTORE/APPS/PRODUCTOS/forms.py
GabrielB-07/FundaStore-cgb
b509a9743a651344b32dd7a40ab789f1db48e54b
[ "CC0-1.0" ]
null
null
null
FUNDASTORE/APPS/PRODUCTOS/forms.py
GabrielB-07/FundaStore-cgb
b509a9743a651344b32dd7a40ab789f1db48e54b
[ "CC0-1.0" ]
null
null
null
FUNDASTORE/APPS/PRODUCTOS/forms.py
GabrielB-07/FundaStore-cgb
b509a9743a651344b32dd7a40ab789f1db48e54b
[ "CC0-1.0" ]
null
null
null
from django import forms

from .models import Producto


class FormularioProducto(forms.ModelForm):
    class Meta:
        model = Producto
        fields = '__all__'
        labels = {
            'pro_nombre': 'NOMBRE',
            'pro_precio': 'PRECIO',
            'pro_stock': 'STOCK',
            'pro_descripcion': 'DESCRIPCIÓN',
        }
29.4
118
0.663265