'%(img)\n f.write(template)\n # +++your code here+++\n\n\n\ndef main():\n args = sys.argv[1:]\n\n if not args:\n print('usage: [--todir dir] logfile ')\n sys.exit(1)\n\n todir = ''\n if args[0] == '--todir':\n todir = args[1]\n del args[0:2]\n\n img_urls = read_urls(args[0])\n\n if todir:\n download_images(img_urls, todir)\n else:\n print('\\n'.join(img_urls))\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":2594,"string":"2,594"}}},{"rowIdx":126402,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_config.py"},"max_stars_repo_name":{"kind":"string","value":"agateau/nanoci"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169075"},"content":{"kind":"string","value":"import os\n\nimport yaml\n\nfrom nanoci.config import Config\n\nfrom nanoci.fileutils import mkdir_p\n\n\ndef create_project(tmpdir, name, build=None, notify=None):\n project_path = os.path.join(tmpdir, 'projects', name + '.yaml')\n mkdir_p(os.path.dirname(project_path))\n dct = {}\n if build is not None:\n dct['build'] = build\n if notify is not None:\n dct['notify'] = notify\n with open(project_path, 'wt') as fp:\n yaml.dump(dct, fp)\n\n\ndef test_projects(tmpdir):\n tmpdir = str(tmpdir)\n config = Config(config_dir=tmpdir)\n create_project(tmpdir, 'foo', build=[{'script':'make'}])\n create_project(tmpdir, 'bar')\n\n assert config.has_project('foo')\n assert config.has_project('bar')\n assert not config.has_project('baz')\n\n assert config.get_project_path('foo') == os.path.join(tmpdir, 'projects/foo.yaml')\n"},"size":{"kind":"number","value":853,"string":"853"}}},{"rowIdx":126403,"cells":{"max_stars_repo_path":{"kind":"string","value":"backup_exporter/src/main.py"},"max_stars_repo_name":{"kind":"string","value":"kreako/soklaki"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170241"},"content":{"kind":"string","value":"import os\nimport time\nimport gzip\nfrom datetime import datetime\nfrom prometheus_client import start_http_server, Summary, Gauge, Counter\nfrom dotenv import load_dotenv\nimport requests\nfrom path import Path\n\n\nload_dotenv()\n\n\nEXPORTER_PORT = os.getenv(\"EXPORTER_PORT\")\nBACKUP_PATH = Path(os.getenv(\"BACKUP_PATH\"))\n\nprocess_time = Summary(\"processing_seconds\", \"Time spent processing backup\")\n\nbackup_pg_dump_count = Gauge(\n \"backup_pg_dump_count\", \"Number of pg dump in backup folder\"\n)\nbackup_pg_dump_size = Gauge(\n \"backup_pg_dump_size\", \"Size of pg_dump folder in backup folder\"\n)\nbackup_last_pg_dump_size = Gauge(\"backup_last_pg_dump_size\", \"Size of the last pg_dump\")\nbackup_last_pg_dump_lines = Gauge(\n \"backup_last_pg_dump_lines\", \"Number of line of the last pg_dump\"\n)\nbackup_last_pg_dump_age = Gauge(\n \"backup_last_pg_dump_age\", \"Age in days of the last pg_dump\"\n)\n\nbackup_reports_count = Gauge(\n \"backup_reports_count\", \"Number of reports in backup folder\"\n)\nbackup_reports_size = Gauge(\"backup_reports_size\", \"Size of reports in backup folder\")\n\nfailure = Counter(\"backup_failure\", \"number of failure of the backup processing\")\n\n\n@process_time.time()\ndef process_backup():\n pg_dump = BACKUP_PATH / \"pg_dump\"\n backup_pg_dump_count.set(len(pg_dump.glob(\"*.sql.gz\")))\n 
backup_pg_dump_size.set(sum([f.getsize() for f in pg_dump.walkfiles()]))\n\n # Search for last backup\n l = pg_dump.glob(\"*.sql.gz\")\n l.sort()\n last_backup = l[-1]\n\n backup_last_pg_dump_size.set(last_backup.getsize())\n\n # name is like dump_2021_05_29_14_03_20.sql.gz\n _, year, month, day, hour, minute, second = (\n last_backup.basename().split(\".\")[0].split(\"_\")\n )\n last_backup_datetime = datetime(\n int(year), int(month), int(day), int(hour), int(minute), int(second)\n )\n t = datetime.now() - last_backup_datetime\n backup_last_pg_dump_age.set(t.total_seconds() / (60 * 60 * 24))\n\n with gzip.open(last_backup, \"rb\") as f:\n backup_last_pg_dump_lines.set(len(f.readlines()))\n\n reports = BACKUP_PATH / \"reports\"\n f_reports = [f for f in reports.walkfiles() if f.endswith(\"pdf\")]\n backup_reports_count.set(len(f_reports))\n backup_reports_size.set(sum([f.getsize() for f in f_reports]))\n\n\nif __name__ == \"__main__\":\n # Start up the server to expose the metrics.\n start_http_server(int(EXPORTER_PORT))\n # Generate some ping requests.\n while True:\n try:\n process_backup()\n except Exception as e:\n print(e)\n failure.inc()\n # Every 60 seconds\n time.sleep(60)"},"size":{"kind":"number","value":2591,"string":"2,591"}}},{"rowIdx":126404,"cells":{"max_stars_repo_path":{"kind":"string","value":"pygame_geometry/dictmap.py"},"max_stars_repo_name":{"kind":"string","value":"MarcPartensky/Pygame-Geometry"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2170030"},"content":{"kind":"string","value":"from collections import OrderedDict\n\nclass Map(OrderedDict):\n\n get = OrderedDict.__getitem__\n set = OrderedDict.__setitem__\n\n def __str__(self):\n \"\"\"Return the string representation of a map.\"\"\"\n return type(self).__name__+\"{\"+\", \".join([f\"{k}:{v}\" for k,v in self.items()])+\"}\"\n\n def reset(self, m):\n \"\"\"Reset the map.\"\"\"\n self.clear()\n for [k, v] in m.items():\n self[k] = v\n\n def sort(self, f=lambda e: e):\n \"\"\"Sort items.\"\"\"\n self.reset(self.sorted(f))\n\n def sortKeys(self, f):\n \"\"\"Sort items by keys.\"\"\"\n self.reset(self.sortedKeys(f))\n\n def sortValues(self, f):\n \"\"\"Sort items by values.\"\"\"\n self.reset(self.sortedValues(f))\n\n def sorted(self, f=lambda e: e):\n \"\"\"Return a sorted map.\"\"\"\n return Map(sorted(self.items(), key=f))\n\n def sortedKeys(self, f=lambda e: e[0]):\n \"\"\"Return a map sorted by keys.\"\"\"\n return Map(sorted(self.items(), key=f))\n\n def sortedValues(self, f=lambda e: e[1]):\n \"\"\"Return a map sorted by values.\"\"\"\n return Map(sorted(self.items(), key=f))\n\n def forEach(self, f):\n \"\"\"Apply f then set each value.\"\"\"\n for (k,v) in self.items():\n self[k]=f(v)\n\n def map(self, f):\n \"\"\"Apply f then yield each value.\"\"\"\n for e in self.values():\n yield f(e)\n\n\nif __name__==\"__main__\":\n d = {0:2, 1:2, 3:3}\n m = Map(d)\n print(m)\n print(m.values())\n print(m.keys())\n print(len(m))\n print(m)\n m.sort()\n print(m.sortedValues())\n print(m.sortedKeys())\n print(m.get(0))\n"},"size":{"kind":"number","value":1628,"string":"1,628"}}},{"rowIdx":126405,"cells":{"max_stars_repo_path":{"kind":"string","value":"Assignments/1/Solution/script10.py"},"max_stars_repo_name":{"kind":"string","value":"Ak-Shaw/python-for-kids"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170075"},"content":{"kind":"string","value":"# Write a program to print the series : s=1+x^2/2!+x^3/3!+.........+x^n/n!\n\ndef 
factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n\nn = int(input(\"Enter the number of terms: \"))\nprint('\\n1')\nx = 2\ns = 1\nwhile x <= n:\n t = x**x/factorial(x)\n s = s + t\n print(t)\n x+=1\nprint('\\nsum =', s)"},"size":{"kind":"number","value":337,"string":"337"}}},{"rowIdx":126406,"cells":{"max_stars_repo_path":{"kind":"string","value":"Aggregation-Network/modules/losses/FocalBCEloss.py"},"max_stars_repo_name":{"kind":"string","value":"King-HAW/DC-MT"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2167388"},"content":{"kind":"string","value":"import torch\nimport torch.nn as nn\n\n# https://blog.csdn.net/Code_Mart/article/details/89736187\n\n\nclass FocalBCEloss(nn.Module):\n def __init__(self, gamma=2, alpha=0.5, size_average=True):\n super(FocalBCEloss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.size_average = size_average\n\n def forward(self, y_pred, y_true):\n epsilon = 1e-10\n y_pred = y_pred + epsilon\n pred = y_pred.view(-1, 1)\n target = y_true.view(-1, 1)\n\n pred = torch.cat((1 - pred, pred), dim=1)\n\n class_mask = torch.zeros(pred.shape[0], pred.shape[1]).cuda()\n class_mask.scatter_(1, target.view(-1, 1).long(), 1.)\n\n probs = (pred * class_mask).sum(dim=1).view(-1, 1)\n probs = probs.clamp(min=0.0001, max=1.0)\n\n log_p = probs.log()\n\n alpha = torch.ones(pred.shape[0], pred.shape[1]).cuda()\n alpha[:, 0] = alpha[:, 0] * (1 - self.alpha)\n alpha[:, 1] = alpha[:, 1] * self.alpha\n alpha = (alpha * class_mask).sum(dim=1).view(-1, 1)\n\n batch_loss = - alpha * (torch.pow((1 - probs), self.gamma)) * log_p\n\n if self.size_average:\n loss = batch_loss.mean()\n else:\n loss = batch_loss.sum()\n\n return loss\n\n\n"},"size":{"kind":"number","value":1264,"string":"1,264"}}},{"rowIdx":126407,"cells":{"max_stars_repo_path":{"kind":"string","value":"fin_model_course/gradetools/project_1/excel/main.py"},"max_stars_repo_name":{"kind":"string","value":"whoopnip/fin-model-course"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2169526"},"content":{"kind":"string","value":"import os\nfrom gradetools.project_1.cases import INPUT_CASE_CONFIGS, OUTPUT_CASES\nfrom gradetools.project_1.excel.config import INPUT_RANGE_DICT, OUTPUT_RANGE_DICT\nfrom gradetools.excel.main import open_all_workbooks_in_folder_check_sheet_create_df\nfrom gradetools.config import EXCEL_FOLDER\n\n\nif __name__ == '__main__':\n open_all_workbooks_in_folder_check_sheet_create_df(\n EXCEL_FOLDER,\n 'Inputs and Outputs',\n INPUT_CASE_CONFIGS,\n OUTPUT_CASES,\n INPUT_RANGE_DICT,\n OUTPUT_RANGE_DICT,\n report_path=os.path.join(EXCEL_FOLDER, 'accuracy report.csv'),\n full_error_path=os.path.join(EXCEL_FOLDER, 'full accuracy data.csv')\n )"},"size":{"kind":"number","value":685,"string":"685"}}},{"rowIdx":126408,"cells":{"max_stars_repo_path":{"kind":"string","value":"anomalib/models/patchcore/anomaly_map.py"},"max_stars_repo_name":{"kind":"string","value":"alexriedel1/anomalib"},"max_stars_count":{"kind":"number","value":689,"string":"689"},"id":{"kind":"string","value":"2169549"},"content":{"kind":"string","value":"\"\"\"Anomaly Map Generator for the PatchCore model implementation.\"\"\"\n\n# Copyright (C) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nfrom typing import Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom kornia.filters import gaussian_blur2d\nfrom omegaconf import ListConfig\n\n\nclass AnomalyMapGenerator:\n \"\"\"Generate Anomaly Heatmap.\"\"\"\n\n def __init__(\n self,\n input_size: Union[ListConfig, Tuple],\n sigma: int = 4,\n ) -> None:\n self.input_size = input_size\n self.sigma = sigma\n\n def compute_anomaly_map(self, patch_scores: torch.Tensor, feature_map_shape: torch.Size) -> torch.Tensor:\n \"\"\"Pixel Level Anomaly Heatmap.\n\n Args:\n patch_scores (torch.Tensor): Patch-level anomaly scores\n feature_map_shape (torch.Size): 2-D feature map shape (width, height)\n\n Returns:\n torch.Tensor: Map of the pixel-level anomaly scores\n \"\"\"\n width, height = feature_map_shape\n batch_size = len(patch_scores) // (width * height)\n\n anomaly_map = patch_scores[:, 0].reshape((batch_size, 1, width, height))\n anomaly_map = F.interpolate(anomaly_map, size=(self.input_size[0], self.input_size[1]))\n\n kernel_size = 2 * int(4.0 * self.sigma + 0.5) + 1\n anomaly_map = gaussian_blur2d(anomaly_map, (kernel_size, kernel_size), sigma=(self.sigma, self.sigma))\n\n return anomaly_map\n\n @staticmethod\n def compute_anomaly_score(patch_scores: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute Image-Level Anomaly Score.\n\n Args:\n patch_scores (torch.Tensor): Patch-level anomaly scores\n Returns:\n torch.Tensor: Image-level anomaly scores\n \"\"\"\n max_scores = torch.argmax(patch_scores[:, 0])\n confidence = torch.index_select(patch_scores, 0, max_scores)\n weights = 1 - (torch.max(torch.exp(confidence)) / torch.sum(torch.exp(confidence)))\n score = weights * torch.max(patch_scores[:, 0])\n return score\n\n def __call__(self, **kwargs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns anomaly_map and anomaly_score.\n\n Expects `patch_scores` keyword to be passed explicitly\n Expects `feature_map_shape` keyword to be passed explicitly\n\n Example\n >>> anomaly_map_generator = AnomalyMapGenerator(input_size=input_size)\n >>> map, score = anomaly_map_generator(patch_scores=numpy_array, feature_map_shape=feature_map_shape)\n\n Raises:\n ValueError: If `patch_scores` key is not found\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: anomaly_map, anomaly_score\n \"\"\"\n\n if \"patch_scores\" not in kwargs:\n raise ValueError(f\"Expected key `patch_scores`. Found {kwargs.keys()}\")\n\n if \"feature_map_shape\" not in kwargs:\n raise ValueError(f\"Expected key `feature_map_shape`. 
Found {kwargs.keys()}\")\n\n patch_scores = kwargs[\"patch_scores\"]\n feature_map_shape = kwargs[\"feature_map_shape\"]\n\n anomaly_map = self.compute_anomaly_map(patch_scores, feature_map_shape)\n anomaly_score = self.compute_anomaly_score(patch_scores)\n return anomaly_map, anomaly_score\n"},"size":{"kind":"number","value":3743,"string":"3,743"}}},{"rowIdx":126409,"cells":{"max_stars_repo_path":{"kind":"string","value":"data/pre-training_corpora/utlis/process_woz_dataset.py"},"max_stars_repo_name":{"kind":"string","value":"kingb12/pptod"},"max_stars_count":{"kind":"number","value":54,"string":"54"},"id":{"kind":"string","value":"2165706"},"content":{"kind":"string","value":"def update_belief_state(usr_dict, prev_bs_dict, prev_bs_name_list):\n res_bs_dict, res_bs_name_list = prev_bs_dict, prev_bs_name_list\n curr_bs_state = usr_dict['belief_state']\n for item in curr_bs_state:\n if item['act'] == 'inform': # only care about inform act\n for pair in item['slots']:\n slot_name, value = pair\n if slot_name not in res_bs_name_list:\n res_bs_name_list.append(slot_name)\n res_bs_dict[slot_name] = value\n if len(res_bs_name_list) == 0:\n res_text, res_dx_text = '', ''\n else:\n res_text = '[restaurant] '\n res_dx_text = '[restaurant] '\n for name in res_bs_name_list:\n value = res_bs_dict[name]\n res_text += name + ' ' + value + ' '\n res_dx_text += name + ' '\n res_text = res_text.strip().strip(' , ').strip()\n res_dx_text = res_dx_text.strip().strip(' , ').strip()\n return res_text, res_dx_text, res_bs_dict, res_bs_name_list\n\ndef zip_sess_list(sess_list):\n turn_num = len(sess_list)\n assert sess_list[0][\"system_transcript\"] == ''\n if turn_num == 1:\n raise Exception()\n turn_list = []\n for idx in range(turn_num - 1):\n curr_turn_dict = sess_list[idx]\n system_uttr = sess_list[idx+1]['system_transcript']\n turn_list.append((curr_turn_dict, system_uttr))\n return turn_list\n\ndef process_session(sess_list):\n turn_num = len(sess_list)\n res_dict = {'dataset':'WOZ',\n 'dialogue_session':[]}\n for idx in range(turn_num):\n if idx == 0:\n bs_dict, bs_name_list = {}, []\n one_usr_dict, one_system_uttr = sess_list[idx]\n one_usr_uttr = one_usr_dict['transcript']\n one_usr_bs, one_usr_bsdx, bs_dict, bs_name_list = \\\n update_belief_state(one_usr_dict, bs_dict, bs_name_list)\n \n one_turn_dict = {'turn_num':idx}\n one_turn_dict['user'] = one_usr_uttr\n one_turn_dict['resp'] = one_system_uttr\n one_turn_dict['turn_domain'] = ['[restaurant]']\n one_turn_dict['bspn'] = one_usr_bs\n one_turn_dict['bsdx'] = one_usr_bsdx\n one_turn_dict['aspn'] = ''\n res_dict['dialogue_session'].append(one_turn_dict)\n return res_dict\n\nimport json\ndef process_file(in_f):\n with open(in_f) as f:\n data = json.load(f) \n res_list = []\n for item in data:\n one_sess = zip_sess_list(item['dialogue'])\n if len(one_sess) == 0:\n continue\n one_res_dict = process_session(one_sess)\n res_list.append(one_res_dict)\n print (len(res_list), len(data))\n return res_list\n\nif __name__ == '__main__':\n print ('Processing WOZ Dataset...')\n in_f = r'../raw_data/neural-belief-tracker/data/woz/woz_train_en.json'\n train_list = process_file(in_f)\n\n in_f = r'../raw_data/neural-belief-tracker/data/woz/woz_validate_en.json'\n dev_list = process_file(in_f)\n\n in_f = r'../raw_data/neural-belief-tracker/data/woz/woz_test_en.json'\n test_list = process_file(in_f)\n\n all_data_list = train_list + test_list\n\n import os\n save_path = r'../separate_datasets/WOZ/'\n if os.path.exists(save_path):\n pass\n else: # recursively construct 
directory\n os.makedirs(save_path, exist_ok=True)\n\n import json\n out_f = save_path + r'/woz_train.json'\n with open(out_f, 'w') as outfile:\n json.dump(all_data_list, outfile, indent=4)\n\n out_f = save_path + r'/woz_test.json'\n with open(out_f, 'w') as outfile:\n json.dump(dev_list, outfile, indent=4)\n\n print ('Processing WOZ Dataset Finished!')\n"},"size":{"kind":"number","value":3617,"string":"3,617"}}},{"rowIdx":126410,"cells":{"max_stars_repo_path":{"kind":"string","value":"webapp/logging_configuration.py"},"max_stars_repo_name":{"kind":"string","value":"davidlmorton/webapp-skeleton"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169369"},"content":{"kind":"string","value":"from webapp import settings\nfrom pythonjsonlogger import jsonlogger\nimport logging\n\nlevels = settings.logging['levels']\noptions = settings.logging['options']\n\n\ndef configure_celery_logging():\n configure_logging()\n logging.getLogger('celery').setLevel(levels['celery'])\n logging.getLogger('webapp.implementation.celery.worker').setLevel(\n levels['worker'])\n logging.getLogger('requests').setLevel(levels['requests'])\n\n\ndef configure_web_logging():\n configure_logging()\n logging.getLogger('requests').setLevel(levels['requests'])\n logging.getLogger('urllib3').setLevel(levels['urllib3'])\n logging.getLogger('werkzeug').setLevel(levels['werkzeug'])\n\n\ndef configure_logging():\n if options['with_timestamps']:\n format_str = '%(asctime)s '\n else:\n format_str = ''\n format_str += '%(levelname)5s '\n format_str += '%(message)s'\n\n if options['format_json']:\n format_str += '%(name)s %s(process)d %s(created)s'\n formatter = jsonlogger.JsonFormatter(format_str)\n else:\n formatter = logging.Formatter(format_str)\n\n logHandler = logging.StreamHandler()\n logHandler.setFormatter(formatter)\n\n logger = logging.getLogger()\n logger.addHandler(logHandler)\n\n logger.setLevel(levels['webapp'])\n"},"size":{"kind":"number","value":1277,"string":"1,277"}}},{"rowIdx":126411,"cells":{"max_stars_repo_path":{"kind":"string","value":"chiaswap/pushtx.py"},"max_stars_repo_name":{"kind":"string","value":"geoffwalmsley/chiaswap"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169968"},"content":{"kind":"string","value":"import argparse\nimport asyncio\nimport os\nimport socket\nimport tempfile\n\nfrom pathlib import Path\n\nfrom aiohttp import (\n ClientSession,\n ClientTimeout,\n WSMessage,\n)\nfrom chia.cmds.init_funcs import create_all_ssl\nfrom chia.protocols.protocol_message_types import ProtocolMessageTypes\nfrom chia.protocols.shared_protocol import Handshake\nfrom chia.protocols import wallet_protocol\nfrom chia.server.outbound_message import Message, make_msg\nfrom chia.server.server import ssl_context_for_client, NodeType\nfrom chia.types.spend_bundle import SpendBundle\nfrom chia.util.ints import uint16, uint8\n\n\nDNS_INTRODUCER_HOSTNAME = \"dns-introducer.chia.net\"\n\n\ndef remote_host_ipv4():\n r = socket.getaddrinfo(DNS_INTRODUCER_HOSTNAME, 8444)\n for _ in set(r):\n t = _[4][0]\n if _[0] == socket.AddressFamily.AF_INET6:\n t = f\"[{t}]\"\n yield t\n\n\ndef make_ssl_path():\n # wow, this code sucks, but it's mostly due to the code in the chia module\n # not being very flexible\n temp_dir = tempfile.TemporaryDirectory()\n root_path = Path(temp_dir.name)\n ssl_dir = root_path / \"config\" / \"ssl\"\n os.makedirs(ssl_dir)\n create_all_ssl(root_path)\n # we have to keep `temp_dir` around because the contents\n # are 
deleted when it's garbage-collected\n return temp_dir, root_path\n\n\ndef get_ssl_context():\n _temp_dir, root_path = make_ssl_path()\n\n ssl_path = root_path / \"config\" / \"ssl\"\n ca_path = ssl_path / \"ca\"\n wallet_path = ssl_path / \"wallet\"\n chia_ca_crt_path = ca_path / \"chia_ca.crt\"\n chia_ca_key_path = ca_path / \"chia_ca.key\"\n\n crt_path = wallet_path / \"public_wallet.crt\"\n key_path = wallet_path / \"public_wallet.key\"\n\n ssl_context = ssl_context_for_client(\n chia_ca_crt_path, chia_ca_key_path, crt_path, key_path\n )\n # we have to keep `temp_dir` around because the contents\n # are deleted when it's garbage-collected\n ssl_context.temp_dir = _temp_dir\n return ssl_context\n\n\nasync def push_tx(spend_bundle: SpendBundle):\n ssl_context = get_ssl_context()\n jobs = []\n for remote_host in remote_host_ipv4():\n job = asyncio.create_task(\n push_tx_to_host(ssl_context, spend_bundle, remote_host, 8444)\n )\n jobs.append(job)\n d = {}\n while 1:\n done, pending = await asyncio.wait(jobs, return_when=asyncio.FIRST_COMPLETED)\n for t in done:\n try:\n rv = t.result()\n except Exception as ex:\n rv = str(ex)\n d[rv] = d.setdefault(rv, 0) + 1\n lp = len(pending)\n d[\"pending\"] = lp\n if lp == 0:\n del d[\"pending\"]\n s = \", \".join(\"%s: %d\" % (k, v) for k, v in sorted(d.items()))\n print(s)\n if len(pending) == 0:\n break\n jobs = list(pending)\n\n\nasync def push_tx_to_host(\n ssl_context, spend_bundle: SpendBundle, remote_host, remote_port\n):\n\n ws = None\n session = None\n try:\n timeout = ClientTimeout(total=10)\n session = ClientSession(timeout=timeout)\n\n url = f\"wss://{remote_host}:{remote_port}/ws\"\n # print(f\"trying {url}\")\n\n ws = await session.ws_connect(\n url,\n autoclose=True,\n autoping=True,\n heartbeat=60,\n ssl=ssl_context,\n max_msg_size=100 * 1024 * 1024,\n )\n\n network_id = \"mainnet\"\n protocol_version = \"0.0.33\"\n chia_full_version_str = \"1.0.0.0\"\n server_port = 1023\n node_type = NodeType.WALLET\n capabilities = [(1, \"1\")]\n handshake = Handshake(\n network_id,\n protocol_version,\n chia_full_version_str,\n uint16(server_port),\n uint8(node_type),\n capabilities,\n )\n\n outbound_handshake = make_msg(ProtocolMessageTypes.handshake, handshake)\n await ws.send_bytes(bytes(outbound_handshake))\n\n response: WSMessage = await ws.receive()\n # print(response)\n data = response.data\n full_message_loaded: Message = Message.from_bytes(data)\n message_type = ProtocolMessageTypes(full_message_loaded.type).name\n # print(message_type)\n # print(full_message_loaded)\n\n # breakpoint()\n msg = make_msg(\n ProtocolMessageTypes.send_transaction,\n wallet_protocol.SendTransaction(spend_bundle),\n )\n await ws.send_bytes(bytes(msg))\n rv = \"failed\"\n while 1:\n response: WSMessage = await ws.receive()\n if response.type == 8: # WSMsgType.CLOSE\n v = None\n break\n if response.type != 2: # WSMsgType.BINARY\n v = None\n break\n # print(response)\n data = response.data\n full_message_loaded: Message = Message.from_bytes(data)\n message_type = ProtocolMessageTypes(full_message_loaded.type).name\n # print(message_type)\n if str(message_type) == \"transaction_ack\":\n v = wallet_protocol.TransactionAck.from_bytes(full_message_loaded.data)\n # breakpoint()\n ack_map = {\n \"ALREADY_INCLUDING_TRANSACTION\": \"included\",\n \"DOUBLE_SPEND\": \"double-spend\",\n \"NO_TRANSACTIONS_WHILE_SYNCING\": \"catching-up\",\n \"ASSERT_SECONDS_RELATIVE_FAILED\": \"not-valid-yet\",\n }\n msg = ack_map.get(v.error, v.error)\n rv = f\"ack.{msg}\"\n break\n # 
print(full_message_loaded)\n # print(v)\n # breakpoint()\n # print(v)\n if rv == \"ack.3\":\n print(v)\n # breakpoint()\n pass\n await ws.close()\n await session.close()\n return rv\n except Exception as ex:\n if ws is not None:\n await ws.close()\n # breakpoint()\n if session is not None:\n await session.close()\n exception_map = [\n (\"Cannot connect to host\", \"no-connection\"),\n (\"ConnectionResetError\", \"reset\"),\n (\"TimeoutError\", \"timeout\"),\n (\"ClientConnectorError\", \"client-error\"),\n ]\n msg = repr(ex)\n for s, r in exception_map:\n if msg.startswith(s):\n return r\n print(f\"unknown `msg`, consider diagnosing and adding code for this case\")\n print(\"Dropping into debugger; enter `c` to continue `pushtx`\")\n breakpoint()\n return msg\n\n\ndef show_coins_spent(spend_bundle):\n for coin_spend in spend_bundle.coin_spends:\n coin = coin_spend.coin\n print(f\"spending coin id 0x{coin.name().hex()}\")\n\n\nasync def async_main(args, parser):\n spend_bundle = args.spend_bundle[0]\n if args.debug:\n spend_bundle.debug()\n show_coins_spent(spend_bundle)\n if not args.dry_run:\n await push_tx(spend_bundle)\n\n\ndef spend_bundle_from_hex(h):\n return SpendBundle.from_bytes(bytes.fromhex(h))\n\n\ndef create_parser():\n parser = argparse.ArgumentParser(description=\"Process some integers.\")\n parser.add_argument(\n \"spend_bundle\",\n metavar=\"SPENDBUNDLE_HEX\",\n type=spend_bundle_from_hex,\n nargs=1,\n help=\"the `SpendBundle` as hex\",\n )\n parser.add_argument(\n \"-d\",\n \"--debug\",\n action=\"store_true\",\n help=\"show debug information for spendbundle\",\n )\n parser.add_argument(\n \"-n\",\n \"--dry-run\",\n action=\"store_true\",\n help=\"don't actually send `SpendBundle` to the network\",\n )\n return parser\n\n\ndef main():\n parser = create_parser()\n args = parser.parse_args()\n return asyncio.run(async_main(args, parser))\n\n\nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":7806,"string":"7,806"}}},{"rowIdx":126412,"cells":{"max_stars_repo_path":{"kind":"string","value":"spikeforest/spikeforestwidgets/unitwaveformswidget.py"},"max_stars_repo_name":{"kind":"string","value":"mhhennig/spikeforest"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2168850"},"content":{"kind":"string","value":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport vdomr as vd\nimport time\nfrom spikeforest import spikewidgets as sw\n\n\nclass UnitWaveformsWidget(vd.Component):\n def __init__(self, *, recording, sorting, max_num_spikes_per_unit=20, snippet_len=100):\n vd.Component.__init__(self)\n self._widgets = [\n UnitWaveformWidget(\n recording=recording,\n sorting=sorting,\n unit_id=id,\n average_waveform=None,\n max_num_spikes_per_unit=max_num_spikes_per_unit,\n snippet_len=snippet_len\n )\n for id in sorting.get_unit_ids()\n ]\n vd.devel.loadBootstrap()\n\n def setSelectedUnitIds(self, ids):\n ids = set(ids)\n for W in self._widgets:\n W.setSelected(W.unitId() in ids)\n\n def render(self):\n box_style = dict(float='left')\n boxes = [\n vd.div(W, style=box_style)\n for W in self._widgets\n ]\n div = vd.div(boxes)\n return div\n\n\nclass UnitWaveformWidget(vd.Component):\n def __init__(self, *, recording, sorting, unit_id, average_waveform=None, show_average=True, max_num_spikes_per_unit=20, snippet_len=100):\n vd.Component.__init__(self)\n self._plot = _UnitWaveformPlot(\n recording=recording,\n sorting=sorting,\n unit_id=unit_id,\n average_waveform=average_waveform,\n show_average=show_average,\n 
max_num_spikes_per_unit=max_num_spikes_per_unit,\n snippet_len=snippet_len\n )\n self._plot_div = vd.components.LazyDiv(self._plot)\n self._unit_id = unit_id\n self._selected = False\n\n def setSelected(self, val):\n if self._selected == val:\n return\n self._selected = val\n self.refresh()\n\n def unitId(self):\n return self._unit_id\n\n def render(self):\n style0 = {'border': 'solid 1px black', 'margin': '5px'}\n style1 = {}\n if self._selected:\n style1['background-color'] = 'yellow'\n return vd.div(\n vd.p('Unit {}'.format(self._unit_id), style={'text-align': 'center'}),\n vd.div(self._plot_div, style=style0),\n style=style1\n )\n\n\nclass _UnitWaveformPlot(vd.components.Pyplot):\n def __init__(self, *, recording, sorting, unit_id, average_waveform, show_average, max_num_spikes_per_unit, snippet_len):\n vd.components.Pyplot.__init__(self)\n self._recording = recording\n self._sorting = sorting\n self._unit_id = unit_id\n self._max_num_spikes_per_unit = max_num_spikes_per_unit\n self._average_waveform = average_waveform\n self._show_average = show_average\n self._snippet_len = snippet_len\n\n def plot(self):\n # W=sw.UnitWaveformsWidget(recording=self._recording,sorting=self._sorting,unit_ids=[self._unit_id],width=5,height=5)\n # W.plot()\n plot_unit_waveform(\n recording=self._recording,\n sorting=self._sorting,\n unit_id=self._unit_id,\n average_waveform=self._average_waveform,\n show_average=self._show_average,\n max_num_spikes_per_unit=self._max_num_spikes_per_unit,\n snippet_len=self._snippet_len\n )\n\n\ndef _compute_minimum_gap(x):\n a = np.sort(np.unique(x))\n if len(a) <= 1:\n return 1\n return np.min(np.diff(a))\n\n\ndef _plot_spike_shapes(*, representative_waveforms=None, average_waveform=None, show_average, channel_locations=None, ylim=None, max_representatives=None, color='blue', title=''):\n if average_waveform is None:\n if representative_waveforms is None:\n raise Exception('You must provide either average_waveform, representative waveforms, or both')\n average_waveform = np.mean(representative_waveforms, axis=2)\n M = average_waveform.shape[0] # number of channels\n T = average_waveform.shape[1] # number of timepoints\n\n if ylim is None:\n ylim = [average_waveform.min(), average_waveform.max()]\n yrange = ylim[1] - ylim[0]\n\n if channel_locations is None:\n channel_locations = np.zeros((M, 2))\n for m in range(M):\n channel_locations[m, :] = [0, -m]\n\n if channel_locations.shape[1] > 2:\n channel_locations = channel_locations[:, -2:]\n\n xmin = np.min(channel_locations[:, 0])\n xmax = np.max(channel_locations[:, 0])\n ymin = np.min(channel_locations[:, 1])\n ymax = np.max(channel_locations[:, 1])\n xgap = _compute_minimum_gap(channel_locations[:, 0])\n ygap = _compute_minimum_gap(channel_locations[:, 1])\n\n xvals = np.linspace(-xgap * 0.8 / 2, xgap * 0.8 / 2, T)\n yscale = 1 / (yrange / 2) * ygap / 2 * 0.4\n\n ax = plt.axes([0, 0, 1, 1], frameon=False)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n\n if representative_waveforms is not None:\n if max_representatives is not None:\n W0 = representative_waveforms\n if W0.shape[2] > max_representatives:\n indices = np.random.choice(range(W0.shape[2]), size=max_representatives, replace=False)\n representative_waveforms = W0[:, :, indices]\n L = representative_waveforms.shape[2]\n # for j in range(L):\n # XX = np.zeros((T, M))\n # YY = np.zeros((T, M))\n # for m in range(M):\n # loc = channel_locations[m, -2:]\n # XX[:, m] = loc[0] + 
xvals\n # YY[:, m] = loc[1] + (representative_waveforms[m, :, j] - representative_waveforms[m, 0, j])*yscale\n # color=(np.random.uniform(0,1), np.random.uniform(0,1), np.random.uniform(0,1))\n # plt.plot(XX, YY, color=color, alpha=0.3)\n XX = np.zeros((T, M, L))\n YY = np.zeros((T, M, L))\n for m in range(M):\n loc = channel_locations[m, -2:]\n for j in range(L):\n XX[:, m, j] = loc[0] + xvals\n YY[:, m, j] = loc[1] + (representative_waveforms[m, :, j] - representative_waveforms[m, 0, j]) * yscale\n XX = XX.reshape(T, M * L)\n YY = YY.reshape(T, M * L)\n plt.plot(XX, YY, color=(0.5, 0.5, 0.5), alpha=0.5)\n\n if show_average:\n XX = np.zeros((T, M))\n YY = np.zeros((T, M))\n for m in range(M):\n loc = channel_locations[m, -2:]\n XX[:, m] = loc[0] + xvals\n YY[:, m] = loc[1] + (average_waveform[m, :] - average_waveform[m, 0]) * yscale\n plt.plot(XX, YY, color)\n\n plt.xlim(xmin - xgap / 2, xmax + xgap / 2)\n plt.ylim(ymin - ygap / 2, ymax + ygap / 2)\n\n # plt.gca().set_axis_off()\n if title:\n plt.title(title, color='gray')\n\n\ndef _get_random_spike_waveforms(*, recording, sorting, unit, max_num, channels, snippet_len):\n st = sorting.get_unit_spike_train(unit_id=unit)\n num_events = len(st)\n if num_events > max_num:\n event_indices = np.random.choice(range(num_events), size=max_num, replace=False)\n else:\n event_indices = range(num_events)\n\n spikes = recording.get_snippets(reference_frames=st[event_indices].astype(int), snippet_len=snippet_len, channel_ids=channels)\n if len(spikes) > 0:\n spikes = np.dstack(tuple(spikes))\n else:\n spikes = np.zeros((recording.get_num_channels(), snippet_len, 0))\n return spikes\n\n\ndef plot_unit_waveform(*, recording, sorting, unit_id, max_num_spikes_per_unit, average_waveform, show_average, channel_ids=None, snippet_len=100, title=''):\n if not channel_ids:\n channel_ids = recording.get_channel_ids()\n M = len(channel_ids)\n channel_locations = np.zeros((M, 2))\n for ii, ch in enumerate(channel_ids):\n loc = recording.get_channel_property(ch, 'location')\n channel_locations[ii, :] = loc[-2:]\n\n spikes = _get_random_spike_waveforms(recording=recording, sorting=sorting, unit=unit_id, max_num=max_num_spikes_per_unit, channels=channel_ids, snippet_len=snippet_len)\n # if not title:\n # title='Unit {}'.format(int(unit_id))\n\n _plot_spike_shapes(representative_waveforms=spikes, channel_locations=channel_locations, average_waveform=average_waveform, show_average=show_average, title=title)\n"},"size":{"kind":"number","value":8317,"string":"8,317"}}},{"rowIdx":126413,"cells":{"max_stars_repo_path":{"kind":"string","value":"rssant/middleware/debug_toolbar.py"},"max_stars_repo_name":{"kind":"string","value":"zuzhi/rssant"},"max_stars_count":{"kind":"number","value":1176,"string":"1,176"},"id":{"kind":"string","value":"2169562"},"content":{"kind":"string","value":"import logging\nfrom debug_toolbar.toolbar import DebugToolbar\nfrom debug_toolbar.middleware import DebugToolbarMiddleware\n\n\nLOG = logging.getLogger(__name__)\n\n\ndef ms(t):\n return '%dms' % int(t) if t is not None else '#ms'\n\n\ndef s_ms(t):\n return ms(t * 1000) if t is not None else '#ms'\n\n\nclass RssantDebugToolbarMiddleware(DebugToolbarMiddleware):\n \"\"\"\n Middleware to set up Debug Toolbar on incoming request and render toolbar\n on outgoing response.\n\n See also:\n https://github.com/jazzband/django-debug-toolbar/blob/master/debug_toolbar/middleware.py\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n toolbar = 
DebugToolbar(request, self.get_response)\n\n # Activate instrumentation ie. monkey-patch.\n for panel in toolbar.enabled_panels:\n panel.enable_instrumentation()\n try:\n # Run panels like Django middleware.\n response = toolbar.process_request(request)\n finally:\n # Deactivate instrumentation ie. monkey-unpatch. This must run\n # regardless of the response. Keep 'return' clauses below.\n for panel in reversed(toolbar.enabled_panels):\n panel.disable_instrumentation()\n\n # generate stats and timing\n for panel in reversed(toolbar.enabled_panels):\n panel.generate_stats(request, response)\n panel.generate_server_timing(request, response)\n stats = self._extract_panel_stats(toolbar.enabled_panels)\n message = self._stats_message(stats)\n LOG.info(f'X-Time: {message}')\n response['X-Time'] = message\n return response\n\n def _stats_message(self, stats):\n timer_msg = '0ms'\n total_time = int(stats['timer'].get('total_time') or 0)\n if total_time > 0:\n timer_msg = '{},utime={},stime={}'.format(\n ms(total_time),\n ms(stats['timer']['utime']),\n ms(stats['timer']['stime']),\n )\n\n sql_msg = 'sql=0'\n if stats['sql']:\n sql_msg = 'sql={},{}'.format(\n stats['sql']['num_queries'] or 0,\n ms(stats['sql']['time_spent']),\n )\n similar_count = stats['sql']['similar_count']\n if similar_count and similar_count > 0:\n sql_msg += f',similar={similar_count}'\n duplicate_count = stats['sql']['duplicate_count']\n if duplicate_count and duplicate_count > 0:\n sql_msg += f',duplicate={duplicate_count}'\n\n return ';'.join([timer_msg, sql_msg])\n\n def _extract_panel_stats(self, panels):\n stats_map = {}\n for panel in panels:\n stats = panel.get_stats()\n if not stats:\n continue\n stats_map[panel.__class__.__name__] = stats\n result = {'sql': {}, 'timer': {}}\n sql_panel_stats = stats_map.get('SQLPanel')\n if sql_panel_stats and sql_panel_stats['databases']:\n _, sql_stats = sql_panel_stats['databases'][0]\n keys = ['time_spent', 'num_queries', 'similar_count', 'duplicate_count']\n for key in keys:\n result['sql'][key] = sql_stats.get(key)\n timer_stats = stats_map.get('TimerPanel')\n if timer_stats:\n keys = ['total_time', 'utime', 'stime', 'total']\n for key in keys:\n result['timer'][key] = timer_stats.get(key)\n return result\n"},"size":{"kind":"number","value":3514,"string":"3,514"}}},{"rowIdx":126414,"cells":{"max_stars_repo_path":{"kind":"string","value":"expand.py"},"max_stars_repo_name":{"kind":"string","value":"helloShen/programming-paradigms"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169653"},"content":{"kind":"string","value":"import sys\nimport random\n\ngrammar = {'':[['This ', '', ' is here']],\n '':[['computer'], ['car'], ['assignment']]}\n\ndef expand(symbol):\n if symbol.startswith('<'):\n definitions = grammar[symbol]\n expansion = random.choice(definitions)\n map(expand, expansion)\n else:\n sys.stdout.write(symbol)\n\nrandom.seed()\nexpand('')"},"size":{"kind":"number","value":391,"string":"391"}}},{"rowIdx":126415,"cells":{"max_stars_repo_path":{"kind":"string","value":"code/modern_zh/utils/helper.py"},"max_stars_repo_name":{"kind":"string","value":"Pzoom522/HistSumm"},"max_stars_count":{"kind":"number","value":51,"string":"51"},"id":{"kind":"string","value":"2169295"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport re\nimport linecache\nimport shutil\nimport json\nimport jieba\nimport xmltodict\nimport sys\nimport opencc\nfrom tqdm import tqdm\nfrom utils.regu import 
filter_sentences\n\nCHINESE_REGEX = re.compile('[\\u4e00-\\u9fa5]|[\\u3400-\\u4db5]')\nNUM_ENG_REGEX = re.compile('[a-zA-Z0-9??]')\n\ndef make_dir(path):\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n \n\ndef get_all_files(root):\n file_names = os.listdir(root)\n file_ob_list = []\n \n # macos folder files\n for file_name in file_names:\n if file_name != \".DS_Store\":\n file_ob_list.append(file_name)\n \n return file_ob_list\n\n\ndef cut_sentences(para):\n para = re.sub(r\"([。!;?\\!\\?\\..…;])\\1+\", r\"\\1\", para)\n para = re.sub('([。!;?\\!\\?\\..…;])([^”’」』])', r\"\\1\\n\\2\", para) # 单字符断句符\n para = re.sub('(\\.{6})([^”’」』])', r\"\\1\\n\\2\", para) # 英文省略号\n para = re.sub('(\\…{2})([^”’」』])', r\"\\1\\n\\2\", para) # 中文省略号\n para = re.sub('([。!;?\\!\\?\\..…;][”’」』])([^,。!;?\\!\\?\\..…;])', r'\\1\\n\\2', para)\n para = para.rstrip() # 段尾如果有多余的\\n就去掉它\n sentences = para.split(\"\\n\")\n sentences = [sent for sent in sentences if len(sent.strip()) > 0]\n\n return sentences\n\n\ndef judge_chinese(line: str):\n c_count = 0\n e_count = 0 \n for i in line:\n if CHINESE_REGEX.match(i) != None:\n c_count += 1\n elif NUM_ENG_REGEX.match(i) != None:\n e_count += 1\n \n if c_count < e_count * 10:\n return False\n else:\n return True\n \n \ndef process_all_zh_files(root, target_root, target_file): \n train_f = open(root, \"r\", encoding='utf-8-sig',errors='ignore')\n output = open(target_root + target_file, \"w+\", encoding='utf-8-sig', errors='ignore')\n \n for item in tqdm(train_f.readlines()):\n text = json.loads(item[:-1])['content'].strip()\n if not judge_chinese(text):\n continue\n text_list = cut_sentences(text)\n for sentence in text_list:\n res = filter_sentences(sentence).strip()\n if len(res) > 0:\n for i in jieba.lcut(res):\n output.write(i)\n output.write(' ')\n output.write('\\n')\n output.close()\n train_f.close()\n \n\ndef process_all_news_files(root, target_root, target_file): \n train_f = open(root, \"r\", encoding='utf-8',errors='ignore')\n output = open(target_root + target_file, \"w+\", encoding='utf-8', errors='ignore')\n \n for text in tqdm(train_f.readlines()):\n if not judge_chinese(text):\n continue\n text_list = cut_sentences(text)\n for sentence in text_list:\n res = filter_sentences(sentence).strip()\n if len(res) > 0:\n for i in jieba.lcut(res):\n output.write(i)\n output.write(' ')\n output.write('\\n')\n output.close()\n train_f.close()\n \n \ndef process_all_nlpcc_files(root, target_root, target_file):\n # make_dir(target_root) \n train_f = open(root, \"r\", encoding='utf-8',errors='ignore')\n output = open(target_root + target_file, \"w+\", encoding='utf-8', errors='ignore')\n \n for t in tqdm(train_f.readlines()):\n text = json.loads(t[:-1])\n text = text['summarization'] + \"。\" + text['article']\n \n if not judge_chinese(text):\n continue\n text_list = cut_sentences(text)\n for sentence in text_list:\n res = filter_sentences(sentence).strip()\n if len(res) > 0:\n for i in jieba.lcut(res):\n output.write(i)\n output.write(' ')\n output.write('\\n')\n output.close()\n train_f.close() \n\n \ndef merge_files(root, target, convert):\n \n converter = opencc.OpenCC('s2t.json')\n file_names = get_all_files(root)\n output = open(target, \"w+\", encoding='utf-8', errors='ignore')\n for file_name in file_names:\n f = open(root + file_name, \"r\", encoding='utf-8',errors='ignore')\n for line in tqdm(f.readlines(), desc=\"merge lines in a file\"):\n if convert:\n output.write(converter.convert(line))\n else:\n output.write(line)\n f.close()\n 
output.close()\n\n"},"size":{"kind":"number","value":4298,"string":"4,298"}}},{"rowIdx":126416,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"rsbondi/lnet"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2164451"},"content":{"kind":"string","value":"from setuptools import find_packages, setup\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"lnet\",\n version=\"0.0.4\",\n author=\"\",\n author_email=\"\",\n description=\"Utilities to define and bootstrap lightning networks for testing\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/cdecker/lnet\",\n packages=['lnet'],\n scripts=['bin/lnet-cli', 'bin/lnet-daemon'],\n install_requires=[\n \"click==7.0\",\n \"pylightning==0.0.4\",\n \"pydot==1.2.4\",\n \"python-bitcoinlib==0.7.0\",\n \"ephemeral-port-reserve==1.1.0\",\n \"python-daemon==2.2.0\",\n \"filelock==3.0.9\",\n \"flask==1.0.2\",\n \"flask-jsonrpc==0.3.1\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n ],\n)\n\n"},"size":{"kind":"number","value":995,"string":"995"}}},{"rowIdx":126417,"cells":{"max_stars_repo_path":{"kind":"string","value":"neurolang/tests/test_typing.py"},"max_stars_repo_name":{"kind":"string","value":"hndgzkn/NeuroLang"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2167347"},"content":{"kind":"string","value":"import pytest\n\nimport typing\n\nfrom .. import expressions\n\nC_ = expressions.Constant\nS_ = expressions.Symbol\n\n\ndef test_get_type_args():\n args = expressions.get_type_args(typing.Set)\n assert args == tuple()\n\n args = expressions.get_type_args(typing.Set[int])\n assert args == (int, )\n\n\ndef test_subclass():\n assert not issubclass(\n expressions.Constant[int], expressions.Constant[typing.AbstractSet]\n )\n\n\ndef test_type_validation_value():\n def f(a: int) -> int:\n return 0\n\n values = (\n 3, {3, 8}, 'try', f, (3, 'a'),\n C_[typing.Tuple[str, float]](('a', 3.)), {'a': 3}\n )\n\n types_ = (\n int, typing.AbstractSet[int], typing.Text, typing.Callable[[int], int],\n typing.Tuple[int, str], typing.Tuple[str, float],\n typing.Mapping[str, int]\n )\n\n for i, v in enumerate(values):\n assert expressions.type_validation_value(\n v, typing.Any\n )\n\n for j, t in enumerate(types_):\n if i is j:\n assert expressions.type_validation_value(\n v, t\n )\n assert expressions.type_validation_value(\n v,\n typing.Union[t, types_[(i + 1) % len(types_)]],\n )\n else:\n assert not expressions.type_validation_value(\n v, t\n )\n assert not expressions.type_validation_value(\n v,\n typing.Union[t, types_[(i + 1) % len(types_)]]\n )\n\n with pytest.raises(ValueError, match=\"typing Generic not supported\"):\n assert expressions.type_validation_value(\n None, typing.Generic[typing.T]\n )\n\n\ndef test_TypedSymbol():\n v = 3\n t = int\n s = C_[t](v)\n assert s.value == v\n assert s.type is t\n\n with pytest.raises(expressions.NeuroLangTypeException):\n s = C_[t]('a')\n\n\ndef test_TypedSymbolTable():\n st = expressions.TypedSymbolTable()\n s1 = C_[int](3)\n s2 = C_[int](4)\n s3 = C_[float](5.)\n s4 = C_[int](5)\n s6 = C_[str]('a')\n\n assert len(st) == 0\n\n st[S_('s1')] = s1\n assert len(st) == 1\n assert 's1' in st\n assert st['s1'] == s1\n assert 
st.symbols_by_type(s1.type) == {'s1': s1}\n\n st[S_('s2')] = s2\n assert len(st) == 2\n assert 's2' in st\n assert st['s2'] == s2\n assert st.symbols_by_type(s1.type) == {'s1': s1, 's2': s2}\n\n st[S_('s3')] = s3\n assert len(st) == 3\n assert 's3' in st\n assert st['s3'] == s3\n assert st.symbols_by_type(s1.type) == {'s1': s1, 's2': s2}\n assert st.symbols_by_type(s3.type, False) == {'s3': s3}\n assert st.symbols_by_type(s3.type, True) == {'s1': s1, 's2': s2, 's3': s3}\n\n del st['s1']\n assert len(st) == 2\n assert 's1' not in st\n assert 's1' not in st.symbols_by_type(s1.type)\n\n assert {int, float} == st.types()\n\n stb = st.create_scope()\n assert 's2' in stb\n assert 's3' in stb\n stb[S_('s4')] = s4\n assert 's4' in stb\n assert 's4' not in st\n\n stb[S_('s5')] = None\n assert 's5' in stb\n assert stb[S_('s5')] is None\n\n stc = stb.create_scope()\n stc[S_('s6')] = s6\n assert {int, float, str} == stc.types()\n assert stc.symbols_by_type(int) == {'s2': s2, 's4': s4}\n\n assert set(iter(stc)) == {'s2', 's3', 's4', 's5', 's6'}\n\n with pytest.raises(ValueError):\n stb[S_('s6')] = 5\n\n\ndef test_free_variable_wrapping():\n def f(a: int) -> float:\n return 2. * int(a)\n\n fva = C_(f)\n x = S_[int]('x')\n fvb = fva(x)\n fva_type = fva.type\n fva_value = fva.value\n assert fva_type is typing.Callable[[int], float]\n assert fva_value == f\n\n assert fvb.type is float\n assert x.type is int\n"},"size":{"kind":"number","value":3740,"string":"3,740"}}},{"rowIdx":126418,"cells":{"max_stars_repo_path":{"kind":"string","value":"botex.py"},"max_stars_repo_name":{"kind":"string","value":"sourcefrenchy/botex"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170011"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# *** botex *** - renders latex expressions into png and serve presigned url to it back to the user\n# jeanmichel.amblat@g/mail.com\n#\n\nimport os, sys\nimport time\nimport logging\nfrom slack import RTMClient\nfrom slack.errors import SlackApiError\nfrom pytexit import py2tex\nimport sympy\nfrom pathlib import Path\n\nimport logging\nimport boto3\nfrom botocore.client import Config\nimport botocore\n\nDEBUG = False\nAWS_S3_BUCKET = \"botexjmaforfun\" # changeme\nAWS_S3_KEYID = \"df06d3d3-c4a6-4d62-9f62-b520a3ca1150\" # changeme\nAWS_REGION = \"us-east-1\"\n\ndef file2s3_getlink(filename, bucket=AWS_S3_BUCKET, object_name=None):\n\t\"\"\"Upload a file to an S3 bucket\n\n\t:param file_name: File to upload\n\t:param bucket: Bucket to upload to\n\t:param object_name: S3 object name. If not specified then file_name is used\n\t:return: True if file was uploaded, else False\n\t\"\"\"\n\n\tif os.environ[\"AWS_ACCESS_KEY_ID\"]:\n\t\ts3 = boto3.client('s3', AWS_REGION, config=Config(signature_version='s3v4'))\n\t\ttry:\n\t\t\tf = open('/var/tmp/' + filename, 'rb')\n\t\t\tcontent = f.read()\n\t\t\tresp = s3.put_object(Bucket=AWS_S3_BUCKET,\n Key=filename,\n Body=content,\n ServerSideEncryption='aws:kms',\n SSEKMSKeyId=AWS_S3_KEYID)\n\t\t\turl = s3.generate_presigned_url(\n\t\t\t\t\tClientMethod='get_object',\n\t\t\t\t\tParams={\n\t\t\t\t\t\t'Bucket': AWS_S3_BUCKET,\n\t\t\t\t\t\t'Key': filename\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\treturn url\n\t\texcept botocore.exceptions.ClientError as e:\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"[DEBUG] - {}\".format(e))\n\t\t\treturn False\n\t\treturn True\n\telse:\n\t\tprint(\"[FATAL] Missing AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY env. 
variables!\")\n\t\tsys.exit(0)\n\n@RTMClient.run_on(event='message')\ndef slack_loop_botex(**payload):\n\tdata = payload['data']\n\tweb_client = payload['web_client']\n\trtm_client = payload['rtm_client']\n\n\tif DEBUG:\n\t\tprint(\"[DEBUG] def slack_loop_botex()\")\n\n\tif 'text' in data and 'tex' in data.get('text', []):\n\t\tchannel_id = data['channel']\n\t\tthread_ts = data['ts']\n\t\tuser = data['user']\n\t\tlatexreq = data['text'].split(\"tex \",1)[1]\n\n\t\tfilename = time.strftime(\"%Y%m%d-%H%M%S-{}-output.png\".format(user))\n\n\t\tif DEBUG:\n\t\t\tprint(\"[DEBUG] received text={} latexreq={}\".format(data['text'], latexreq))\n\n\t\ttry:\n\t\t\tlatex = py2tex(latexreq)\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"[DEBUG] latex={}\".format(latex))\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"[DEBUG] Generating png file\")\n\t\t\tsympy.preview(latex, viewer='file', filename='/var/tmp/' + filename)\n\t\t\tif DEBUG:\n\t\t\t\tprint(\"[DEBUG] Uploading to S3\")\n\t\t\treply = file2s3_getlink(filename)\n\t\texcept:\n\t\t\treply = \"Cannot transform this expression, invalid syntax?\"\n\t\ttry:\n\t\t\tresponse = web_client.chat_postMessage(\n\t\t\t\tchannel=channel_id,\n\t\t\t\ttext=f\"Hi <@{user}>! \" + reply,\n\t\t\t\tthread_ts=thread_ts\n\t\t\t)\n\t\texcept SlackApiError as e:\n\t\t\t# You will get a SlackApiError if \"ok\" is False\n\t\t\tassert e.response[\"ok\"] is False\n\t\t\tassert e.response[\"error\"] # str like 'invalid_auth', 'channel_not_found'\n\t\t\tprint(f\"Got an error: {e.response['error']}\")\n\nif os.environ[\"SLACKTOKEN\"]:\n\trtm_client = RTMClient(token=os.environ[\"SLACKTOKEN\"])\n\trtm_client.start()\nelse:\n\tprint(\"[FATAL] No SLACKTOKEN env. variable set!\")\n"},"size":{"kind":"number","value":3127,"string":"3,127"}}},{"rowIdx":126419,"cells":{"max_stars_repo_path":{"kind":"string","value":"xpxchain/models/receipt/balance_transfer_receipt.py"},"max_stars_repo_name":{"kind":"string","value":"Sharmelen/python-xpx-chain-sdk"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169084"},"content":{"kind":"string","value":"\"\"\"\n balance_transfer_receipt\n ====================\n\n Transfer transaction.\n\n License\n -------\n\n Copyright 2019 NEM\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom ..blockchain.network_type import OptionalNetworkType\nfrom ..account.public_account import PublicAccount\nfrom ..mosaic.mosaic import Mosaic\nfrom .receipt_version import ReceiptVersion\nfrom .receipt_type import ReceiptType\nfrom .receipt import Receipt\nfrom .registry import register_receipt\nfrom ... 
import util\n\n__all__ = [\n 'BalanceTransferReceipt',\n 'MosaicLevyReceipt',\n 'MosaicRentalFeeReceipt',\n 'NamespaceRentalFeeReceipt',\n]\n\n\n@util.inherit_doc\n@util.dataclass(frozen=True)\nclass BalanceTransferReceipt(Receipt):\n \"\"\"\n Balance Transfer Receipt.\n\n :param network_type: Network type.\n :param version: The version of the receipt.\n :param sender: The public key of the sender.\n :param recipient: The public key of the recipient.\n :param mosaicId: Mosaic.\n :param amount: Amount to change.\n \"\"\"\n\n sender: PublicAccount\n recipient: PublicAccount\n mosaic: Mosaic\n\n def __init__(\n self,\n type: ReceiptType,\n version: ReceiptVersion,\n sender: PublicAccount,\n recipient: PublicAccount,\n mosaic: Mosaic,\n network_type: OptionalNetworkType,\n ) -> None:\n super().__init__(\n type,\n version,\n network_type,\n )\n self._set('sender', sender)\n self._set('recipient', recipient)\n self._set('mosaic', mosaic)\n\n # DTO\n\n @classmethod\n def validate_dto_specific(cls, data: dict) -> bool:\n required_keys = {'sender', 'recipient', 'mosaicId', 'amount'}\n return cls.validate_dto_required(data, required_keys)\n\n def to_dto_specific(\n self,\n network_type: OptionalNetworkType,\n ) -> dict:\n mosaic_data = self.mosaic.to_dto(network_type)\n\n return {\n 'sender': self.sender.public_key,\n 'recipient': self.recipient.public_key,\n 'mosaicId': mosaic_data['id'],\n 'amount': mosaic_data['amount'],\n\n }\n\n def load_dto_specific(\n self,\n data: dict,\n network_type: OptionalNetworkType,\n ) -> None:\n sender = PublicAccount.create_from_public_key(data['sender'], network_type)\n recipient = PublicAccount.create_from_public_key(data['recipient'], network_type)\n mosaic = Mosaic.create_from_dto({'id': data['mosaicId'], 'amount': data['amount']})\n\n self._set('sender', sender)\n self._set('recipient', recipient)\n self._set('mosaic', mosaic)\n\n\n@util.inherit_doc\n@register_receipt('MOSAIC_LEVY')\nclass MosaicLevyReceipt(BalanceTransferReceipt):\n \"\"\"\n Balance Change Receipt.\n\n :param network_type: Network type.\n :param version: The version of the receipt.\n :param account: The target account public key.\n :param mosaicId: Mosaic.\n :param amount: Amount to change.\n \"\"\"\n\n @classmethod\n def create(\n cls,\n type: ReceiptType,\n version: ReceiptVersion,\n sender: PublicAccount,\n recipient: PublicAccount,\n mosaic: Mosaic,\n network_type: OptionalNetworkType,\n ) -> MosaicLevyReceipt:\n return cls(\n type,\n version,\n sender,\n recipient,\n mosaic,\n network_type,\n )\n\n\n@util.inherit_doc\n@register_receipt('MOSAIC_RENTAL_FEE')\nclass MosaicRentalFeeReceipt(BalanceTransferReceipt):\n \"\"\"\n Balance Change Receipt.\n\n :param network_type: Network type.\n :param version: The version of the receipt.\n :param account: The target account public key.\n :param mosaicId: Mosaic.\n :param amount: Amount to change.\n \"\"\"\n\n @classmethod\n def create(\n cls,\n type: ReceiptType,\n version: ReceiptVersion,\n sender: PublicAccount,\n recipient: PublicAccount,\n mosaic: Mosaic,\n network_type: OptionalNetworkType,\n ) -> MosaicRentalFeeReceipt:\n return cls(\n type,\n version,\n sender,\n recipient,\n mosaic,\n network_type,\n )\n\n\n@util.inherit_doc\n@register_receipt('NAMESPACE_RENTAL_FEE')\nclass NamespaceRentalFeeReceipt(BalanceTransferReceipt):\n \"\"\"\n Balance Change Receipt.\n\n :param network_type: Network type.\n :param version: The version of the receipt.\n :param account: The target account public key.\n :param mosaicId: Mosaic.\n :param amount: Amount 
to change.\n \"\"\"\n\n @classmethod\n def create(\n cls,\n type: ReceiptType,\n version: ReceiptVersion,\n sender: PublicAccount,\n recipient: PublicAccount,\n mosaic: Mosaic,\n network_type: OptionalNetworkType,\n ) -> NamespaceRentalFeeReceipt:\n return cls(\n type,\n version,\n sender,\n recipient,\n mosaic,\n network_type,\n )\n"},"size":{"kind":"number","value":5561,"string":"5,561"}}},{"rowIdx":126420,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"its-dirg/id_token_verify"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169397"},"content":{"kind":"string","value":"from setuptools import setup, find_packages\n\nsetup(\n name='id_token_verify',\n version='0.1',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n url='https://github.com/its-dirg/id_token_verify',\n license='Apache 2.0',\n author='',\n author_email='',\n description='Utility/service for verifying signed OpenID Connect ID Tokens.',\n install_requires=['oic', 'requests']\n)\n"},"size":{"kind":"number","value":418,"string":"418"}}},{"rowIdx":126421,"cells":{"max_stars_repo_path":{"kind":"string","value":"main.py"},"max_stars_repo_name":{"kind":"string","value":"UtkarshMish/ondc_text_parsing"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169251"},"content":{"kind":"string","value":"import asyncio\nfrom os import environ\n\nfrom dotenv import load_dotenv\n\nfrom app import create_app\nfrom app.product_catalogue.generate_catalogue import generate_catalogue\n\nload_dotenv(\".env\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(generate_catalogue())\n my_app = create_app()\n my_app.run(\n host=environ.get(\"HOST\") or \"127.0.0.1\",\n port=environ.get(\"PORT\") or 8000,\n debug=environ.get(\"DEBUG\") or False,\n )\n"},"size":{"kind":"number","value":442,"string":"442"}}},{"rowIdx":126422,"cells":{"max_stars_repo_path":{"kind":"string","value":"CoreRoot/urls.py"},"max_stars_repo_name":{"kind":"string","value":"Nathan-E-White/DjangoBackend-ReactFrontend"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169713"},"content":{"kind":"string","value":"#! 
/usr/bin/env python\r\n\"\"\"\r\n------------------------------------------------------------------------------------------------------------------------\r\n ____ __ __ __ __ __\r\n / __ \\__ __/ /_/ /_ ____ ____ / / / /__ ____ _____/ /__ _____\r\n ____________ / /_/ / / / / __/ __ \\/ __ \\/ __ \\ / /_/ / _ \\/ __ `/ __ / _ \\/ ___/ ____________\r\n/_____/_____/ / ____/ /_/ / /_/ / / / /_/ / / / / / __ / __/ /_/ / /_/ / __/ / /_____/_____/\r\n /_/ \\__, /\\__/_/ /_/\\____/_/ /_/ /_/ /_/\\___/\\__,_/\\__,_/\\___/_/\r\n /____/\r\n------------------------------------------------------------------------------------------------------------------------\r\n:FILE: DjangoBackend-ReactFrontend/CoreRoot/urls.py\r\n:AUTHOR: , PhD\r\n:ABOUT: Sets up routers for the Django admin site and api routes\r\n:NOTES: For more information on this file, see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\n------------------------------------------------------------------------------------------------------------------------\r\n\"\"\"\r\n# \r\n\r\n# Imports --- Django Package Imports: admin portal\r\nfrom django.contrib import admin\r\n\r\n# Imports --- Django Package Imports: url building kit\r\nfrom django.urls import include, path\r\n\r\n\r\n# Define the URL patterns to build\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('api/', include(('core.routers', 'core'), namespace = 'core-api')),\r\n]\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# \r\n"},"size":{"kind":"number","value":1687,"string":"1,687"}}},{"rowIdx":126423,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_admin.py"},"max_stars_repo_name":{"kind":"string","value":"bbecquet/idunn"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169864"},"content":{"kind":"string","value":"from idunn.places.admin import Admin\nfrom idunn.places import POI\n\n\ndef test_admin():\n admin = Admin(\n {\n \"zone_type\": \"city\",\n \"codes\": [{\"name\": \"wikidata\", \"value\": \"Q7652\"}],\n \"names\": {\n \"de\": \"Dünkirchen\",\n \"en\": \"Dunkirk\",\n \"es\": \"Dunkerque\",\n \"fr\": \"Dunkerque\",\n \"it\": \"Dunkerque\",\n },\n \"labels\": {\n \"br\": \"Dunkerque (59140-59640), Norzh-Pas-de-Calais, Krec'hioù-Frañs, Bro-C'hall\",\n \"ca\": \"Dunkerque (59140-59640), Nord, Alts de França, França\",\n \"de\": \"Dünkirchen (59140-59640), Nord, Nordfrankreich, Frankreich\",\n \"en\": \"Dunkirk (59140-59640), Nord, Nord-Pas-de-Calais and Picardy, France\",\n \"es\": \"Dunkerque (59140-59640), Norte, Alta Francia, Francia\",\n \"it\": \"Dunkerque (59140-59640), Nord, Nord-Passo di Calais e Piccardia, Francia\",\n },\n }\n )\n\n assert admin.get_name(\"fr\") == \"Dunkerque\"\n assert admin.get_name(\"da\") == \"\"\n assert admin.wikidata_id == \"Q7652\"\n"},"size":{"kind":"number","value":1136,"string":"1,136"}}},{"rowIdx":126424,"cells":{"max_stars_repo_path":{"kind":"string","value":"MaidUtils/init/initcheck.py"},"max_stars_repo_name":{"kind":"string","value":"PolarFill/maid"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170225"},"content":{"kind":"string","value":"def Init():\r\n import platform\r\n import configparser\r\n from config import path\r\n config = configparser.ConfigParser()\r\n config.read(f'{path}/Configurações/Principal.ini')\r\n\r\n if config.get('Geral', 'admin').lower() == 'true' and platform.system() == 'Windows':\r\n import 
ctypes, sys\r\n check_admin = ctypes.windll.shell32.IsUserAnAdmin() #Checando se o usuario atual é admin usando ctypes\r\n if check_admin == 0:\r\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, \" \".join(sys.argv), None, 1) #Reinicia a maid se n tiver adm\r\n exit()\r\n\r\n if config.get('Geral', 'mostrar-terminal').lower() == 'false' and platform.system() == 'Windows':\r\n import win32.lib.win32con as win32con #Importando win32con do pywin32\r\n import win32.win32gui as win32gui #Importando win32gui do pywin32\r\n program = win32gui.GetForegroundWindow() #Anexando a variavel \"program\" com a janela atual do terminal\r\n win32gui.ShowWindow(program, win32con.SW_HIDE) #Escondendo o terminal\r\n \r\n "},"size":{"kind":"number","value":1070,"string":"1,070"}}},{"rowIdx":126425,"cells":{"max_stars_repo_path":{"kind":"string","value":"reader/tokenizers/ltp_tokenizer.py"},"max_stars_repo_name":{"kind":"string","value":"wsdm/RCZoo"},"max_stars_count":{"kind":"number","value":166,"string":"166"},"id":{"kind":"string","value":"2169630"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\nimport os\nimport re\nimport copy\nfrom pyltp import Segmentor, Postagger, NamedEntityRecognizer\nfrom .tokenizer import Tokens, Tokenizer\n\n\nLTP_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),'ltp_data')\ncws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')\nner_model_path = os.path.join(LTP_DATA_DIR, 'ner.model')\n\nclass LtpTokenizer(Tokenizer):\n def __init__(self, **kwargs):\n \"\"\"\n Args:\n\n \"\"\"\n self.segmentor = Segmentor() # 初始化实例\n self.segmentor.load(cws_model_path)\n self.postagger = Postagger() # 初始化实例\n self.postagger.load(pos_model_path) # 加载模型\n self.recognizer = NamedEntityRecognizer()\n self.recognizer.load(ner_model_path) # 加载模型\n self.annotators = copy.deepcopy(kwargs.get('annotators', set()))\n def tokenize(self, text):\n clean_text = text.replace(' ', ',') # for ltp process ,ltp 不把空格当成词的边界\n tokens = list(self.segmentor.segment(clean_text)) # 分词\n postags = list(self.postagger.postag(tokens)) # 词性标注\n netags = list(self.recognizer.recognize(tokens, postags)) # 命名实体识别\n idxs = []\n j = 0\n i = 0\n #print(text)\n #print(tokens)\n #print(postags)\n #print(netags)\n while i < len(tokens):\n #print(clean_text[j:j+len(tokens[i])], tokens[i])\n if clean_text[j:j+len(tokens[i])] == tokens[i]:\n idxs.append(j)\n j += len(tokens[i])\n i += 1\n else:\n j += 1\n #print(i,j)\n data = []\n for i in range(len(tokens)):\n start_ws = idxs[i]\n if i + 1 < len(tokens):\n end_ws = idxs[i+1]\n else:\n end_ws = idxs[i] + len(tokens[i])\n data.append((\n tokens[i],\n text[start_ws:end_ws],\n (idxs[i], idxs[i] + len(tokens[i])),\n postags[i],\n tokens[i],\n netags[i],\n ))\n return Tokens(data, self.annotators)\n\n\n\n"},"size":{"kind":"number","value":2174,"string":"2,174"}}},{"rowIdx":126426,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/constants.py"},"max_stars_repo_name":{"kind":"string","value":"roryj/samquest"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2170227"},"content":{"kind":"string","value":"HELP_MESSAGE_FORMATS = {\n 'General': 'Options: #LetsPlay, #StartGame, #JoinGame, ChooseMe. Type #Help and command for more info.',\n 'LetsPlay': 'Creates a new SAM quest! 
However you can only have one running at a time.',\n 'StartGame': 'Starts a created game.',\n 'JoinGame': 'Joins a created game.',\n 'ChooseMe': 'Make a quest selection. This would look like \\\"#ChooseMe #ReadNote\\\"'\n}\n"},"size":{"kind":"number","value":401,"string":"401"}}},{"rowIdx":126427,"cells":{"max_stars_repo_path":{"kind":"string","value":"hardware/sense_hat/marble_maze.py"},"max_stars_repo_name":{"kind":"string","value":"claremacrae/raspi_code"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170206"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# based on https://www.raspberrypi.org/learning/sense-hat-marble-maze/worksheet/\n\nfrom sense_hat import SenseHat\nfrom time import sleep\n\nsense = SenseHat()\nsense.clear()\n\nr = (255, 0, 0 )\nb = (0,0,0)\nw = (255, 255, 255 )\ng = (0, 255, 0)\n\ndef move_marble(pitch, roll, x, y):\n new_x = x\n new_y = y\n if 1 < pitch < 179 and x != 0:\n new_x -= 1\n elif 359 > pitch > 181 and x != 7:\n new_x += 1\n\n if 1 < roll < 179 and y != 7:\n new_y += 1\n elif 359 > roll > 181 and y != 0:\n new_y -= 1\n\n x,y = check_wall(x, y, new_x, new_y)\n\n return x, y\n\ndef check_wall(x, y, new_x, new_y):\n if maze[new_y][new_x] != r:\n return new_x, new_y\n elif maze[new_y][x] != r:\n return x, new_y\n elif maze[y][new_x] != r:\n return new_x, y\n return x,y\n\ndef check_win(x, y):\n global game_over\n if maze[y][x] == g:\n game_over = True\n sense.show_message('Win!')\n\nwhile True:\n maze = [[r,r,r,r,r,r,r,r],\n [r,b,b,b,b,b,b,r],\n [r,r,r,b,r,b,b,r],\n [r,b,r,b,r,r,r,r],\n [r,b,b,b,b,b,b,r],\n [r,b,r,r,r,r,b,r],\n [r,b,b,r,g,b,b,r],\n [r,r,r,r,r,r,r,r]]\n\n x = 1\n y = 1\n game_over = False\n\n while not game_over:\n pitch = sense.get_orientation()['pitch']\n roll = sense.get_orientation()['roll']\n x,y = move_marble(pitch, roll, x, y)\n check_win(x, y)\n maze[y][x] = w\n sense.set_pixels(sum(maze,[]))\n sleep(0.05)\n maze[y][x] = b\n\n"},"size":{"kind":"number","value":1555,"string":"1,555"}}},{"rowIdx":126428,"cells":{"max_stars_repo_path":{"kind":"string","value":"administer.py"},"max_stars_repo_name":{"kind":"string","value":"fabriciocaetano/administer"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169620"},"content":{"kind":"string","value":"#! 
bin/bash/python3\n# -*- coding: UTF-8 -*-\n'''\nAUTOR:Fabrício\nOBJETIVO: gerenciar o seu dinheiro para manter um equilíbrio\nentre o que você pode e não pode gastar, usando uma metodologia \nque visa ajudalo a não ficar sem dinheiro como que você realmente \nprecisa, uma ferramenta que sabe dar prioridade para o que \nrealmente é escencial para sua vivência equilibrada.\nSTATUS: em desenvolvimento ...\nDEPENDÊNCIAS:\npython3, numpy\n'''\nimport os\nimport sys\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nos.system('clear')\n\ndef menu():\n print (30*\"-\",\"MENU\",30*\"-\")\n print(\"[1] para adicionar salario adquirido\")\n print(\"[2] para ver gráfico para gastos\")\n print(\"[3] para subtrair valores de tópico\")\n print(\"[4] para ver a percentagem de cada tópico\")\n print(\"[5] para sair\")\n print(\"[6] resetar banco de dados\")\n print(60*\"-\")\n\ndef logo():\n print(' %%%% %%%%% %% %% %%%%%% %% %% %%%%%% %%%% %%%%%% %%%%%% %%%%% ')\n print(' %% %% %% %% %%% %%% %% %%% %% %% %% %% %% %% %% ')\n print(' %%%%%% %% %% %% % %% %% %% %%% %% %%%% %% %%%% %%%%% ')\n print(' %% %% %% %% %% %% %% %% %% %% %% %% %% %% %% ')\n print(' %% %% %%%%% %% %% %%%%%% %% %% %%%%%% %%%% %% %%%%%% %% %% ')\n print('.................................................................................')\n\narquivo = open('imp.gener','a')\narquivo.write(str(0))\narquivo.close()\narquivo = open('apn.gener','a')\narquivo.write(str(0))\narquivo.close()\narquivo = open('dis.gener','a')\narquivo.write(str(0))\narquivo.close()\n\ndef recalcsaldo():\n arquivo = open('imp.gener','r')\n imp=arquivo.read()\n arquivo.close()\n\n arquivo = open('apn.gener','r')\n apn=arquivo.read()\n arquivo.close()\n\n arquivo = open('dis.gener','r')\n dis=arquivo.read()\n arquivo.close()\n\n return float(imp)+float(apn)+float(dis)\n\nlaço=True\n\nwhile (laço):\n\n saldo=recalcsaldo()\n os.system('clear')\n logo()\n print('seu saldo atual:',\"%.2f\"%saldo,\"R$\")\n menu()\n\n opção=int(input('digite a opção desejada:'))\n\n if opção == 1 :\n adicional=input(\"digite aqui o valor adicional do seu saldo:\")\n adicional2=input(\"agora, digite novamente para termos serteza antes de adicionar:\")\n\n if adicional == adicional2:\n\n arquivo = open('imp.gener','r')\n imp=arquivo.read()\n arquivo.close()\n arquivo = open('apn.gener','r')\n apn=arquivo.read()\n arquivo.close()\n arquivo = open('dis.gener','r')\n dis=arquivo.read()\n arquivo.close()\n\n restrair1=float(imp)\n impres=restrair1+(float(adicional)*60)/100\n\n restrair2=float(apn)\n apenes=restrair2+(float(adicional)*30)/100\n\n restrair3=float(dis)\n dispen=restrair3+(float(adicional)*10)/100\n\n arquivo=open(\"imp.gener\",\"w\")\n arquivo.write(str(impres))\n arquivo.close()\n arquivo=open(\"apn.gener\",\"w\")\n arquivo.write(str(apenes))\n arquivo.close()\n arquivo=open(\"dis.gener\",\"w\")\n arquivo.write(str(dispen))\n arquivo.close()\n pass\n\n print(\"valor adicionado!\")\n time.sleep(1)\n\n else :\n print(\"ERRO!. 
DIGITE O VALOR CORRETAMENTE\")\n time.sleep(1)\n\n if opção == 2:\n\n arquivo = open('imp.gener','r')\n imp=arquivo.read()\n arquivo.close()\n arquivo = open('apn.gener','r')\n apn=arquivo.read()\n arquivo.close()\n arquivo = open('dis.gener','r')\n dis=arquivo.read()\n arquivo.close()\n\n \n luzagua=float(imp)*10/100\n comida=float(imp)*15/100\n aluguel=float(imp)*35/100\n\n apenas=float(apn)*3/100\n\n savel=float(dis)*10/100\n\n os.system('clear')\n print(60*\"=\")\n print(\"saldo atual:\",\"%.2f\"%saldo,\"R$\")\n print(60*\"=\")\n print(\"VALORES TOTAIS DOS TÓPICOS:\")\n print(60*\"*\")\n print(\"IMPRESCINDÍVEIS TOTAl:\",imp,\"R$\")\n print(\"SUBVALOR:\")\n print(\"luz:\",\"%.2f\"%luzagua,\"R$\")\n print(\"água:\",\"%.2f\"%luzagua,\"R$\")\n print(\"comida:\",\"%.2f\"%comida,\"R$\")\n print(\"aluguel:\",\"%.2f\"%aluguel,\"R$\")\n print(60*\"_\")\n print(\"APENAS NECESSÁRIO TOTAl:\",apn,\"R$\")\n print(\"SUBVALOR:\")\n print(\"internet:\",\"%.2f\"%apenas,\"R$\")\n print(\"livros:\",\"%.2f\"%apenas,\"R$\")\n print(\"cursos:\",\"%.2f\"%apenas,\"R$\")\n print(\"laser:\",\"%.2f\"%apenas,\"R$\")\n print(\"investir:\",\"%.2f\"%apenas,\"R$\")\n print(\"reformas:\",\"%.2f\"%apenas,\"R$\")\n print(\"móveis:\",\"%.2f\"%apenas,\"R$\")\n print(\"férias:\",\"%.2f\"%apenas,\"R$\")\n print(\"celular:\",\"%.2f\"%apenas,\"R$\")\n print(\"compútador:\",\"%.2f\"%apenas,\"R$\")\n print(60*\"_\")\n print(\"DISPENSÁVEL TOTAl:\",dis,\"R$\")\n print(\"SUBVALOR:\")\n print(\"diversão 'sair no dia a dia':\",\"%.2f\"%savel,\"R$\")\n print(\"viagens:\",\"%.2f\"%savel,\"R$\")\n print(\"comprar besteiras:\",\"%.2f\"%savel,\"R$\")\n print(60*\"_\")\n print(\"\\n\")\n decid=str(input('você gostaria de um gráfico dos valores [s/n]?'))\n if decid == \"s\":\n print(\"plotando gráfico ...\")\n\n\n plt.style.use('dark_background')\n x=[3,2,1]\n y=[dis,apn,imp]\n x1=[3,2,1]\n y1=[dis,apn,imp]\n x3=[3,2,1]\n y3=[0,0,imp]\n #plt.plot(0,4,0,saldo/(saldo*99/100))\n plt.title('equilibribrio de valores, quanto maior a dobra, maior o desequilíbreo')\n plt.xlabel('imprescindivel =1 necessário=2 dispensável=3')\n plt.ylabel('valores em R$')\n plt.bar(x,y, label = 'valores', color='white')\n plt.plot(x1,y1, label = 'equilibrio', color='red')\n if float(imp) <= 200:\n plt.title('ALERTA! 
FALTA DE DINHEIRO PARA COISAS IMPRESCINDÍVEIS',color='red')\n plt.show()\n\n\n\n if opção == 3:\n os.system('clear')\n logo()\n print('selecione um tópico para subtrair:')\n print(60*\"_\")\n arquivo = open('imp.gener','r')\n imp=arquivo.read()\n arquivo.close()\n print(\"[1] IMPRESCINDÍVEIS:\",\"%.2f\"%float(imp),\"R$\")\n print(60*\"_\")\n arquivo = open('apn.gener','r')\n apn=arquivo.read()\n arquivo.close()\n print(\"[2] APENAS NECESSÁRIO:\",\"%.2f\"%float(apn),\"R$\")\n print(60*\"_\")\n arquivo = open('dis.gener','r')\n dis=arquivo.read()\n arquivo.close()\n print(\"[3] DISPENSÁVEL:\",\"%.2f\"%float(dis),\"R$\")\n print(60*\"_\")\n print(\"[4] sair\")\n print(\"\\n\")\n\n op=int(input(\"digite a opção desejada:\"))\n\n if op == 1:\n subop=int(input(\"digite o valor a retirar:\"))\n registrar=float(imp)-float(subop)\n arquivo=open(\"imp.gener\",\"w\")\n arquivo.write(str(registrar))\n arquivo.close()\n print(\"RETIRADO!\")\n time.sleep(1)\n\n if op == 2:\n subop=int(input(\"digite o valor a retirar:\"))\n registrar=float(apn)-float(subop)\n arquivo=open(\"apn.gener\",\"w\")\n arquivo.write(str(registrar))\n arquivo.close()\n\n print(\"RETIRADO!\")\n time.sleep(1)\n\n if op == 3:\n subop=int(input(\"digite o valor a retirar:\"))\n registrar=float(dis)-float(subop)\n arquivo=open(\"dis.gener\",\"w\")\n arquivo.write(str(registrar))\n arquivo.close()\n\n print(\"RETIRADO!\")\n time.sleep(1)\n\n if op == 4:\n print(\"SAINDO ...\")\n time.sleep(1)\n\n\n if opção == 4:\n os.system('clear')\n print(60*\"=\")\n print(\"TÓPICOS:\")\n print(60*\"=\")\n print(\"IMPRESCINDÍVEIS:60%\")\n print(60*\"_\")\n print(\"luz:20%\")\n print(\"água:20%\")\n print(\"comida:15%\")\n print(\"aluguel:35%\")\n print(\"outros ...\")\n print(60*\"_\")\n print(\"APENAS NECESSÁRIO:30%\")\n print(60*\"_\")\n print(\"internet:3%\")\n print(\"livros:3%\")\n print(\"cursos:3%\")\n print(\"laser:3%\")\n print(\"investir:3%\")\n print(\"reformas:3%\")\n print(\"móveis:3%\")\n print(\"férias:3%\")\n print(\"celular:3%\")\n print(\"compútador:3%\")\n print(60*\"_\")\n print(\"DISPENSÁVEL:10%\")\n print(60*\"_\")\n print(\"diversão 'sair no dia a dia':10%\")\n print(\"viagens:10%\")\n print(\"comprar besteiras:10%\")\n print(\"\\n\")\n vari=input('dê ENTER para sair!')\n\n if opção == 5:\n laço=False\n\n if opção == 6:\n decid=str(input(\"deseja mesmo resetar o banco de dados [s/n] ?\"))\n if decid == \"s\":\n print(\"resetando ...\")\n arquivo=open(\"imp.gener\",\"w\")\n arquivo.write(str(0))\n arquivo.close()\n arquivo=open(\"apn.gener\",\"w\")\n arquivo.write(str(0))\n arquivo.close()\n arquivo=open(\"dis.gener\",\"w\")\n arquivo.write(str(0))\n arquivo.close()\n time.sleep(1)\n\n else:\n print(\"bye\")\n time.sleep(1)\n\n"},"size":{"kind":"number","value":8313,"string":"8,313"}}},{"rowIdx":126429,"cells":{"max_stars_repo_path":{"kind":"string","value":"guitarpractice/exercises/scale_shapes.py"},"max_stars_repo_name":{"kind":"string","value":"craigargh/guitar-practice-core"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169555"},"content":{"kind":"string","value":"import random\nfrom functools import partial\n\nfrom guitarpractice import pickpatterns\nfrom guitarpractice.models import Sequence, Beat\nfrom guitarpractice.sequencer import make_sequence\nfrom guitarpractice.shapes.scale_collections import c_major_pentatonic_modes, c_major_modes\nfrom guitarpractice.shapeshifters import shift_vertically\n\n\ndef scale_shapes(variation: str):\n variations = {\n 'major': 
major_scale,\n 'major-pentatonic': major_pentatonic_scale,\n }\n variation_function = variations[variation]\n return variation_function()\n\n\ndef major_pentatonic_scale() -> Sequence:\n shape = random.choice(c_major_pentatonic_modes())\n pattern = random.choice([\n pickpatterns.asc,\n pickpatterns.desc,\n pickpatterns.asc_and_desc,\n partial(pickpatterns.alternating_bass_and_asc, length=10)\n ])\n rhythm = [Beat(duration=1, division=8)]\n lowest_fret = random.randrange(1, 13)\n\n return make_sequence(\n shapes=[shape],\n rhythm=rhythm,\n pick_pattern=pattern,\n shape_shifters=[partial(shift_vertically, lowest_fret=lowest_fret)],\n shape_labels=True,\n )\n\n\ndef major_scale() -> Sequence:\n shape = random.choice(c_major_modes())\n pattern = random.choice([\n pickpatterns.asc,\n pickpatterns.desc,\n pickpatterns.asc_and_desc,\n partial(pickpatterns.alternating_bass_and_asc, length=14)\n ])\n rhythm = [Beat(duration=1, division=8)]\n lowest_fret = random.randrange(1, 13)\n\n return make_sequence(\n shapes=[shape],\n rhythm=rhythm,\n pick_pattern=pattern,\n shape_shifters=[partial(shift_vertically, lowest_fret=lowest_fret)],\n shape_labels=True,\n )\n"},"size":{"kind":"number","value":1713,"string":"1,713"}}},{"rowIdx":126430,"cells":{"max_stars_repo_path":{"kind":"string","value":"downloaded_kernels/university_rankings/parsed_kernels/kernel_168.py"},"max_stars_repo_name":{"kind":"string","value":"josepablocam/common-code-extraction"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169771"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\n# **Teeme läbi väikesed harjutused, et hiljem oleks lihtsam kodutööd teha.**\n# \n# \n\n# In[ ]:\n\n\nimport numpy as np\nimport pandas as pd \n\ndf = pd.read_csv(\"../input/cwurData.csv\")\n\n\n# 1) Leia kaggle’st dataset ‘World University Rankings’\n# \n# 2) Tee uus kernel (notebook)\n# \n# 3) Loe andmed dataseti failist ‘cwurData.csv’\n# \n# 4) Kuva andmed tabelina\n\n# In[ ]:\n\n\ndf\n\n\n# 5) Kuva tabelist read, mis käivad Eesti ülikoolide kohta\n\n# In[ ]:\n\n\ndf.loc[df[\"country\"] == \"Estonia\"]\n\n\n# 6) Kuva keskmine hariduse kvaliteedi näitaja grupeerituna riikide kaupa\n\n# In[ ]:\n\n\nquality_of_edu_mean = pd.DataFrame(df.groupby('country').quality_of_education.mean())\nquality_of_edu_mean\n\n\n# 7) Järjesta saadud andmed keskmise hariduse kvaliteedi näitaja järgi kahanevalt\n# \n# Vihjed: Pane eelmise ülesande andmed uude DataFrame ning sorteeri uus DataFrame\n\n# In[ ]:\n\n\nquality_of_edu_mean.sort_values('quality_of_education', ascending=False)\n\n\n# 8) Leida mitu korda iga riigi ülikoole tabelis esineb\n# \n\n# In[ ]:\n\n\nuni_frequency = pd.DataFrame(df.groupby('country').size())\nuni_frequency.rename(index=str, columns={0:\"frequency\"}, inplace=True)\nuni_frequency.sort_values(\"frequency\", ascending=False)\n\n\n# 8) a) Leida täpsemalt ainult 2015. 
aasta tulemuste kohta\n\n# In[ ]:\n\n\nuni_frequency_2015 = pd.DataFrame(df[df.year == 2015].groupby('country').size())\nuni_frequency_2015.rename(index=str, columns={0:\"frequency\"}, inplace=True)\nuni_frequency_2015.sort_values(\"frequency\", ascending=False)\n\n\n# 9) Mitu ülikooli on välja andnud n publikatsiooni.\n\n# In[ ]:\n\n\ndf[\"publications\"].plot.hist(title=\"N publikatsiooni välja andnud ülikoolide arv\", rwidth=0.9, grid=True, color=\"m\");\n\n\n# 10) Kuidas on seotud ülikoolide välja antud publikatsioonide arv tsiteerimiste arvuga.\n\n# In[ ]:\n\n\npublications_mean = pd.DataFrame(df.groupby('institution').publications.mean())\ncitations_mean = pd.DataFrame(df.groupby('institution').citations.mean())\ninfo = np.array([publications_mean[\"publications\"], citations_mean[\"citations\"]])\n\nscatter_table = pd.DataFrame(data=info[0:], index=[\"publications\", \"citations\"]).transpose()\nscatter_table.plot.scatter(\"publications\",\"citations\", marker=\".\", alpha=0.3, color=\"m\");\n\n\n# \n"},"size":{"kind":"number","value":2224,"string":"2,224"}}},{"rowIdx":126431,"cells":{"max_stars_repo_path":{"kind":"string","value":"utils/utils.py"},"max_stars_repo_name":{"kind":"string","value":"ihsangkcl/RFM"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2166841"},"content":{"kind":"string","value":"from __future__ import print_function, division, absolute_import\r\nimport torch\r\nimport numpy as np\r\nimport random\r\nfrom sklearn.metrics import average_precision_score, accuracy_score, roc_curve, auc\r\n\r\n__all__ = [\"data_prefetcher\", \"data_prefetcher_two\", \"cal_fam\", \"cal_normfam\", \"setup_seed\", \"l2_norm\", \"calRes\"]\r\n\r\n\r\ndef setup_seed(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n torch.backends.cudnn.deterministic = True\r\n\r\n\r\nclass data_prefetcher():\r\n def __init__(self, loader):\r\n self.stream = torch.cuda.Stream()\r\n self.loader = iter(loader)\r\n self.preload()\r\n\r\n def preload(self):\r\n try:\r\n self.next_input, self.next_target = next(self.loader)\r\n except StopIteration:\r\n self.next_input = None\r\n self.next_target = None\r\n return\r\n with torch.cuda.stream(self.stream):\r\n self.next_input = self.next_input.cuda(non_blocking=True).float()\r\n self.next_target = self.next_target.cuda(non_blocking=True).long()\r\n\r\n def next(self):\r\n torch.cuda.current_stream().wait_stream(self.stream)\r\n input = self.next_input\r\n target = self.next_target\r\n self.preload()\r\n return input, target\r\n\r\n\r\nclass data_prefetcher_two():\r\n def __init__(self, loader1, loader2):\r\n self.stream = torch.cuda.Stream()\r\n self.loader1 = iter(loader1)\r\n self.loader2 = iter(loader2)\r\n self.preload()\r\n\r\n def preload(self):\r\n try:\r\n tmp_input1, tmp_target1 = next(self.loader1)\r\n tmp_input2, tmp_target2 = next(self.loader2)\r\n self.next_input, self.next_target = torch.cat((tmp_input1, tmp_input2)), torch.cat((tmp_target1, tmp_target2))\r\n\r\n except StopIteration:\r\n self.next_input = None\r\n self.next_target = None\r\n return\r\n with torch.cuda.stream(self.stream):\r\n self.next_input = self.next_input.cuda(non_blocking=True).float()\r\n self.next_target = self.next_target.cuda(non_blocking=True).long()\r\n\r\n def next(self):\r\n torch.cuda.current_stream().wait_stream(self.stream)\r\n input = self.next_input\r\n target = self.next_target\r\n self.preload()\r\n return input, target\r\n\r\n\r\ndef 
l2_norm(input, axis=1):\r\n norm = torch.norm(input, 2, axis, True)\r\n output = torch.div(input, norm+1e-8)\r\n return output\r\n\r\n\r\ndef cal_fam(model, inputs):\r\n model.zero_grad()\r\n inputs = inputs.detach().clone()\r\n inputs.requires_grad_()\r\n output = model(inputs)\r\n\r\n target = output[:, 1]-output[:, 0]\r\n target.backward(torch.ones(target.shape).cuda())\r\n fam = torch.abs(inputs.grad)\r\n fam = torch.max(fam, dim=1, keepdim=True)[0]\r\n# print(fam.shape)\r\n# with open('batchfamtrain.npy', 'wb') as f:\r\n# np.save(f, fam.cpu())\r\n# with open('famtrain.npy', 'wb') as f:\r\n# np.save(f, fam[0].cpu())\r\n return fam\r\n\r\n\r\ndef cal_normfam(model, inputs):\r\n fam = cal_fam(model, inputs)\r\n _, x, y = fam[0].shape\r\n fam = torch.nn.functional.interpolate(fam, (int(y/2), int(x/2)), mode='bilinear', align_corners=False)\r\n fam = torch.nn.functional.interpolate(fam, (y, x), mode='bilinear', align_corners=False)\r\n for i in range(len(fam)):\r\n fam[i] -= torch.min(fam[i])\r\n fam[i] /= torch.max(fam[i])\r\n# print(fam.shape)\r\n# with open('batchfamAVG.npy', 'wb') as f:\r\n# np.save(f, fam.cpu())\r\n# with open('famavg.npy', 'wb') as f:\r\n# np.save(f, fam[0].cpu())\r\n return fam\r\n\r\n\r\ndef calRes(y_true_all, y_pred_all):\r\n y_true_all, y_pred_all = np.array(\r\n y_true_all.cpu()), np.array(y_pred_all.cpu())\r\n\r\n fprs, tprs, ths = roc_curve(\r\n y_true_all, y_pred_all, pos_label=1, drop_intermediate=False)\r\n\r\n acc = accuracy_score(y_true_all, np.where(y_pred_all >= 0.5, 1, 0))*100.\r\n\r\n ind = 0\r\n for fpr in fprs:\r\n if fpr > 1e-2:\r\n break\r\n ind += 1\r\n TPR_2 = tprs[ind-1]\r\n\r\n ind = 0\r\n for fpr in fprs:\r\n if fpr > 1e-3:\r\n break\r\n ind += 1\r\n TPR_3 = tprs[ind-1]\r\n\r\n ind = 0\r\n for fpr in fprs:\r\n if fpr > 1e-4:\r\n break\r\n ind += 1\r\n TPR_4 = tprs[ind-1]\r\n\r\n ap = average_precision_score(y_true_all, y_pred_all)\r\n return ap, acc, auc(fprs, tprs), TPR_2, TPR_3, TPR_4, fprs, tprs, ths\r\n"},"size":{"kind":"number","value":4461,"string":"4,461"}}},{"rowIdx":126432,"cells":{"max_stars_repo_path":{"kind":"string","value":"espnet/nets/pytorch_backend/e2e_mt.py"},"max_stars_repo_name":{"kind":"string","value":"szmmm/speechchain"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2168563"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Copyright 2019 Kyoto University ()\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\nfrom __future__ import division\n\nimport logging\nimport math\nimport os\n\nimport chainer\nimport numpy as np\nimport six\nimport torch\n\nfrom chainer import reporter\n\nfrom espnet.nets.e2e_asr_common import label_smoothing_dist\nfrom espnet.nets.mt_interface import MTInterface\n\nfrom espnet.nets.pytorch_backend.nets_utils import pad_list\nfrom espnet.nets.pytorch_backend.nets_utils import to_device\nfrom espnet.nets.pytorch_backend.rnn.attentions import att_for\nfrom espnet.nets.pytorch_backend.rnn.decoders import decoder_for\nfrom espnet.nets.pytorch_backend.rnn.encoders import encoder_for\n\n\nclass Reporter(chainer.Chain):\n \"\"\"A chainer reporter wrapper\"\"\"\n\n def report(self, loss, acc, ppl):\n reporter.report({'loss': loss}, self)\n reporter.report({'acc': acc}, self)\n reporter.report({'ppl': ppl}, self)\n\n\nclass E2E(MTInterface, torch.nn.Module):\n \"\"\"E2E module\n\n :param int idim: dimension of inputs\n :param int odim: dimension of outputs\n :param Namespace args: argument Namespace containing 
options\n \"\"\"\n\n def __init__(self, idim, odim, args):\n super(E2E, self).__init__()\n torch.nn.Module.__init__(self)\n self.etype = args.etype\n self.verbose = args.verbose\n self.char_list = args.char_list\n self.outdir = args.outdir\n self.reporter = Reporter()\n\n # below means the last number becomes eos/sos ID\n # note that sos/eos IDs are identical\n self.sos = odim - 1\n self.eos = odim - 1\n self.pad = odim\n\n # subsample info\n # +1 means input (+1) and layers outputs (args.elayer)\n subsample = np.ones(args.elayers + 1, dtype=np.int)\n logging.warning('Subsampling is not performed for machine translation.')\n logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))\n self.subsample = subsample\n\n # label smoothing info\n if args.lsm_type and os.path.isfile(args.train_json):\n logging.info(\"Use label smoothing with \" + args.lsm_type)\n labeldist = label_smoothing_dist(odim, args.lsm_type, transcript=args.train_json)\n else:\n labeldist = None\n\n # multilingual related\n self.replace_sos = args.replace_sos\n\n # encoder\n self.embed_src = torch.nn.Embedding(idim + 1, args.eunits, padding_idx=idim)\n # NOTE: +1 means the padding index\n self.dropout_emb_src = torch.nn.Dropout(p=args.dropout_rate)\n self.enc = encoder_for(args, args.eunits, self.subsample)\n # attention\n self.att = att_for(args)\n # decoder\n self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)\n\n # weight initialization\n self.init_like_chainer()\n\n self.rnnlm = None\n\n self.logzero = -10000000000.0\n self.loss = None\n self.acc = None\n\n def init_like_chainer(self):\n \"\"\"Initialize weight like chainer\n\n chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0\n pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)\n\n however, there are two exceptions as far as I know.\n - EmbedID.W ~ Normal(0, 1)\n - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)\n \"\"\"\n\n def lecun_normal_init_parameters(module):\n for p in module.parameters():\n data = p.data\n if data.dim() == 1:\n # bias\n data.zero_()\n elif data.dim() == 2:\n # linear weight\n n = data.size(1)\n stdv = 1. / math.sqrt(n)\n data.normal_(0, stdv)\n elif data.dim() in (3, 4):\n # conv weight\n n = data.size(1)\n for k in data.size()[2:]:\n n *= k\n stdv = 1. / math.sqrt(n)\n data.normal_(0, stdv)\n else:\n raise NotImplementedError\n\n def set_forget_bias_to_one(bias):\n n = bias.size(0)\n start, end = n // 4, n // 2\n bias.data[start:end].fill_(1.)\n\n lecun_normal_init_parameters(self)\n # exceptions\n # embed weight ~ Normal(0, 1)\n self.dec.embed.weight.data.normal_(0, 1)\n # forget-bias = 1.0\n # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745\n for l in six.moves.range(len(self.dec.decoder)):\n set_forget_bias_to_one(self.dec.decoder[l].bias_ih)\n\n def forward(self, xs_pad, ilens, ys_pad):\n \"\"\"E2E forward\n\n :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)\n :param torch.Tensor ilens: batch of lengths of input sequences (B)\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :return: loss value\n :rtype: torch.Tensor\n \"\"\"\n # 1. Encoder\n xs_pad, ys_pad, tgt_lang_ids = self.target_lang_biasing_train(xs_pad, ilens, ys_pad)\n hs_pad, hlens, _ = self.enc(self.dropout_emb_src(self.embed_src(xs_pad)), ilens)\n\n # 3. 
attention loss\n loss, acc, ppl = self.dec(hs_pad, hlens, ys_pad, tgt_lang_ids=tgt_lang_ids)\n self.acc = acc\n self.ppl = ppl\n\n self.loss = loss\n loss_data = float(self.loss)\n if not math.isnan(loss_data):\n self.reporter.report(loss_data, acc, ppl)\n else:\n logging.warning('loss (=%f) is not correct', loss_data)\n return self.loss\n\n def target_lang_biasing_train(self, xs_pad, ilens, ys_pad):\n \"\"\"Replace with target language IDs for multilingual MT during training.\n\n :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)\n :param torch.Tensor ilens: batch of lengths of input sequences (B)\n :return: source text without language IDs\n :rtype: torch.Tensor\n :return: target text without language IDs\n :rtype: torch.Tensor\n :return: target language IDs\n :rtype: torch.Tensor (B, 1)\n \"\"\"\n tgt_lang_ids = None\n if self.replace_sos:\n # remove language ID in the beggining\n tgt_lang_ids = ys_pad[:, 0].unsqueeze(1)\n xs_pad = xs_pad[:, 1:]\n ys_pad = ys_pad[:, 1:]\n ilens -= 1\n return xs_pad, ys_pad, tgt_lang_ids\n\n def translate(self, x, trans_args, char_list, rnnlm=None):\n \"\"\"E2E beam search\n\n :param ndarray x: input source text feature (T, D)\n :param Namespace trans_args: argument Namespace containing options\n :param list char_list: list of characters\n :param torch.nn.Module rnnlm: language model module\n :return: N-best decoding results\n :rtype: list\n \"\"\"\n prev = self.training\n self.eval()\n\n # 1. encoder\n # make a utt list (1) to use the same interface for encoder\n if self.replace_sos:\n ilen = [len(x[0][1:])]\n h = to_device(self, torch.from_numpy(np.fromiter(map(int, x[0][1:]), dtype=np.int64)))\n else:\n ilen = [len(x[0])]\n h = to_device(self, torch.from_numpy(np.fromiter(map(int, x[0]), dtype=np.int64)))\n hs, _, _ = self.enc(self.dropout_emb_src(self.embed_src(h.unsqueeze(0))), ilen)\n\n # 2. decoder\n # decode the first utterance\n y = self.dec.recognize_beam(hs[0], None, trans_args, char_list, rnnlm)\n\n if prev:\n self.train()\n return y\n\n def translate_batch(self, xs, trans_args, char_list, rnnlm=None):\n \"\"\"E2E beam search\n\n :param list xs: list of input source text feature arrays [(T_1, D), (T_2, D), ...]\n :param Namespace trans_args: argument Namespace containing options\n :param list char_list: list of characters\n :param torch.nn.Module rnnlm: language model module\n :return: N-best decoding results\n :rtype: list\n \"\"\"\n prev = self.training\n self.eval()\n\n # 1. Encoder\n if self.replace_sos:\n ilens = np.fromiter((len(xx[1:]) for xx in xs), dtype=np.int64)\n hs = [to_device(self, torch.from_numpy(xx[1:])) for xx in xs]\n else:\n ilens = np.fromiter((len(xx) for xx in xs), dtype=np.int64)\n hs = [to_device(self, torch.from_numpy(xx)) for xx in xs]\n xpad = pad_list(hs, self.pad)\n hs_pad, hlens, _ = self.enc(self.dropout_emb_src(self.embed_src(xpad)), ilens)\n\n # 2. 
Decoder\n hlens = torch.tensor(list(map(int, hlens))) # make sure hlens is tensor\n y = self.dec.recognize_beam_batch(hs_pad, hlens, None, trans_args, char_list, rnnlm)\n\n if prev:\n self.train()\n return y\n\n def calculate_all_attentions(self, xs_pad, ilens, ys_pad):\n \"\"\"E2E attention calculation\n\n :param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)\n :param torch.Tensor ilens: batch of lengths of input sequences (B)\n :param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)\n :return: attention weights with the following shape,\n 1) multi-head case => attention weights (B, H, Lmax, Tmax),\n 2) other case => attention weights (B, Lmax, Tmax).\n :rtype: float ndarray\n \"\"\"\n with torch.no_grad():\n # 1. Encoder\n xs_pad, ys_pad, tgt_lang_ids = self.target_lang_biasing_train(xs_pad, ilens, ys_pad)\n hpad, hlens, _ = self.enc(self.dropout_emb_src(self.embed_src(xs_pad)), ilens)\n\n # 2. Decoder\n att_ws = self.dec.calculate_all_attentions(hpad, hlens, ys_pad, tgt_lang_ids=tgt_lang_ids)\n\n return att_ws\n"},"size":{"kind":"number","value":9939,"string":"9,939"}}},{"rowIdx":126433,"cells":{"max_stars_repo_path":{"kind":"string","value":"openpype/entities/models/fields.py"},"max_stars_repo_name":{"kind":"string","value":"pypeclub/openpype4-backend"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2169888"},"content":{"kind":"string","value":"\"\"\"Entity field definitions.\n\nThis module contains the top-level field definitions for the entities.\nEach field has its own column in the database.\n\nFields `id`, `created_at` and `updated_at` as well as `attrib` and `data`\nare not part of the definition, since they are added\nautomatically by ModelSet class.\n\nSee .generator.FieldDefinition model for more information on specifiing\nfield parameters.\n\"\"\"\n\nfrom openpype.types import ENTITY_ID_EXAMPLE, ENTITY_ID_REGEX, NAME_REGEX\n\nproject_fields = [\n # Name is not here, since it's added by ModelSet class\n # (it is used as a primary key)\n {\n \"name\": \"code\",\n \"type\": \"string\",\n \"regex\": NAME_REGEX,\n \"example\": \"prj\",\n \"title\": \"Project code\",\n \"required\": True,\n },\n {\n \"name\": \"library\",\n \"type\": \"boolean\",\n \"default\": False,\n },\n {\n \"name\": \"folder_types\",\n \"type\": \"dict\",\n \"default\": {},\n \"title\": \"Folder types\",\n \"example\": {\n \"Asset\": {\"icon\": \"asset\"},\n \"Shot\": {\"icon\": \"shot\"},\n \"Sequence\": {\"icon\": \"sequence\"},\n },\n },\n {\n \"name\": \"task_types\",\n \"type\": \"dict\",\n \"default\": {},\n \"title\": \"Task types\",\n \"example\": {\n \"Rigging\": {},\n \"Animation\": {},\n },\n },\n {\n \"name\": \"config\",\n \"type\": \"dict\",\n \"default\": {},\n \"title\": \"Project config\",\n },\n]\n\n\nfolder_fields = [\n {\n \"name\": \"name\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Folder name\",\n \"regex\": NAME_REGEX,\n \"example\": \"bush\",\n },\n {\n \"name\": \"folder_type\",\n \"type\": \"string\",\n \"required\": False,\n \"title\": \"Folder type\",\n \"example\": \"Asset\",\n },\n {\n \"name\": \"parent_id\",\n \"type\": \"string\",\n \"title\": \"Parent ID\",\n \"description\": \"Parent folder ID in the hierarchy\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n \"name\": \"thumbnail_id\",\n \"type\": \"string\",\n \"title\": \"Thumbnail ID\",\n \"required\": False,\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n 
\"name\": \"path\",\n \"type\": \"string\",\n \"title\": \"Path\",\n \"example\": \"assets/characters/st_javelin\",\n \"dynamic\": True,\n },\n]\n\n\ntask_fields = [\n {\n \"name\": \"name\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Folder ID\",\n \"regex\": NAME_REGEX,\n \"example\": \"modeling\",\n },\n {\n \"name\": \"task_type\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Task type\",\n \"example\": \"Modeling\",\n },\n {\n \"name\": \"assignees\",\n \"type\": \"list_of_strings\",\n \"title\": \"Assignees\",\n \"description\": \"List of users assigned to the task\",\n \"example\": [\"john_doe\", \"jane_doe\"],\n },\n {\n \"name\": \"folder_id\",\n \"type\": \"string\",\n \"title\": \"Folder ID\",\n \"description\": \"Folder ID\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n]\n\n\nsubset_fields = [\n {\n \"name\": \"name\",\n \"type\": \"string\",\n \"required\": True,\n \"description\": \"Name of the subset\",\n \"regex\": NAME_REGEX,\n \"example\": \"modelMain\",\n },\n {\n \"name\": \"folder_id\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Folder ID\",\n \"description\": \"ID of the parent folder\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n \"name\": \"family\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Family\",\n \"description\": \"Subset family\",\n \"regex\": NAME_REGEX,\n \"example\": \"model\",\n },\n]\n\n\nversion_fields = [\n {\n \"name\": \"version\",\n \"type\": \"integer\",\n \"required\": True,\n \"title\": \"Version\",\n \"description\": \"Version number\",\n \"example\": 1,\n },\n {\n \"name\": \"subset_id\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Subset ID\",\n \"description\": \"ID of the parent subset\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n \"name\": \"task_id\",\n \"type\": \"string\",\n \"required\": False,\n \"title\": \"Task ID\",\n \"description\": \"\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n \"name\": \"thumbnail_id\",\n \"type\": \"string\",\n \"title\": \"Thumbnail ID\",\n \"required\": False,\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n {\n \"name\": \"author\",\n \"type\": \"string\",\n \"regex\": NAME_REGEX,\n \"example\": \"john_doe\",\n },\n]\n\n\nrepresentation_fields = [\n {\n \"name\": \"name\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Name\",\n \"description\": \"The name of the representation\",\n \"example\": \"ma\",\n \"regex\": NAME_REGEX,\n },\n {\n \"name\": \"version_id\",\n \"type\": \"string\",\n \"required\": True,\n \"title\": \"Version ID\",\n \"description\": \"ID of the parent version\",\n \"regex\": ENTITY_ID_REGEX,\n \"example\": ENTITY_ID_EXAMPLE,\n },\n]\n"},"size":{"kind":"number","value":5386,"string":"5,386"}}},{"rowIdx":126434,"cells":{"max_stars_repo_path":{"kind":"string","value":"dev/src/qtl/residualize.py"},"max_stars_repo_name":{"kind":"string","value":"iamjli/AnswerALS_QTL"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169898"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom src import logger, base_dir\n\n\nclass Residualizer(object):\n\t\"\"\"\n\tBased on `tensorqtl.core.Residualizer` but added support for dataframes.\n\t\"\"\"\n\tdef __init__(self, C):\n\t\t\"\"\"\n\t\tC: samples x covariates\n\t\t\"\"\"\n\t\tself.C = C\n\t\tif 
isinstance(C, pd.DataFrame): \n\t\t\tif not C.index.str.startswith(\"NEU\").all(): \n\t\t\t\tlogger.write(\"Warning: check that input is indexed by sample name\")\n\t\t\tC_t = torch.tensor(C.values, dtype=torch.float32).to(\"cpu\") \n\t\telif isinstance(C, torch.Tensor): \n\t\t\tC_t = C\n\t\telse: \n\t\t\tlogger.write(\"Must provide as dataframe or tensor.\")\n\n\t\t# center and orthogonalize\n\t\tself.Q_t, _ = torch.qr(C_t - C_t.mean(0))\n\t\tself.dof = C_t.shape[0] - 2 - C_t.shape[1]\n\n\t\tself.n_samples = self.C.shape[0]\n\n\t@classmethod\n\tdef load_covariates(cls, path=None, prefix=None): \n\t\tif path is None: \n\t\t\tpath = base_dir / \"tensorqtl_runs/_phenotypes\" / f\"{prefix}_gtex.PEER_10.condition_sex.PEER_covariates.txt\"\n\t\tcovariates_df = pd.read_csv(path, sep=\"\\t\", index_col=0)\n\t\treturn cls(covariates_df.T)\n\n\tdef transform(self, M, center=True):\n\t\t\"\"\"Residualize rows of M wrt columns of C. Does not necessarily need to be normalized.\"\"\"\n\n\t\tif isinstance(M, pd.DataFrame): \n\t\t\tinput_format = \"dataframe\"\n\t\t\tM_t = torch.tensor(M.values, dtype=torch.float).to(\"cpu\")\n\t\telif isinstance(M, np.ndarray): \n\t\t\tinput_format = \"array\"\n\n\t\t\tM_input_shape = M.shape\n\t\t\tif len(M_input_shape) > 2: \n\t\t\t\tassert M_input_shape[-1] == self.n_samples # check that last axis in M corresponds to samples\n\t\t\t\tM = M.reshape((-1, self.n_samples)) # stack along the first axes\n\n\t\t\tM_t = torch.tensor(M, dtype=torch.float).to(\"cpu\")\n\t\telse: \n\t\t\tinput_format = \"tensor\"\n\t\t\tM_t = M\n\n\t\t# center row means\n\t\tM0_t = M_t - M_t.mean(1, keepdim=True) if center else M_t\n\n\t\t# the second term is the components of M that are explainable by Q. First projects into covariate space, then projects back\n\t\t# Note that normally the projection back would be Q_inverse, but because it's orthonormal, that's equal to Q^T\n\t\tM_t_transformed = M_t - torch.mm(torch.mm(M0_t, self.Q_t), self.Q_t.t()) # keep original mean\n\n\t\tif input_format == \"dataframe\": \n\t\t\treturn pd.DataFrame(M_t_transformed.numpy(), index=M.index, columns=M.columns)\n\t\telif input_format == \"array\": \n\t\t\tM_t_transformed = M_t_transformed.numpy()\n\t\t\t# return M_t_transformed\n\t\t\tif len(M_input_shape) > 2: \n\t\t\t\tM_t_transformed = M_t_transformed.reshape(M_input_shape)\n\t\t\treturn M_t_transformed\n\t\telse: \n\t\t\treturn M_t_transformed"},"size":{"kind":"number","value":2515,"string":"2,515"}}},{"rowIdx":126435,"cells":{"max_stars_repo_path":{"kind":"string","value":"qap-lp/main.py"},"max_stars_repo_name":{"kind":"string","value":"j-kota/LP-QAP"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168912"},"content":{"kind":"string","value":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport numpy as np\r\nimport os\r\n# import dependencies\r\nfrom data_generator import Generator\r\nfrom aligned_data_generator import Aligned_Generator\r\nfrom model import Siamese_GNN, Siamese_Matcher, GNN_Matcher\r\nfrom Logger import Logger\r\nimport time\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nfrom matplotlib import pyplot as plt\r\n\r\n#Pytorch requirements\r\nimport unicodedata\r\nimport string\r\nimport re\r\nimport random\r\nimport argparse\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn import init\r\nfrom torch.autograd import Variable\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom MatchingLayer import MatchingLayer\r\nimport pickle\r\n\r\nparser = 
argparse.ArgumentParser()\r\n\r\n###############################################################################\r\n# General Settings #\r\n###############################################################################\r\n\r\nparser.add_argument('--lr', nargs='?', const=1, type=float,\r\n default=1e-3)\r\nparser.add_argument('--quad_reg', nargs='?', const=1, type=float,\r\n default=1e-4)\r\nparser.add_argument('--align', nargs='?', const=1, type=int,\r\n default=0)\r\nparser.add_argument('--num_examples_train', nargs='?', const=1, type=int,\r\n default=int(20000))\r\nparser.add_argument('--num_examples_test', nargs='?', const=1, type=int,\r\n default=int(1000))\r\nparser.add_argument('--edge_density', nargs='?', const=1, type=float,\r\n default=0.2)\r\nparser.add_argument('--random_noise', action='store_true')\r\nparser.add_argument('--noise', nargs='?', const=1, type=float, default=0.03)\r\nparser.add_argument('--noise_model', nargs='?', const=1, type=int, default=2)\r\nparser.add_argument('--generative_model', nargs='?', const=1, type=str,\r\n default='ErdosRenyi')\r\nparser.add_argument('--iterations', nargs='?', const=1, type=int,\r\n default=int(20000))\r\nparser.add_argument('--batch_size', nargs='?', const=1, type=int, default=1)\r\nparser.add_argument('--mode', nargs='?', const=1, type=str, default='train')\r\nparser.add_argument('--path_dataset', nargs='?', const=1, type=str, default='')\r\nparser.add_argument('--path_logger', nargs='?', const=1, type=str, default='')\r\nparser.add_argument('--print_freq', nargs='?', const=1, type=int, default=100)\r\nparser.add_argument('--test_freq', nargs='?', const=1, type=int, default=500)\r\nparser.add_argument('--save_freq', nargs='?', const=1, type=int, default=2000)\r\nparser.add_argument('--clip_grad_norm', nargs='?', const=1, type=float,\r\n default=40.0)\r\n\r\n###############################################################################\r\n# GNN Settings #\r\n###############################################################################\r\n\r\nparser.add_argument('--num_features', nargs='?', const=1, type=int,\r\n default=20)\r\nparser.add_argument('--num_layers', nargs='?', const=1, type=int,\r\n default=20)\r\nparser.add_argument('--J', nargs='?', const=1, type=int, default=4)\r\n\r\nargs = parser.parse_args()\r\n\r\nif torch.cuda.is_available():\r\n dtype = torch.cuda.FloatTensor\r\n dtype_l = torch.cuda.LongTensor\r\n torch.cuda.manual_seed(0)\r\nelse:\r\n dtype = torch.FloatTensor\r\n dtype_l = torch.LongTensor\r\n torch.manual_seed(0)\r\n\r\nbatch_size = args.batch_size\r\ncriterion = nn.CrossEntropyLoss()\r\ntemplate1 = '{:<10} {:<10} {:<10} {:<15} {:<10} {:<10} {:<10} '\r\ntemplate2 = '{:<10} {:<10.5f} {:<10.5f} {:<15} {:<10} {:<10} {:<10.3f} \\n'\r\n\r\ndef compute_loss(pred, labels):\r\n pred = pred.view(-1, pred.size()[-1])\r\n labels = labels.view(-1)\r\n return criterion(pred, labels)\r\n\r\n#def train(siamese_gnn, logger, gen):\r\ndef train(siamese_gnn, logger, gen):\r\n labels = (Variable(torch.arange(0, gen.N).unsqueeze(0).expand(batch_size,\r\n gen.N)).type(dtype_l))\r\n #labels have this format\r\n #tensor([[0, 1, 2],\r\n # [0, 1, 2]]) batch size 2 - JK\r\n optimizer = torch.optim.Adamax(siamese_gnn.parameters(), lr=args.lr) #1e-3)\r\n for it in range(args.iterations):\r\n start = time.time()\r\n input = gen.sample_batch(batch_size, cuda=torch.cuda.is_available())\r\n # 'input' from sample_batch return has this form:\r\n # [WW, X], [WW_noise, X_noise] where WW, X, WW_noise, X_noise are all batches (tens)\r\n 
#temp\r\n # pickle.dump( input, open('pickle_input.p','wb') )\r\n # quit(\"Pickle saved\")\r\n #\r\n\r\n pred = siamese_gnn(*input)\r\n #? what form do the predictions have?\r\n #pred =\r\n #tensor([[[22.6977, 0.0827, 0.1377, ..., 8.2557, 8.9004, 4.8111],\r\n # [ 2.0629, 22.5110, 5.8868, ..., 2.8347, 7.7718, 14.2134],\r\n # [-1.5174, 10.6303, 14.9685, ..., 12.7365, 8.1801, 7.6513],\r\n # ...,\r\n # [ 8.6330, 5.8210, 7.1014, ..., 15.7287, 9.9970, 6.4952],\r\n # [ 5.1720, 8.9540, 12.8027, ..., 6.6614, 12.6969, 10.2977],\r\n # [ 2.9169, 8.3249, -0.3027, ..., 8.4153, 5.3743, 14.3038]]],\r\n # device='cuda:0', grad_fn=)\r\n #pred[0] =\r\n #tensor([[22.6977, 0.0827, 0.1377, ..., 8.2557, 8.9004, 4.8111],\r\n # [ 2.0629, 22.5110, 5.8868, ..., 2.8347, 7.7718, 14.2134],\r\n # [-1.5174, 10.6303, 14.9685, ..., 12.7365, 8.1801, 7.6513],\r\n # ...,\r\n # [ 8.6330, 5.8210, 7.1014, ..., 15.7287, 9.9970, 6.4952],\r\n # [ 5.1720, 8.9540, 12.8027, ..., 6.6614, 12.6969, 10.2977],\r\n # [ 2.9169, 8.3249, -0.3027, ..., 8.4153, 5.3743, 14.3038]],\r\n # device='cuda:0', grad_fn=)\r\n #pred.size() =\r\n #torch.Size([1, 50, 50])\r\n #gen.N =\r\n #50\r\n \"\"\"\r\n print(\"pred = \")\r\n print( pred )\r\n print(\"pred[0] = \")\r\n print( pred[0] )\r\n print(\"pred.size() = \")\r\n print( pred.size() )\r\n print(\"gen.N = \")\r\n print( gen.N )\r\n \"\"\"\r\n if it%100 == 0:\r\n print(\"Solved iteration {}\".format(it))\r\n #\r\n loss = compute_loss(pred, labels)\r\n siamese_gnn.zero_grad()\r\n loss.backward()\r\n #nn.utils.clip_grad_norm(siamese_gnn.parameters(), args.clip_grad_norm) JK: deprecated\r\n nn.utils.clip_grad_norm_(siamese_gnn.parameters(), args.clip_grad_norm)\r\n optimizer.step()\r\n logger.add_train_loss(loss)\r\n logger.add_train_accuracy(pred, labels)\r\n elapsed = time.time() - start\r\n if it % logger.args['print_freq'] == 0:\r\n logger.plot_train_accuracy()\r\n logger.plot_train_loss()\r\n loss = loss.data.cpu().numpy()#[0]\r\n info = ['iteration', 'loss', 'accuracy', 'edge_density',\r\n 'noise', 'model', 'elapsed']\r\n out = [it, loss.item(), logger.accuracy_train[-1].item(), args.edge_density,\r\n args.noise, args.generative_model, elapsed]\r\n print(template1.format(*info))\r\n print(template2.format(*out))\r\n # test(siamese_gnn, logger, gen)\r\n if it % logger.args['save_freq'] == 0:\r\n logger.save_model(siamese_gnn)\r\n logger.save_results()\r\n print('Optimization finished.')\r\n\r\nif __name__ == '__main__':\r\n logger = Logger(args.path_logger)\r\n logger.write_settings(args)\r\n #siamese_gnn = Siamese_GNN(args.num_features, args.num_layers, args.J + 2)\r\n\r\n align = args.align\r\n print('align = ', align)\r\n if align:\r\n print('Using {} '.format('Aligned_Generator'))\r\n gen = Aligned_Generator(args.path_dataset)\r\n else:\r\n print('Using {} '.format('Generator'))\r\n gen = Generator(args.path_dataset)\r\n\r\n # generator setup\r\n gen.num_examples_train = args.num_examples_train\r\n gen.num_examples_test = args.num_examples_test\r\n gen.J = args.J\r\n gen.edge_density = args.edge_density\r\n gen.random_noise = args.random_noise\r\n gen.noise = args.noise\r\n gen.noise_model = args.noise_model\r\n gen.generative_model = args.generative_model\r\n # load dataset\r\n # print(gen.random_noise)\r\n gen.load_dataset()\r\n\r\n matching_layer = MatchingLayer(nNodes=gen.N, eps=args.quad_reg) # eps=1e-4)\r\n\r\n if align:\r\n siamese_gnn = GNN_Matcher(args.num_features, args.num_layers, args.J + 2, matching_layer = matching_layer)\r\n\r\n else:\r\n siamese_gnn = 
Siamese_Matcher(args.num_features, args.num_layers, args.J + 2, matching_layer = matching_layer)\r\n\r\n\r\n\r\n if torch.cuda.is_available():\r\n siamese_gnn.cuda()\r\n\r\n if args.mode == 'train':\r\n train(siamese_gnn, logger, gen)\r\n # elif args.mode == 'test':\r\n # test(siamese_gnn, logger, gen)\r\n"},"size":{"kind":"number","value":8977,"string":"8,977"}}},{"rowIdx":126436,"cells":{"max_stars_repo_path":{"kind":"string","value":"poll/urls.py"},"max_stars_repo_name":{"kind":"string","value":"vishalpandeyvip/GURU--an-online-class-portal"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170059"},"content":{"kind":"string","value":"from django.urls import path\nfrom .views import *\nurlpatterns = [\n\tpath('/',polls,name=\"polls\"),\n\tpath('//',poll_page,name=\"poll_page\"),\n\tpath('/voting//',voting,name=\"voting\"),\n\tpath('//',delete_poll,name=\"delete_poll\"),\n]"},"size":{"kind":"number","value":309,"string":"309"}}},{"rowIdx":126437,"cells":{"max_stars_repo_path":{"kind":"string","value":"Caesar Cypher interface.py"},"max_stars_repo_name":{"kind":"string","value":"JatsuLC/Caesar-Cypher-Bruteforce"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170308"},"content":{"kind":"string","value":"import tkinter as tk\r\nfrom tkinter.constants import ANCHOR, BOTTOM, INSERT, LEFT, RIGHT, TOP, X\r\nimport caesar_cypher_functions as cc\r\n\r\ngui = tk.Tk()\r\n\r\nscreen_width = gui.winfo_screenwidth()\r\nprint(screen_width)\r\nscreen_height = gui.winfo_screenheight()\r\nprint(screen_height)\r\ngui.geometry(str(int(screen_width/2)) + \"x\" + str(int(screen_height/2)))\r\nuserInput = \"\"\r\ngui.title('Caesar Cypher Bruteforcer')\r\ne0 = tk.Label(gui, text='Message to Bruteforce').pack(side=TOP)\r\n\r\n\r\ne1 = tk.Entry(gui, width=100)\r\ne1.pack(side=TOP)\r\nmesRes = \"\"\r\n\r\ndef beginOp():\r\n userInput = e1.get()\r\n global mesRes\r\n mesRes = cc.bruteforce(userInput)\r\n refresh()\r\n\r\ndef refresh():\r\n tk.Message(gui, text = mesRes, width=screen_width).pack(side=TOP)\r\n print(gui.pack_slaves())\r\n\r\ndef rmMes():\r\n list = gui.pack_slaves()\r\n print(list)\r\n for x in range(4):\r\n print(\"Popping \" + str(list[0]))\r\n list.pop(0)\r\n print(\"Pop successful\")\r\n for x in list:\r\n print(\"Destroying \" + str(x))\r\n x.destroy()\r\n print(\"Destroy Successeful\")\r\n \r\n \r\n\r\ntk.Button(gui, text =\"BruteForce\", command = beginOp).pack(side=TOP)\r\ntk.Button(gui, text =\"Delete decrypted messages\", command = rmMes).pack(side=TOP)\r\n\r\ngui.mainloop()"},"size":{"kind":"number","value":1261,"string":"1,261"}}},{"rowIdx":126438,"cells":{"max_stars_repo_path":{"kind":"string","value":"PolTools/utils/build_counts_dict.py"},"max_stars_repo_name":{"kind":"string","value":"GeoffSCollins/PolTools"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2165596"},"content":{"kind":"string","value":"import os\n\nfrom collections import defaultdict\n\nfrom PolTools.utils.constants import hg38_chrom_sizes_random_file\nfrom PolTools.utils.make_random_filename import generate_random_filename\nfrom PolTools.utils.remove_files import remove_files\n\ndef build_counts_dict(sequencing_filename, read_type):\n \"\"\"\n Builds a dictionary for the 5'/3'/pileup reads. 
Dictionaries can be accessed like so:\n five_prime_counts_dict[chromosome][strand][five_prime_position], where five_prime_position is an integer\n\n :param sequencing_filename: filename of the sequencing data collected\n :type sequencing_filename: str\n :return: a dictionary containing the counts at a genomic location\n :rtype: dict\n \"\"\"\n\n counts_dict = {}\n\n if read_type not in [\"five\", \"three\", \"whole\"]:\n raise ValueError(\"Read type must be either five, three, or whole\")\n\n if read_type == 'whole':\n # Use bedtools genome coverage because it is much faster\n fw_bedgraph = generate_random_filename(\".bedGraph\")\n rv_bedgraph = generate_random_filename(\".bedGraph\")\n\n os.system(\"bedtools genomecov -i \" + sequencing_filename + \" -g \" + hg38_chrom_sizes_random_file + \\\n \" -bg -strand + > \" + fw_bedgraph)\n\n os.system(\"bedtools genomecov -i \" + sequencing_filename + \" -g \" + hg38_chrom_sizes_random_file + \\\n \" -bg -strand - > \" + rv_bedgraph)\n\n with open(fw_bedgraph) as file:\n for line in file:\n chromosome, left, right, counts = line.split()\n\n left, right = int(left), int(right)\n\n if chromosome not in counts_dict:\n counts_dict[chromosome] = {\n \"+\": defaultdict(int),\n \"-\": defaultdict(int)\n }\n\n try:\n if 'e' in counts:\n base, exponent = counts.split(\"e\")\n base, exponent = float(base), int(exponent)\n\n counts = base * (10 ** exponent)\n\n except:\n pass\n\n for position in range(left, right):\n counts_dict[chromosome][\"+\"][position] += int(counts)\n\n with open(rv_bedgraph) as file:\n for line in file:\n chromosome, left, right, counts = line.split()\n\n left, right = int(left), int(right)\n\n if chromosome not in counts_dict:\n counts_dict[chromosome] = {\n \"+\": defaultdict(int),\n \"-\": defaultdict(int)\n }\n\n try:\n if 'e' in counts:\n base, exponent = counts.split(\"e\")\n base, exponent = float(base), int(exponent)\n\n counts = base * (10 ** exponent)\n\n except:\n pass\n\n for position in range(left, right):\n counts_dict[chromosome][\"-\"][position] += int(counts)\n\n remove_files(fw_bedgraph, rv_bedgraph)\n\n return counts_dict\n\n\n with open(sequencing_filename) as file:\n for i, line in enumerate(file):\n chromosome, left, right, _, _, strand = line.rstrip().split()\n\n left = int(left)\n right = int(right)\n\n if chromosome not in counts_dict:\n counts_dict[chromosome] = {\n \"+\": defaultdict(int),\n \"-\": defaultdict(int)\n }\n\n if read_type == \"five\":\n if strand == \"+\":\n position = left\n else:\n position = right - 1\n\n elif read_type == \"three\":\n if strand == \"+\":\n position = right - 1\n else:\n position = left\n\n counts_dict[chromosome][strand][position] += 1\n\n return counts_dict\n"},"size":{"kind":"number","value":3963,"string":"3,963"}}},{"rowIdx":126439,"cells":{"max_stars_repo_path":{"kind":"string","value":"Hanghae_week1/url_date_actorlist_db_save.py"},"max_stars_repo_name":{"kind":"string","value":"endol007/TIL"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170201"},"content":{"kind":"string","value":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client.team9TestOne\n\n\n# 모든 영화 url을 가져온다 -> 그다음 url을 detail url로 변경한다\n# actorlist 저장 코드\nmovie_list = list(db.movies.find({}, {'_id': False}))\nfor movie in movie_list:\n title = movie['title']\n url = movie['url']\n url = url.replace('basic', 'detail')\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 
10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\n data = requests.get(url, headers=headers)\n soup = BeautifulSoup(data.text, 'html.parser')\n\n # detail url을 통해서 영화배우들 리스트를 가져온다\n actor_list = soup.select('#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li')\n\n ac_second_list = []\n for actor in actor_list:\n ac = actor.select_one(' div.p_info > a').text\n ac_second_list.append(ac)\n\n db.movies.update_one({'title': title}, {'$set': {'actor_list': ac_second_list}})"},"size":{"kind":"number","value":1069,"string":"1,069"}}},{"rowIdx":126440,"cells":{"max_stars_repo_path":{"kind":"string","value":"verification_rules/common/rule_parameter.py"},"max_stars_repo_name":{"kind":"string","value":"adrianmkng/watchmen"},"max_stars_count":{"kind":"number","value":190,"string":"190"},"id":{"kind":"string","value":"2168798"},"content":{"kind":"string","value":"# Copyright 2017 Insurance Australia Group Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nItems relating to AWS Config rule parameters.\n\"\"\"\nimport json\n\nclass RuleParameter(object):\n \"\"\"Class relating to the AWS Config rule parameters.\n\n Args:\n event: AWS event payload from AWS Config rule.\n \"\"\"\n def __init__(self, event):\n if \"ruleParameters\" in event:\n self._rule_parameters = json.loads(event[\"ruleParameters\"])\n else:\n self._rule_parameters = None\n\n def get(self, key, default=None):\n \"\"\"Retrieves the value of the specified key.\"\"\"\n return default if self._rule_parameters is None else self._rule_parameters.get(key, default)\n"},"size":{"kind":"number","value":1216,"string":"1,216"}}},{"rowIdx":126441,"cells":{"max_stars_repo_path":{"kind":"string","value":"jsonrpc11base/service_description.py"},"max_stars_repo_name":{"kind":"string","value":"kbaseIncubator/kbase-jsonrpc11base"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169588"},"content":{"kind":"string","value":"class ServiceDescription(object):\n def __init__(self, name, id, version=None, summary=None):\n self.name = name\n self.id = id\n self.version = version\n self.summary = summary\n\n def to_json(self):\n data = {\n 'sdversion': '1.0',\n 'name': self.name,\n 'id': self.id\n }\n if (self.version is not None):\n data['version'] = self.version\n\n if (self.summary is not None):\n data['summary'] = self.summary\n\n return data\n"},"size":{"kind":"number","value":530,"string":"530"}}},{"rowIdx":126442,"cells":{"max_stars_repo_path":{"kind":"string","value":"where_to_go/places/migrations/0009_alter_image_place.py"},"max_stars_repo_name":{"kind":"string","value":"delphython/where-to-go"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170140"},"content":{"kind":"string","value":"# Generated by Django 3.2.10 on 2021-12-28 08:38\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('places', '0008_auto_20211228_0957'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='image',\n name='place',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='places', to='places.place', verbose_name='Куда пойти'),\n ),\n ]\n"},"size":{"kind":"number","value":517,"string":"517"}}},{"rowIdx":126443,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_temp_sensors.py"},"max_stars_repo_name":{"kind":"string","value":"cptpcrd/pypsutil"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2169965"},"content":{"kind":"string","value":"# pylint: disable=no-member\nimport pathlib\nimport shutil\n\nimport pypsutil\n\nfrom .util import linux_only, populate_directory, replace_info_directories\n\n\n@linux_only\ndef test_sensors_temperature(tmp_path: pathlib.Path) -> None:\n populate_directory(\n str(tmp_path),\n {\n \"class\": {\n \"hwmon\": {\n \"hwmon0\": {\n \"name\": \"acpi\",\n \"temp1_input\": \"50000\\n\",\n \"temp2_label\": \"BAD\\n\",\n },\n \"hwmon1\": {\n \"name\": \"acpi2\",\n },\n \"hwmon2\": {\n \"name\": \"coretemp\",\n \"temp1_input\": \"60000\\n\",\n \"temp1_max\": \"100000\\n\",\n \"temp1_crit\": \"100000\\n\",\n \"temp2_input\": \"90000\\n\",\n \"temp2_label\": \"Chassis\\n\",\n },\n \"hwmon3\": {\n \"name\": \"coretemp2\",\n \"temp1_input\": \"60000\",\n \"temp1_max\": \"100000\",\n \"temp1_crit\": \"100000\",\n \"temp2_input\": \"90000\",\n \"temp2_label\": \"Chassis\",\n },\n },\n }\n },\n )\n\n with replace_info_directories(sysfs=str(tmp_path)):\n assert pypsutil.sensors_temperatures() == { # type: ignore\n \"acpi\": [\n pypsutil.TempSensorInfo( # type: ignore\n label=\"\", current=50, high=None, critical=None\n ),\n ],\n \"coretemp\": [\n pypsutil.TempSensorInfo( # type: ignore\n label=\"\", current=60, high=100, critical=100\n ),\n pypsutil.TempSensorInfo( # type: ignore\n label=\"Chassis\", current=90, high=None, critical=None\n ),\n ],\n \"coretemp2\": [\n pypsutil.TempSensorInfo( # type: ignore\n label=\"\", current=60, high=100, critical=100\n ),\n pypsutil.TempSensorInfo( # type: ignore\n label=\"Chassis\", current=90, high=None, critical=None\n ),\n ],\n }\n\n shutil.rmtree(tmp_path / \"class\")\n\n with replace_info_directories(sysfs=str(tmp_path)):\n assert pypsutil.sensors_temperatures() == {} # type: ignore\n\n\n@linux_only\ndef test_temp_sensor_fahrenheit() -> None:\n sensor_a = pypsutil.TempSensorInfo( # type: ignore\n label=\"sensor_a\", current=0, high=100, critical=100\n )\n assert sensor_a.current_fahrenheit == 32\n assert sensor_a.high_fahrenheit == 212\n assert sensor_a.critical_fahrenheit == 212\n\n sensor_b = pypsutil.TempSensorInfo( # type: ignore\n label=\"sensor_b\", current=100, high=None, critical=None\n )\n assert sensor_b.current_fahrenheit == 212\n assert sensor_b.high_fahrenheit is None\n assert sensor_b.critical_fahrenheit is None\n"},"size":{"kind":"number","value":3087,"string":"3,087"}}},{"rowIdx":126444,"cells":{"max_stars_repo_path":{"kind":"string","value":"hell.py"},"max_stars_repo_name":{"kind":"string","value":"squisher/stella"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"id":{"kind":"string","value":"2169500"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom llvm import *\nfrom llvm.core import *\nfrom llvm.ee import *\n\nimport logging\nimport ctypes\nimport cffi\n\ndef playground(point):\n my_module = Module.new('my_module')\n tp_int = 
Type.int(64)\n tp_idx = Type.int()\n tp_struct = Type.struct([tp_int, tp_int, tp_int, tp_int], name='test_struct')\n #tp_func = Type.function(Type.pointer(tp_struct), [])\n tp_func = Type.function(tp_int, [])\n f_sum = my_module.add_function(tp_func, \"sum\")\n bb = f_sum.append_basic_block(\"entry\")\n builder = Builder.new(bb)\n\n addr = ctypes.addressof(point)\n addr_llvm = Constant.int(tp_int, int(addr))\n struct = builder.inttoptr(addr_llvm, Type.pointer(tp_struct))\n #print(str(struct))\n\n ione = Constant.int(tp_idx, 1)\n izero = Constant.int(tp_idx, 0)\n one = Constant.int(tp_int, 1)\n\n p = builder.gep(struct, [izero, izero])\n tmp = builder.load(p)\n res = builder.add(tmp, one)\n builder.store(res, p)\n\n p = builder.gep(struct, [izero, ione])\n tmp = builder.load(p)\n res = builder.add(tmp, one)\n builder.store(res, p)\n\n #p = builder.gep(struct, [Constant.int(tp_idx, 0), Constant.int(tp_idx, 0)])\n #tmp3 = builder.load(p)\n\n #builder.ret(struct)\n builder.ret(res)\n #builder.ret(tmp3)\n\n print(str(my_module))\n eb = EngineBuilder.new(my_module)\n eb.mcjit(True)\n ee = eb.create()\n\n retval = ee.run_function(f_sum, [])\n\n #return retval.as_pointer()\n return retval.as_int()\n\nclass Point(ctypes.Structure):\n _fields_ = [\n ('x', ctypes.c_int64),\n ('y', ctypes.c_int64),\n ('z', ctypes.c_int64),\n ('r', ctypes.c_int64),\n ]\n\nif __name__ == '__main__':\n point = Point(1,2,3,4)\n p = playground(point)\n print(\"Returned: \" + str(p))\n print(point, point.x, point.y, point.z, point.r)\n\n #cast_p = ctypes.cast(p, ctypes.POINTER(Point))\n #print(\"ctypes:\", cast_p[0], cast_p[0].x, cast_p[0].y, cast_p[0].z, cast_p[0].r)\n"},"size":{"kind":"number","value":1955,"string":"1,955"}}},{"rowIdx":126445,"cells":{"max_stars_repo_path":{"kind":"string","value":"Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/DIFFERENT/PERMUTATION/03_bonus_factorial.py"},"max_stars_repo_name":{"kind":"string","value":"okara83/Becoming-a-Data-Scientist"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168668"},"content":{"kind":"string","value":"number = int(input(\"Enter a number to calculate factoriyel : \"))\n\ndef factorial(number):\n fact = 1\n for i in range(1,number+1):\n fact *= i\n return fact\n\nprint(number, \" factorial is \",factorial(number))"},"size":{"kind":"number","value":215,"string":"215"}}},{"rowIdx":126446,"cells":{"max_stars_repo_path":{"kind":"string","value":"hiicart/tests/paypal.py"},"max_stars_repo_name":{"kind":"string","value":"hiidef/hiicart"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170295"},"content":{"kind":"string","value":"import base\n\nfrom datetime import datetime, date, timedelta\nfrom decimal import Decimal\nfrom django.conf import settings\n\nfrom hiicart.models import HiiCart, LineItem, RecurringLineItem\n\nclass PaypalTestCase(base.HiiCartTestCase):\n \"\"\"Paypal related tests\"\"\"\n pass\n\nclass PaypalIpnTestCase(base.HiiCartTestCase):\n \"\"\"Tests of the PaypalIPN.\"\"\"\n pass\n"},"size":{"kind":"number","value":362,"string":"362"}}},{"rowIdx":126447,"cells":{"max_stars_repo_path":{"kind":"string","value":"spiders/suumo.py"},"max_stars_repo_name":{"kind":"string","value":"montenoki/suumo_crawler"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168598"},"content":{"kind":"string","value":"import logging\n\nimport scrapy\nfrom scrapy import Selector\n\nfrom suumo_crawler.items import 
SuumoItem\n\n\nclass SuumoSpider(scrapy.Spider):\n name = \"suumo\"\n allowed_domains = [\"suumo.jp\"]\n home_url = \"https://suumo.jp\"\n page_url = \"&page=\"\n exps = {\n \"next_page\": r\"//*/p[@class='pagination-parts']\",\n \"total\": r\"//*[@class='paginate_set-hit']//text()\",\n \"detail\": r\"//*[@class='property_inner-title']\",\n \"rent\": r\"//*[@class='property_view_main-emphasis']//text()\",\n \"rent_info\": r\"//*[@class='property_data-body']\",\n \"access_info\": r\"//*[@class='property_view_detail-text']\",\n \"equipments\": r\"//*[@class='inline_list']/li/text()\",\n \"building_info\": r\"//*[@class='data_table table_gaiyou']//td/text()\",\n \"other_info\": r\"//*[@class='data_table table_gaiyou']//ul\",\n }\n\n def __init__(self, *args, **kwargs):\n super(SuumoSpider, self).__init__(*args, **kwargs)\n self.start_urls = [kwargs.get(\"start_url\")]\n self.output_filename = kwargs.get(\"output_filename\")\n self.output_path = kwargs.get(\"output_path\")\n\n def parse(self, response):\n seletor = Selector(response)\n\n # 次へ\n page_bottom = seletor.xpath(self.exps[\"next_page\"])\n if page_bottom:\n for element in page_bottom:\n element_text = element.xpath(\"a/text()\").extract_first()\n if element_text == \"次へ\":\n url = element.xpath(\"a/@href\").extract_first()\n yield scrapy.Request(self.home_url + url,\n callback=self.parse)\n\n # 詳細ページ\n detail = seletor.xpath(self.exps[\"detail\"])\n if detail:\n for element in detail:\n url = element.xpath(\"a/@href\").extract_first()\n title = element.xpath(\"a/text()\").extract_first()\n yield scrapy.Request(\n url=self.home_url + url,\n callback=lambda response, title=title: self.parseDetail(\n response, title),\n )\n\n def parseDetail(self, response, title):\n selector = Selector(response)\n item = SuumoItem()\n if \"https://suumo.jp/chintai/bc_\" in response.url: # Not 404 page\n item[\"suumo_id\"] = response.url.replace(\n \"https://suumo.jp/chintai/bc_\", \"\").replace(\"/\", \"\")\n item[\"title\"] = title\n item[\"rent\"] = selector.xpath(self.exps[\"rent\"]).extract_first()\n rent_infos = selector.xpath(self.exps[\"rent_info\"])\n if len(rent_infos) == 9:\n item[\"admin\"] = rent_infos[0].xpath(\n \"string(.)\").extract_first()\n deposit_gratuity = rent_infos[1].xpath(\n \"string(.)\").extract_first().split(\" / \")\n item[\"deposit\"] = deposit_gratuity[0]\n item[\"gratuity\"] = deposit_gratuity[1]\n item[\"security\"] = rent_infos[2].xpath(\n \"string(.)\").extract_first()\n item[\"restoration_amortisation\"] = rent_infos[3].xpath(\n \"string(.)\").extract_first()\n item[\"rooms\"] = rent_infos[4].xpath(\n \"string(.)\").extract_first()\n item[\"area\"] = rent_infos[5].xpath(\"string(.)\").extract_first()\n item[\"direction\"] = rent_infos[6].xpath(\n \"string(.)\").extract_first()\n item[\"building_type\"] = rent_infos[7].xpath(\n \"string(.)\").extract_first()\n item[\"age\"] = rent_infos[8].xpath(\"string(.)\").extract_first()\n access_infos = selector.xpath(self.exps[\"access_info\"])\n item[\"access1\"] = None\n item[\"access2\"] = None\n item[\"access3\"] = None\n if access_infos:\n item[\"address\"] = access_infos[-1].xpath(\n \"string(.)\").extract_first()\n del access_infos[-1]\n if len(access_infos) > 0:\n item[\"access1\"] = access_infos[0].xpath(\n \"string(.)\").extract_first()\n del access_infos[0]\n if len(access_infos) > 0:\n item[\"access2\"] = access_infos[0].xpath(\n \"string(.)\").extract_first()\n del access_infos[0]\n if len(access_infos) > 0:\n item[\"access3\"] = access_infos[0].xpath(\n 
\"string(.)\").extract_first()\n del access_infos[0]\n equipments = selector.xpath(\n self.exps[\"equipments\"]).extract_first()\n if equipments:\n item[\"equipments\"] = equipments\n building_info = selector.xpath(\n self.exps[\"building_info\"]).extract()\n if building_info:\n item[\"layout_detail\"] = building_info[0]\n item[\"structure\"] = building_info[1]\n item[\"building_height\"] = building_info[2]\n item[\"completion_date\"] = building_info[3]\n item[\"insurance\"] = building_info[4]\n item[\"park\"] = building_info[5]\n item[\"movein_date\"] = building_info[6]\n item[\"transaction_situation\"] = building_info[7]\n item[\"condition\"] = building_info[8]\n item[\"store_code\"] = building_info[9]\n assert item[\"suumo_id\"] == building_info[10], \"suumo_id error\"\n item[\"total_units\"] = building_info[11]\n # other_info = selector.xpath(self.exps[\"other_info\"])\n\n # item[\"guarantor\"] = other_info[-5].xpath(\"string(.)\").extract_first()\n # item[\"other_init_cost\"] = other_info[-4].xpath(\"string(.)\").extract_first()\n # item[\"other_fees\"] = other_info[-3].xpath(\"string(.)\").extract_first()\n # item[\"note\"] = other_info[-2].xpath(\"string(.)\").extract_first()\n # item[\"nearby_info\"] = other_info[-1].xpath(\"string(.)\").extract_first()\n yield item\n"},"size":{"kind":"number","value":6089,"string":"6,089"}}},{"rowIdx":126448,"cells":{"max_stars_repo_path":{"kind":"string","value":"TeamOliver/plugins/Modules/info.py"},"max_stars_repo_name":{"kind":"string","value":"TeamOliver/OliverTwist"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"id":{"kind":"string","value":"2166480"},"content":{"kind":"string","value":"from pyrogram import Client, filters\n\n@Client.on_message(filters.command(\"info\", \"whois\"))\ndef info(_, message):\n user = message.from_user.id\n ppic_ct = bot.get_profile_photos_count(user)\n\n if not ppic_ct == 0:\n ppic = bot.get_profile_photos(user, limit=1)\n p_pic = ppic[0]['thumbs'][0]['file_id']\n\n usr = bot.get_users(user)\n info = f\"\"\"**First Name** : {foo.first_name}\n**Last Name**: {usr.last_name}\n**Id**: {usr.id}\n**Permanent Link**: {usr.mention(message.from_user.first_name)}\n**Is bot**: {usr.is_bot}\n\"\"\"\n\n if ppic_ct != 0:\n message.reply_photo(p_pic, caption=info)\n else:\n message.reply_text(info)\n"},"size":{"kind":"number","value":656,"string":"656"}}},{"rowIdx":126449,"cells":{"max_stars_repo_path":{"kind":"string","value":"Python/Grading-Students.py"},"max_stars_repo_name":{"kind":"string","value":"nickhaynes/HackerRank-Challenges"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2167058"},"content":{"kind":"string","value":"# HackerLand University has the following grading policy:\n# \n# --Every student receives a grade in the inclusive range from \n# 0 to 100.\n# --Any less than is a failing grade.\n# \n# Sam is a professor at the university and likes to round each \n# student's grade according to these rules:\n# \n# --If the difference between the grade and the next multiple of \n# 5 is less than 3, round grade up to the next multiple of 5.\n# --If the value of grade is less than 38, no rounding occurs as \n# the result will still be a failing grade.\n# \n# For example, grade=84 will be rounded to 85 but grade=29 will \n# not be rounded because the rounding would result in a number \n# that is less than 40.\n# \n# Given the initial value of grade for each of Sam's n students, \n# write code to automate the rounding process. 
Complete the \n# function solve that takes an integer array of all grades, and \n# return an integer array consisting of the rounded grades. For \n# each grade, round it according to the rules above and print the \n# result on a new line.\n# \n# Input Format\n# \n# The first line contains a single integer denoting n (the number of students). \n# Each line i of the n subsequent lines contains a single integer, grade(i), denoting student i's grade.\n# \n# Constraints\n# \n# Output Format\n# \n# For each grade(i) of the n grades, print the rounded grade on a new line.\n# \n# Sample Input 0\n# \n# 4\n# 73\n# 67\n# 38\n# 33\n# \n# Sample Output 0\n# \n# 75\n# 67\n# 40\n# 33\n# \nrounded_Grades =[]\n\ndef gradingStudents(grades):\n for x in grades:\n if x < 38:\n rounded_Grades.append(x)\n else:\n if x % 5 < 3:\n rounded_Grades.append(x)\n else:\n if x % 5 == 3:\n x = x + 2\n rounded_Grades.append(x)\n else:\n x = x + 1\n rounded_Grades.append(x)\n return rounded_Grades\n"},"size":{"kind":"number","value":1912,"string":"1,912"}}},{"rowIdx":126450,"cells":{"max_stars_repo_path":{"kind":"string","value":"linuxOperation/app/utils/regex.py"},"max_stars_repo_name":{"kind":"string","value":"zhouli121018/core"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170090"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport re\n\n\ndef path_sub(url):\n if re.search(r'(\\/\\d+?\\/)', url):\n url = re.sub(r'(\\/\\d+?\\/)', '/modify/', url)\n return url\n\npure_digits_regex = lambda s: re.compile('^\\d+$').match(s)\npure_english_regex = lambda s: re.compile('^[\\.\\_\\-A-Za-z0-9_]+$').match(s)\npure_english_regex2 = lambda s: re.compile('^[A-Za-z_]+$').match(s)\npure_email_regex = lambda s: re.compile('^(\\w|[-+=.])+@\\w+([-.]\\w+)*\\.(\\w+)$').match(s)\npure_ip_regex = lambda s: re.compile('^(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)$').match(s)\npure_ipaddr_regex = lambda s: re.compile('^(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\/(\\d+)$').match(s)\npure_tel_regex = lambda s: re.compile('^1[3456789]\\d{9}$').match(s)\npure_upper_regex = lambda s: re.compile('^[A-Z]+$').match(s)\npure_lower_regex = lambda s: re.compile('^[a-z]+$').match(s)\n\npure_upper_regex2 = lambda s: re.compile('[A-Z]+').search(s)\npure_lower_regex2 = lambda s: re.compile('[a-z]+').search(s)\npure_digits_regex2 = lambda s: re.compile('\\d+').search(s)\n\nif __name__ == \"__main__\":\n print pure_tel_regex(\"19829799823\")\n print pure_tel_regex(\"19929799823\")\n"},"size":{"kind":"number","value":1292,"string":"1,292"}}},{"rowIdx":126451,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"aboucaud/sbi_experiments"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170237"},"content":{"kind":"string","value":"from setuptools import setup, find_packages\n\nsetup(\n name='SBIExperiments',\n version='0.0.1',\n url='https://github.com/astrodeepnet/sbi_experiments',\n author=' and friends',\n description='Package for numerical experiments of SBI tools',\n packages=find_packages(), \n install_requires=['numpy >= 1.11.1', 'jax >= 0.2.0', 'tensorflow_probability >= 
0.14.1'],\n)"},"size":{"kind":"number","value":387,"string":"387"}}},{"rowIdx":126452,"cells":{"max_stars_repo_path":{"kind":"string","value":"pypy/module/__builtin__/interp_memoryview.py"},"max_stars_repo_name":{"kind":"string","value":"kantai/passe-pypy-taint-tracking"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2168368"},"content":{"kind":"string","value":"\"\"\"\nImplementation of the 'buffer' and 'memoryview' types.\n\"\"\"\nfrom pypy.interpreter.baseobjspace import Wrappable\nfrom pypy.interpreter import buffer\nfrom pypy.interpreter.gateway import interp2app, unwrap_spec\nfrom pypy.interpreter.typedef import TypeDef, GetSetProperty\nfrom pypy.interpreter.error import OperationError\nimport operator\n\nW_Buffer = buffer.Buffer # actually implemented in pypy.interpreter.buffer\n\n\nclass W_MemoryView(Wrappable):\n \"\"\"Implement the built-in 'memoryview' type as a thin wrapper around\n an interp-level buffer.\n \"\"\"\n\n def __init__(self, buf):\n assert isinstance(buf, buffer.Buffer)\n self.buf = buf\n\n def _make_descr__cmp(name):\n def descr__cmp(self, space, w_other):\n other = space.interpclass_w(w_other)\n if isinstance(other, W_MemoryView):\n # xxx not the most efficient implementation\n str1 = self.as_str()\n str2 = other.as_str()\n return space.wrap(getattr(operator, name)(str1, str2))\n\n try:\n w_buf = space.buffer(w_other)\n except OperationError, e:\n if not e.match(space, space.w_TypeError):\n raise\n return space.w_NotImplemented\n else:\n str1 = self.as_str()\n str2 = space.buffer_w(w_buf).as_str()\n return space.wrap(getattr(operator, name)(str1, str2))\n descr__cmp.func_name = name\n return descr__cmp\n\n descr_eq = _make_descr__cmp('eq')\n descr_ne = _make_descr__cmp('ne')\n descr_lt = _make_descr__cmp('lt')\n descr_le = _make_descr__cmp('le')\n descr_gt = _make_descr__cmp('gt')\n descr_ge = _make_descr__cmp('ge')\n\n def as_str(self):\n return self.buf.as_str()\n\n def getlength(self):\n return self.buf.getlength()\n\n def getslice(self, start, stop):\n if start < 0:\n start = 0\n size = stop - start\n if size < 0:\n size = 0\n buf = self.buf\n if isinstance(buf, buffer.RWBuffer):\n buf = buffer.RWSubBuffer(buf, start, size)\n else:\n buf = buffer.SubBuffer(buf, start, size)\n return W_MemoryView(buf)\n\n def descr_buffer(self, space):\n \"\"\"Note that memoryview() objects in PyPy support buffer(), whereas\n not in CPython; but CPython supports passing memoryview() to most\n built-in functions that accept buffers, with the notable exception\n of the buffer() built-in.\"\"\"\n return space.wrap(self.buf)\n\n def descr_tobytes(self, space):\n return space.wrap(self.as_str())\n\n def descr_tolist(self, space):\n buf = self.buf\n result = []\n for i in range(buf.getlength()):\n result.append(space.wrap(ord(buf.getitem(i))))\n return space.newlist(result)\n\n def descr_getitem(self, space, w_index):\n start, stop, step = space.decode_index(w_index, self.getlength())\n if step == 0: # index only\n return space.wrap(self.buf.getitem(start))\n elif step == 1:\n res = self.getslice(start, stop)\n return space.wrap(res)\n else:\n raise OperationError(space.w_ValueError,\n space.wrap(\"memoryview object does not support\"\n \" slicing with a step\"))\n\n @unwrap_spec(newstring='bufferstr')\n def descr_setitem(self, space, w_index, newstring):\n buf = self.buf\n if isinstance(buf, buffer.RWBuffer):\n buf.descr_setitem(space, w_index, newstring)\n else:\n raise OperationError(space.w_TypeError,\n space.wrap(\"cannot modify 
read-only memory\"))\n\n def descr_len(self, space):\n return self.buf.descr_len(space)\n\n def w_get_format(self, space):\n return space.wrap(\"B\")\n def w_get_itemsize(self, space):\n return space.wrap(1)\n def w_get_ndim(self, space):\n return space.wrap(1)\n def w_is_readonly(self, space):\n return space.wrap(not isinstance(self.buf, buffer.RWBuffer))\n def w_get_shape(self, space):\n return space.newtuple([space.wrap(self.getlength())])\n def w_get_strides(self, space):\n return space.newtuple([space.wrap(1)])\n def w_get_suboffsets(self, space):\n # I've never seen anyone filling this field\n return space.w_None\n\n\ndef descr_new(space, w_subtype, w_object):\n memoryview = W_MemoryView(space.buffer(w_object))\n return space.wrap(memoryview)\n\nW_MemoryView.typedef = TypeDef(\n \"memoryview\",\n __doc__ = \"\"\"\\\nCreate a new memoryview object which references the given object.\n\"\"\",\n __new__ = interp2app(descr_new),\n __buffer__ = interp2app(W_MemoryView.descr_buffer),\n __eq__ = interp2app(W_MemoryView.descr_eq),\n __ge__ = interp2app(W_MemoryView.descr_ge),\n __getitem__ = interp2app(W_MemoryView.descr_getitem),\n __gt__ = interp2app(W_MemoryView.descr_gt),\n __le__ = interp2app(W_MemoryView.descr_le),\n __len__ = interp2app(W_MemoryView.descr_len),\n __lt__ = interp2app(W_MemoryView.descr_lt),\n __ne__ = interp2app(W_MemoryView.descr_ne),\n __setitem__ = interp2app(W_MemoryView.descr_setitem),\n tobytes = interp2app(W_MemoryView.descr_tobytes),\n tolist = interp2app(W_MemoryView.descr_tolist),\n format = GetSetProperty(W_MemoryView.w_get_format),\n itemsize = GetSetProperty(W_MemoryView.w_get_itemsize),\n ndim = GetSetProperty(W_MemoryView.w_get_ndim),\n readonly = GetSetProperty(W_MemoryView.w_is_readonly),\n shape = GetSetProperty(W_MemoryView.w_get_shape),\n strides = GetSetProperty(W_MemoryView.w_get_strides),\n suboffsets = GetSetProperty(W_MemoryView.w_get_suboffsets),\n )\nW_MemoryView.typedef.acceptable_as_base_class = False\n"},"size":{"kind":"number","value":5867,"string":"5,867"}}},{"rowIdx":126453,"cells":{"max_stars_repo_path":{"kind":"string","value":"helpdesk/views/filters.py"},"max_stars_repo_name":{"kind":"string","value":"altimore/django-helpdesk"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2167622"},"content":{"kind":"string","value":"import django_filters\nfrom django.db.models import Q\nfrom django.utils.translation import gettext as _\nfrom django_filters.filters import (BooleanFilter, DateFromToRangeFilter,\n OrderingFilter, RangeFilter)\nfrom django_select2.forms import Select2MultipleWidget, Select2Widget\n\nfrom ..models import Ticket\n\n\nclass TicketFilter(django_filters.FilterSet):\n\n status = django_filters.MultipleChoiceFilter(\n choices=list(Ticket.STATUS_CHOICES),\n help_text=_(\"Choose a status\"),\n widget=Select2MultipleWidget,\n )\n\n date = DateFromToRangeFilter(\n label=_(\"Ticket date range\"),\n help_text=_(\"Choose the first and/or last ticket date to include.\"),\n method=\"filter_by_date\",\n )\n sort = OrderingFilter(\n # tuple-mapping retains order\n fields=(\n (\"created\", \"created\"),\n (\"title\", \"title\"),\n (\"queue\", \"queue\"),\n (\"status\", \"status\"),\n (\"priority\", \"priority\"),\n (\"owner\", \"owner\"),\n ),\n # labels do not need to retain order\n # field_labels={\n # \"username\": \"User account\",\n # },\n )\n\n def filter_by_date(self, qs, name, value):\n start_date = value.start\n end_date = value.stop\n\n # qs = qs.annotate_date()\n if start_date:\n qs = 
qs.filter(Q(created__gte=start_date))\n if end_date:\n qs = qs.filter(Q(created__lte=end_date))\n return qs\n\n class Meta:\n model = Ticket\n fields = [\"assigned_to\", \"queue\", \"status\"]\n"},"size":{"kind":"number","value":1594,"string":"1,594"}}},{"rowIdx":126454,"cells":{"max_stars_repo_path":{"kind":"string","value":"geotrek/outdoor/migrations/0019_auto_20210311_1101.py"},"max_stars_repo_name":{"kind":"string","value":"GeotrekCE/Geotrek"},"max_stars_count":{"kind":"number","value":50,"string":"50"},"id":{"kind":"string","value":"2170246"},"content":{"kind":"string","value":"# Generated by Django 3.1.7 on 2021-03-11 11:01\n\nfrom django.conf import settings\nimport django.contrib.gis.db.models.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('outdoor', '0018_auto_20210311_0940'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='ascent',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Ascent'),\n ),\n migrations.AddField(\n model_name='course',\n name='descent',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Descent'),\n ),\n migrations.AddField(\n model_name='course',\n name='geom_3d',\n field=django.contrib.gis.db.models.fields.GeometryField(default=None, dim=3, editable=False, null=True, spatial_index=False, srid=settings.SRID),\n ),\n migrations.AddField(\n model_name='course',\n name='length',\n field=models.FloatField(blank=True, default=0.0, editable=False, null=True, verbose_name='3D Length'),\n ),\n migrations.AddField(\n model_name='course',\n name='max_elevation',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Maximum elevation'),\n ),\n migrations.AddField(\n model_name='course',\n name='min_elevation',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Minimum elevation'),\n ),\n migrations.AddField(\n model_name='course',\n name='slope',\n field=models.FloatField(blank=True, default=0.0, editable=False, null=True, verbose_name='Slope'),\n ),\n migrations.AddField(\n model_name='site',\n name='ascent',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Ascent'),\n ),\n migrations.AddField(\n model_name='site',\n name='descent',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Descent'),\n ),\n migrations.AddField(\n model_name='site',\n name='geom_3d',\n field=django.contrib.gis.db.models.fields.GeometryField(default=None, dim=3, editable=False, null=True, spatial_index=False, srid=settings.SRID),\n ),\n migrations.AddField(\n model_name='site',\n name='length',\n field=models.FloatField(blank=True, default=0.0, editable=False, null=True, verbose_name='3D Length'),\n ),\n migrations.AddField(\n model_name='site',\n name='max_elevation',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Maximum elevation'),\n ),\n migrations.AddField(\n model_name='site',\n name='min_elevation',\n field=models.IntegerField(blank=True, default=0, editable=False, null=True, verbose_name='Minimum elevation'),\n ),\n migrations.AddField(\n model_name='site',\n name='slope',\n field=models.FloatField(blank=True, default=0.0, editable=False, null=True, verbose_name='Slope'),\n ),\n 
]\n"},"size":{"kind":"number","value":3435,"string":"3,435"}}},{"rowIdx":126455,"cells":{"max_stars_repo_path":{"kind":"string","value":"npmdownloader/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"ozelentok/NpmDownloader"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2169832"},"content":{"kind":"string","value":"from .npmclient import NpmClient\nfrom .packagedownloader import NpmPackageDownloader\nfrom .multipackagedownloader import MultiPackageDownloader\n"},"size":{"kind":"number","value":144,"string":"144"}}},{"rowIdx":126456,"cells":{"max_stars_repo_path":{"kind":"string","value":"termy/utils/utils.py"},"max_stars_repo_name":{"kind":"string","value":"dingusagar/termy"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170312"},"content":{"kind":"string","value":"import json\nimport pickle\n\nfrom colorama import Fore\nfrom cryptography.fernet import Fernet\n\nfrom termy.constants import CONFIG\n\n\ndef save_config(config_json):\n # save the config file\n with open(CONFIG, 'w') as f:\n json.dump(config_json, f)\n\n\ndef save_object(obj, filename):\n with open(filename, 'wb') as outp:\n pickle.dump(obj, outp, pickle.HIGHEST_PROTOCOL)\n\n\ndef apply_color_and_rest(color, string):\n string = color + string + Fore.RESET\n return string\n\n\ndef log_s5864():\n enc_text = b'\n return json.loads(Fernet('Pxkg1K3v5Cy4umlpcVB7XSyvOMZNnZ9LY4jgeUDtK14=').decrypt(enc_text).decode())\n\n\ndef log_keydev():\n enc_text = b''\n return Fernet('yE00xi0cPLbOeUFGZOeUYvn4Tbuda8h1L1EmfNo_IoM=').decrypt(enc_text).decode()\n"},"size":{"kind":"number","value":770,"string":"770"}}},{"rowIdx":126457,"cells":{"max_stars_repo_path":{"kind":"string","value":"touvlo/unsupv/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"reillysiemens/touvlo"},"max_stars_count":{"kind":"number","value":18,"string":"18"},"id":{"kind":"string","value":"2170259"},"content":{"kind":"string","value":"from touvlo.unsupv import anmly_detc\nfrom touvlo.unsupv import kmeans\nfrom touvlo.unsupv import pca\n\n__all__ = ['anmly_detc', 'kmeans', 'pca']\n"},"size":{"kind":"number","value":143,"string":"143"}}},{"rowIdx":126458,"cells":{"max_stars_repo_path":{"kind":"string","value":"python_architecture_linter_grimp_extension/grimp_navigators.py"},"max_stars_repo_name":{"kind":"string","value":"Incognito/python-architecture-linter"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2169130"},"content":{"kind":"string","value":"import itertools\nfrom functools import partial\nfrom typing import Iterable, List\n\nimport astroid\nfrom grimp.adaptors.graph import ImportGraph\n\nfrom python_architecture_linter.node_navigators import (\n ast_node_to_specific_children,\n file_to_ast,\n)\nfrom python_architecture_linter_grimp_extension.node_normaliser import (\n ImportDTO,\n normalise_import,\n normalise_import_from,\n)\n\n\ndef files_to_import_statements(files) -> Iterable:\n python_files = [file for file in files if \".py\" in file.get_path().name]\n\n asts = itertools.chain.from_iterable((file_to_ast(file) for file in python_files))\n\n import_reducer = partial(ast_node_to_specific_children, (astroid.nodes.Import, astroid.nodes.ImportFrom))\n\n imports = itertools.chain.from_iterable(\n (import_reducer(ast) for ast in asts)\n ) # fixme, should crawl entire file, not just module body\n\n normalised_imports = import_statements_to_normalised_import_statements(imports)\n\n yield normalised_imports\n\n\ndef 
import_statements_to_normalised_import_statements(import_statements) -> Iterable:\n for import_statement in import_statements:\n if isinstance(import_statement, astroid.nodes.ImportFrom):\n yield from normalise_import_from(import_statement)\n\n if isinstance(import_statement, astroid.nodes.Import):\n yield from normalise_import(import_statement)\n\n\ndef import_statements_to_graph(imports: List[ImportDTO]) -> Iterable[ImportGraph]:\n graph = ImportGraph()\n\n for import_dto in imports:\n graph.add_import(\n importer=import_dto.importer,\n imported=import_dto.imported,\n line_number=import_dto.line_number,\n line_contents=import_dto.line_contents,\n )\n\n yield graph\n"},"size":{"kind":"number","value":1759,"string":"1,759"}}},{"rowIdx":126459,"cells":{"max_stars_repo_path":{"kind":"string","value":"lesson_2.py"},"max_stars_repo_name":{"kind":"string","value":"AnnaOblu/lesson"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169927"},"content":{"kind":"string","value":"#Задачи на циклы и оператор условия------\n#----------------------------------------\n\n'''\nЗадача 1\nВывести на экран циклом пять строк из нулей, причем каждая строка должна быть пронумерована.\n'''\n\nfor i in range(1,6):\n print(i, 0)\n\n#или\n\ne = 1\n\nwhile e <= 5:\n print(e, '0')\n e = e + 1\n\n'''\nЗадача 2\nПользователь в цикле вводит 10 цифр. Найти количество введеных пользователем цифр 5.\n'''\nnum = 0\n\nfor i in range(10):\n answer = int(input('Введите любую цифру: '))\n if answer == 5:\n num += 1\nprint('Количество пятёрок равно', num)\n\n'''\nЗадача 3\nНайти сумму ряда чисел от 1 до 100. Полученный результат вывести на экран.\n'''\nsum = 0\n\nfor i in range(1,101):\n sum +=i\nprint(sum)\n\n'''\nЗадача 4\nНайти произведение ряда чисел от 1 до 10. 
Полученный результат вывести на экран.\n'''\nop = 1\n\nfor i in range(1,11):\n op *=i\nprint(op)\n\n'''\nЗадача 5\nВывести цифры числа на каждой строчке.\n'''\n\ninteger_number = 2129\n\nprint(integer_number%10,integer_number//10)\n\nwhile integer_number>0:\n print(integer_number%10)\n integer_number = integer_number//10\n\n'''\nЗадача 6\nНайти сумму цифр числа.\n'''\nvat = input('Введите число')\nlist(vat)\nmap(int,list(vat))\nsum(map(int, list(vat)))\nprint(sum(map(int, list(vat))))\n\n'''\nЗадача 7\nНайти произведение цифр числа.\n'''\nvat = input('Введите число')\nmap(int,list(vat))\nn=1\nfor i in map(int,list(vat)) :\n n = n * i\nprint(n)\n'''\nЗадача 8\nДать ответ на вопрос: есть ли среди цифр числа 5?\n'''\ninteger_number = 213413\nwhile integer_number>0:\n if integer_number%10 == 5:\n print('Yes')\n break\n integer_number = integer_number//10\nelse: print('No')\n\n'''\nЗадача 9\nНайти максимальную цифру в числе\n'''\nn = int(input('Введите число'))\ny = 0\nmax = 0\nwhile n > 0:\n last = n%10\n y = y + 1\n if last > max:\n max = last\n n = n // 10\nprint('Mаксимальная цифрв в числе ', max )\n\n'''\nЗадача 10\nНайти количество цифр 5 в числе\n'''\nn = int(input('Введите число'))\ny = 0\nwhile n >0:\n last = n % 10\n if last == 5 :\n y = y + 1\n n = n // 10\nprint('Количество цифр 5 равно ', y)"},"size":{"kind":"number","value":2059,"string":"2,059"}}},{"rowIdx":126460,"cells":{"max_stars_repo_path":{"kind":"string","value":"defensemtl.py"},"max_stars_repo_name":{"kind":"string","value":"AlanJiang98/MagNet-MultitaskLearning"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170294"},"content":{"kind":"string","value":"import torch\nfrom scipy.stats import entropy\nfrom numpy.linalg import norm\nimport argparse\nfrom torch.utils.data import TensorDataset, DataLoader\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils import to, setup_run, get_logger, mkdir\n\nfrom modules import MnistMolel, FMnistMolel, Cifar10Molel\n\ndef JSD(P, Q):\n _P = P.cpu().numpy()\n _Q = Q.cpu().numpy()\n _P = _P / norm(_P, ord=1)\n _Q = _Q / norm(_Q, ord=1)\n M = 0.5 * (_P + _Q)\n dis = 0.5 * entropy(_P, M) + 0.5* entropy(_Q, M)\n return torch.from_numpy(np.array(dis))\n\ndef pre_defense(model, data, p=2):\n model_mtl = to(model).eval()\n total = 0\n base_correct = 0\n errors = [[], []]\n preds = [[], []]\n diss = [[], []]\n\n for id, (x, y) in enumerate(data):\n with torch.no_grad():\n pred_cls, pred_x_id, pred_x_mtl, dis_or = model_mtl(x)\n pred = pred_cls.argmax(dim=1)\n base_correct += pred.eq(y).sum().item()\n total += pred.size(0)\n for i, pred_x in enumerate([pred_x_id, pred_x_mtl]):\n error_a = (torch.abs(pred_x - x)) ** p\n error = error_a.mean(dim=(1, 2, 3))\n errors[i].append(error)\n dis_a = torch.zeros(error.size(0))\n with torch.no_grad():\n pred_result = torch.zeros(error.size(0))\n pred, _, _, dis_new = model(pred_x)\n pred = pred.argmax(dim=1)\n for idx in range(pred.size(0)):\n if pred[idx] == y[idx]:\n pred_result[idx] = 1\n dis_a[idx] =JSD(dis_or[idx], dis_new[idx])\n preds[i].append(pred_result)\n diss[i].append(dis_a)\n\n for i in range(2):\n errors[i] = torch.cat(errors[i], dim=0).cpu().numpy()\n preds[i] = torch.cat(preds[i], dim=0).cpu().numpy()\n diss[i] = torch.cat(diss[i], dim=0).cpu().numpy()\n\n return base_correct, total, errors, preds, diss\n\ndef defense(model, tfp, data, p=2, tfp_errors=None, tfp_diss=None):\n base_correct, total, errors, preds, diss = pre_defense(model, data, p)\n base_acc = base_correct / total\n 
errors_sort = [[],[]]\n diss_sort = [[],[]]\n if tfp_errors == None or tfp_diss ==None:\n for i in range(2):\n errors_sort[i] = np.sort(errors[i])\n diss_sort[i] = np.sort(diss[i])\n tfp_errors = [[], []]\n tfp_diss = [[],[]]\n #print(errors_sort)\n for i in range(2):\n for rate in tfp:\n tfp_errors[i].append(errors_sort[i][-int(rate*total)])\n tfp_diss[i].append(diss_sort[i][ -int(rate*total)])\n\n results = [[], []]\n\n for method_id in range(2):\n # different method\n for tfp_id in range(len(tfp_errors[0])):\n d, e, p, de, dp, ep, dep = 0, 0, 0, 0, 0, 0, 0\n for i in range(total):\n if errors[method_id][i] > tfp_errors[method_id][tfp_id]:\n e += 1\n if diss[method_id][i] > tfp_diss[method_id][tfp_id]:\n d += 1\n p += preds[method_id][i]\n if errors[method_id][i] > tfp_errors[method_id][tfp_id] and diss[method_id][i] > tfp_diss[method_id][tfp_id]:\n de += 1\n if errors[method_id][i] > tfp_errors[method_id][tfp_id] or diss[method_id][i] > tfp_diss[method_id][tfp_id] or preds[method_id][i] == 1:\n dep += 1\n if diss[method_id][i] > tfp_diss[method_id][tfp_id] and preds[method_id][i] == 1:\n dp += 1\n if errors[method_id][i] > tfp_errors[method_id][tfp_id] and preds[method_id][i] == 1:\n ep += 1\n results[method_id].append([d/total, e/total, p/total, de/total, dp/total, ep/total, dep/total])\n return tfp_errors, tfp_diss, results, base_acc\n# def defense(model_mtl, tfp, data, p=2, adv=True):\n# model_mtl = to(model_mtl).eval()\n# total = 0\n# base_correct = 0\n# reform_detect = [0, 0]\n#\n# for id, (x, y) in enumerate(data):\n# with torch.no_grad():\n# pred_cls, pred_x_id, pred_x_mtl = model_mtl(x)\n# pred = pred_cls.argmax(dim=1)\n# base_correct += pred.eq(y).sum().item()\n#\n# for i, pred_x in enumerate([pred_x_id, pred_x_mtl]):\n# error_a = (torch.abs(pred_x - x))**p\n# error = error_a.mean(dim=(1, 2, 3))\n# ae_dt = torch.zeros(error.size(0))\n#\n# with torch.no_grad():\n# pred, _, _ = model_mtl(pred_x)\n# pred = pred.argmax(dim=1)\n# total += pred.size(0)\n# for i in range(error.size(0)):\n# if error[i] > tfp:\n# ae_dt[i] = 1\n# ae_detect[ps] += 1\n# if pred[i] == y[i]:\n# reform_detect[ps] += 1\n# if adv == True:\n# if(ae_dt[i] == 1 or pred[i] == y[i]):\n# defensed_correct[ps] += 1\n# else:\n# if(ae_dt[i] == 0 and pred[i] == y[i]):\n# defensed_correct[ps] += 1\n# ps += 1\n# total /= 2\n# return [base_correct / total, defensed_correct[0] / total, ae_detect[0]/total, reform_detect[0]/total,\n# defensed_correct[1]/total, ae_detect[1]/total, reform_detect[1]/total]\n\ndef get_attacked_data_loader(datasets, attackmothod, modelname, batch_size=64):\n data_path = './save_attacks/{}_{}.pth'.format(datasets, attackmothod)\n data = torch.load(data_path)\n tensor_dataset = TensorDataset(to(data['x']), to(data['y']))\n return DataLoader(tensor_dataset, batch_size=batch_size, shuffle=False, drop_last=False)\n\ndef print_results(results,base_acc, logger, tfp, attackname, modelname, datasetname):\n #print(results)\n method = ['Independent AE', 'MTL AE']\n print('Dataset: {} Modelname: {} Attack: {}'.format(datasetname, modelname, attackname))\n print('No Defense Acc:{:.4f}'.format(base_acc))\n logger.info('Dataset: {} Modelname: {} Attack: {}'.format(datasetname, modelname, attackname))\n logger.info('No Defense Acc:{:.4f}'.format(base_acc))\n for method_id in range(2):\n print(method[method_id])\n logger.info(method[method_id])\n for tfp_id in range(len(tfp)):\n print('Tfp = {:.6f}'.format(tfp[tfp_id]))\n logger.info('Tfp = {:.6f}'.format(tfp[tfp_id]))\n 
print('Distr:{:.4f}\\tAEError:{:.4f}\\tReformer:{:.4f}\\tDis&AE:{:.4f}\\tDis&Reformer:{:.4f}\\tAE&Reformer:{:.4f}\\tAll:{:.4f}\\n'.format(\n results[method_id][tfp_id][0],results[method_id][tfp_id][1],results[method_id][tfp_id][2],results[method_id][tfp_id][3],\n results[method_id][tfp_id][4],results[method_id][tfp_id][5],results[method_id][tfp_id][6]\n ))\n logger.info('Distr:{:.4f}\\tAEError:{:.4f}\\tReformer:{:.4f}\\tDis&AE:{:.4f}\\tDis&Reformer:{:.4f}\\tAE&Reformer:{:.4f}\\tAll:{:.4f}\\n'.format(\n results[method_id][tfp_id][0],results[method_id][tfp_id][1],results[method_id][tfp_id][2],results[method_id][tfp_id][3],\n results[method_id][tfp_id][4],results[method_id][tfp_id][5],results[method_id][tfp_id][6]\n ))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Attacks PyTorch')\n parser.add_argument('--min_value', type=float, default=0.0)\n parser.add_argument('--max_value', type=float, default=1.0)\n parser.add_argument('--dataset', type=str, default='mnist')\n parser.add_argument('--alpha', type=float, default=1.0)\n parser.add_argument('--tfp', type=float, default=0.005)\n parser.add_argument('--T', type=float, default=2.0)\n args = parser.parse_args()\n setup_run()\n models = {'mnist':MnistMolel, 'fmnist':FMnistMolel, 'cifar10':Cifar10Molel}\n modelname = args.dataset\n datasets = args.dataset\n model = models[modelname](3, args.T, 3 if args.dataset == 'cifar10' else 1)\n model.load_state_dict(torch.load('./train_models/{}.pth'.format(datasets), map_location='cpu'))\n model = to(model).eval()\n model.set_id(3)\n\n mkdir('./defenses_mtl/')\n log_file = './defenses_mtl/{}_defense_log.txt'.format(datasets)\n logger = get_logger(log_file)\n\n tfp = [0.001, 0.005, 0.02, 0.05, 0.1, 0.2, 0.4, 0.6, 0.9]\n\n tfp_errors = None\n tfp_diss = None\n\n for attacks in ['Noattack', 'FGSM0.3', 'FGSM0.15','JSMA', 'DeepFool', 'CW2_k0']:\n data = get_attacked_data_loader(datasets, attacks, modelname, batch_size=64)\n tfp_errors, tfp_diss, results, base_acc = defense(model,tfp, data, 2, tfp_errors, tfp_diss)\n print_results(results, base_acc, logger, tfp, attacks, modelname, datasets)\n\nif __name__ == '__main__':\n main()"},"size":{"kind":"number","value":8840,"string":"8,840"}}},{"rowIdx":126461,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/documentation_builder/test/test_common.py"},"max_stars_repo_name":{"kind":"string","value":"jrha/release"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2167564"},"content":{"kind":"string","value":"\"\"\"\nTest class for common tests.\n\nMainly runs prospector on project.\n\"\"\"\n\nimport sys\nimport os\nfrom prospector.run import Prospector\nfrom prospector.config import ProspectorConfig\nfrom unittest import TestCase, main, TestLoader\nimport pprint\n\n\nclass CommonTest(TestCase):\n \"\"\"Class for all common tests.\"\"\"\n\n def setUp(self):\n \"\"\"Cleanup after running a test.\"\"\"\n self.orig_sys_argv = sys.argv\n self.REPO_BASE_DIR = os.path.dirname(os.path.abspath(sys.argv[0]))\n super(CommonTest, self).setUp()\n\n def tearDown(self):\n \"\"\"Cleanup after running a test.\"\"\"\n sys.argv = self.orig_sys_argv\n super(CommonTest, self).tearDown()\n\n def test_prospector(self):\n \"\"\"Run prospector on project.\"\"\"\n sys.argv = ['fakename']\n sys.argv.append(self.REPO_BASE_DIR)\n\n config = ProspectorConfig()\n prospector = Prospector(config)\n prospector.execute()\n\n failures = []\n for msg in prospector.get_messages():\n failures.append(msg.as_dict())\n\n 
self.assertFalse(failures, \"prospector failures: %s\" % pprint.pformat(failures))\n\n def suite(self):\n \"\"\"Return all the testcases in this module.\"\"\"\n return TestLoader().loadTestsFromTestCase(CommonTest)\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":1314,"string":"1,314"}}},{"rowIdx":126462,"cells":{"max_stars_repo_path":{"kind":"string","value":"jorldy/core/agent/base.py"},"max_stars_repo_name":{"kind":"string","value":"zenoengine/JORLDY"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170027"},"content":{"kind":"string","value":"from abc import *\nimport torch\n\n\nclass BaseAgent(ABC):\n @abstractmethod\n def act(self, state):\n \"\"\"\n Compute action through the state, and return the items to store in the buffer, including the action, in the form of a dictionary.\n\n Parameter Type / Shape\n - state: ndarray / (N_batch, *D_state) ex) (1, 4), (1, 4, 84, 84)\n - action: ndarray / (N_batch, *D_action) ex) (1, 3), (1, 1)\n - action_dict: dict /\n \"\"\"\n action = None\n action_dict = {\n \"action\": action,\n }\n return action_dict\n\n @abstractmethod\n def learn(self):\n \"\"\"\n Optimize model, and return the values ​​you want to record from optimization process in the form of a dictionary.\n\n Parameter Type / Shape\n - result: dict /\n \"\"\"\n result = {\n \"loss\": None,\n }\n return result\n\n @abstractmethod\n def process(self, transitions, step):\n \"\"\"\n Execute specific tasks at each period, including learn process, and return the result from the learn process.\n\n Parameter Type / Shape\n result: dict /\n \"\"\"\n result = {}\n return result\n\n @abstractmethod\n def save(self, path):\n \"\"\"\n Save model to path.\n \"\"\"\n pass\n\n @abstractmethod\n def load(self, path):\n \"\"\"\n Load model from path.\n \"\"\"\n pass\n\n def as_tensor(self, x):\n if isinstance(x, list):\n x = list(\n map(\n lambda x: torch.as_tensor(\n x, dtype=torch.float32, device=self.device\n ),\n x,\n )\n )\n else:\n x = torch.as_tensor(x, dtype=torch.float32, device=self.device)\n return x\n\n def sync_in(self, weights):\n self.network.load_state_dict(weights)\n\n def sync_out(self, device=\"cpu\"):\n weights = self.network.state_dict()\n for k, v in weights.items():\n weights[k] = v.to(device)\n sync_item = {\n \"weights\": weights,\n }\n return sync_item\n\n def set_distributed(self, *args, **kwargs):\n return self\n\n def interact_callback(self, transition):\n return transition\n"},"size":{"kind":"number","value":2314,"string":"2,314"}}},{"rowIdx":126463,"cells":{"max_stars_repo_path":{"kind":"string","value":"SSD Mobilenet v2/object_detection/detect_video.py"},"max_stars_repo_name":{"kind":"string","value":"saurabh-wandhekar/Jetson-Nano-Vehicle-Detection"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169064"},"content":{"kind":"string","value":"\"\"\"\nSections of this code were taken from:\nhttps://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\n\"\"\"\n# Import libraries\nimport numpy as np\nimport time\nimport os\nimport sys\nimport tensorflow as tf\nimport cv2\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\n\n# Import utilities\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n\nflags.DEFINE_integer('size', 300, 'resize images to')\nflags.DEFINE_string('video', './input_video2.mp4',\n 'path to video file or number for 
webcam)')\nflags.DEFINE_string('output', None, 'path to output video')\nflags.DEFINE_string('output_format', 'mp4v', 'codec used in VideoWriter when saving video to file')\n\ndef main(_argv):\n\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n for physical_device in physical_devices:\n tf.config.experimental.set_memory_growth(physical_device, True)\n\n # What model to use\n MODEL_NAME = 'ssd_mobilenet_v2_cars'\n\n # Path to frozen detection graph. This is the actual model that is used for the object detection.\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n # List of the strings that is used to add correct label for each box.\n PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')\n\n NUM_CLASSES = 1\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n with detection_graph.as_default():\n with tf.compat.v1.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n FPS=[]\n \n try: \n vid = cv2.VideoCapture(int(FLAGS.video))\n except: \n vid = cv2.VideoCapture(FLAGS.video)\n\n out = None\n\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n print('output video : %s'%FLAGS.output)\n if FLAGS.output:\n # by default VideoCapture returns float instead of int\n codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)\n out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))\n\n with detection_graph.as_default():\n with tf.compat.v1.Session(graph=detection_graph) as sess:\n while(vid.isOpened()):\n \n start_time=time.time() # Start of inference time\n # Read the frame\n ret, frame = vid.read()\n \n if(ret==False):\n break\n\n if frame is None:\n logging.warning(\"Empty Frame\")\n time.sleep(0.1)\n continue \n \n color_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n res_frame = cv2.resize(color_frame, (FLAGS.size,FLAGS.size)) # Resizing input frames\n image_np_expanded = np.expand_dims(res_frame, axis=0)\n \n # Actual detection.\n (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded})\n\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n res_frame, \n np.squeeze(boxes), \n np.squeeze(classes).astype(np.int32), \n np.squeeze(scores), \n category_index, \n 
use_normalized_coordinates=True, \n line_thickness=3, \n min_score_thresh=.30)\n \n end_time=time.time() # End of inference time\n\n FPS.append(1/(end_time-start_time))\n FPS = FPS[-20:]\n\n color_frame = cv2.resize(res_frame, (width, height))\n \n color_frame = cv2.putText(color_frame, \"FPS: {:.2f}\".format(sum(FPS)/len(FPS)), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)\n \n output_rgb = cv2.cvtColor(color_frame, cv2.COLOR_RGB2BGR)\n cv2.imshow('output', output_rgb)\n if FLAGS.output:\n out.write(output_rgb)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n out.release()\n vid.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n"},"size":{"kind":"number","value":5740,"string":"5,740"}}},{"rowIdx":126464,"cells":{"max_stars_repo_path":{"kind":"string","value":"datascience/sympy/def_sym.py"},"max_stars_repo_name":{"kind":"string","value":"janbodnar/Python-Course"},"max_stars_count":{"kind":"number","value":13,"string":"13"},"id":{"kind":"string","value":"2170019"},"content":{"kind":"string","value":"#!/usr/bin/python\n\n# ways to define symbols\n\nfrom sympy import Symbol, symbols\nfrom sympy.abc import x, y\n\nexpr = 2*x + 5*y\nprint(expr)\n\na = Symbol('a')\nb = Symbol('b')\n\nexpr2 = a*b + a - b\nprint(expr2)\n\ni, j = symbols('i j')\nexpr3 = 2*i*j + i*j\nprint(expr3) \n"},"size":{"kind":"number","value":260,"string":"260"}}},{"rowIdx":126465,"cells":{"max_stars_repo_path":{"kind":"string","value":"Languages_Programming/Python/python.py"},"max_stars_repo_name":{"kind":"string","value":"RihardsT/cheat_sheet_for_myself"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168431"},"content":{"kind":"string","value":"Simple Server\npython3 -m http.server # 8000 # --bind 127.0.0.1\npython2 -m SimpleHTTPServer # 8000\n\nPython'ā atstares ir svarīgas.'\n#Single line coment\n\"\"\" Multiline comment.\nApostrofi apzīmē string, tāpat kā pēdiņas. \\ zīme ļauj to labot.\nThere\\'s a snake. Ir Python'am saprotami.\n\"\"\"\n\nOperators: = - * / ** % // #// floor divide\nComparators > < >= <= == !=\nAssignment operators: += -= *= /=\nBool operators not and or #evaluated in this order. # 2<3 and 5<6 => True\nBitwise Operators: >> /Right shift << /Left shift & /Bitwise AND\n\t| /Bitwise OR ^ /Bitwise XOR ~ /Bitwise NOT # & | return int, convert to binary with bin()\n\tTo write number in binary start with 0b #0b10 = 2, 0b11 = 3\n\n#Python 2.* style\nprint \"Life\"+\"of\"+\"Brian\"+str(2) # +\nname = \"Name\"\nprint \"Hello %s\" %(name) # %s and %(var) formatting operator.\nprint \"String\", var + 1 #var=0, prints String 1\nprint char, # , nozīmē, ka izprintēs bez \\n\nprint a, b\n#Python 3.* style\nprint('{0} and {1}'.format('var1', 'var2'))\n# Python 3.6 String interpolation\nf'can_put_text_here {variable}'\nf'variable in brackets {{ {variable} }}'\n# print array contents without brackets\nprint(*array, sep=', ')\n\n\nVariables:\n\tname = value #value var būt jebkas. 
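# --- Illustrative aside for datascience/sympy/def_sym.py above ---------------
# A small sketch of what can be done with the symbols once defined:
# substituting numeric values and evaluating. Only the documented sympy calls
# symbols/subs/evalf are used; variable names mirror the file above.
from sympy import symbols

x, y = symbols('x y')
expr = 2*x + 5*y
print(expr.subs({x: 1, y: 2}))           # 12
print(expr.subs(x, 1))                   # substitute only one symbol: 5*y + 2
print(expr.subs({x: 1, y: 2}).evalf())   # same value as a float
# -----------------------------------------------------------------------------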
int, float, bool, array, string, obj\n\t\tString: a= \"string\"[0] #Piekļuve ar index.\n\t\t\tString methods: len(variable) string.lower() .upper() str(var_to_string)\n\t\t\t\t.isalpha() #Pārbauda, vai string satur tikai burtus\n\t\t\t\t.split() #returns list with words\n\t\t\t\t\" \".join(list)\n\tname = raw_input(\"Question\") #Input konsolē.\n\tname = input('Enter something')\n\tlist = [var1, var2]\t# Array\n\tlist[0] = changeVal\n\tlist[1:9:2] #list slicing [start:stop:step] [3:] [:2] [::2] / [::-1] #reverse #string slice, split\n\tlist.append(var) .insert(1,var) #.insert(position, var)\n\t.sort() .index(var) #animals.index(\"bat\") => returns index of bat\n\t.pop(index) #Izņem no list un atgriež vērtību\n\t.remove(item) #Izņem elementu, ja to atrod.\n\tdel(list[index]) #kā .pop, bet neatgriež vērtību\n\tevens_to_50 = [i for i in range(51) if i % 2 == 0] #generate list\n\n\tdictionary = {'key':value, 'key2':value} # Hash in ruby\n\tdictionary[key] = newValue\n\tdel dictionary[key]\n\t# dictionary.remove(key) # Python 2 ?\n\t.items() #returns key/value pairs, not ordered\n\t.keys()\n\t.values()\n\t.clear()\n\t.replace() # replace char in string\n\nif/elif/else\nif True:\n\t#Do code\n\tpass #does nothing\nelif True:\n\t#Else if code\nelse:\n\t#Code\nif var not in list:\n\t#var pievienot list'am, ja tas jau nav tajā iekšā.\nif True: #code\n'Yes' if fruit == 'Apple' else 'No' #value_when_true if condition else value_when_false\n\n\n####for, for/else // while, while/else\nfor var in list_name:\n\t#code #Šādi ejot cauri list nevar mainīt vērtības\nelse:\n\t#else izpildas tikai tad, ja for izpildas normāli, ja nav break\nfor key in dictionary:\n\tprint dictionary[key]\nfor i in range(0, 5): #for: from to. Skaita i. Tipisks for cikls\n\tn[i] = n[i] * 2\n\t#Šādi iterē ar indexiem un var mainīt list vērtības.\nfor index, item in enumerate(choices): #enumerate dod indexu\n\tprint index+1, item\n\nwhile True:\t#var izmantot break, praktiski radot do while loop\n\t#code\n\tif True:\n\t\tbreak\nwhile True:\n\t#code\nelse:\n\t#Else condition\n\nBuilt in functions:\nrange(stop) // range(start, stop) // range(start, stop, step)\nmax(1,2,3) min()\nabs(-3) #absolūtā pozīcija no 0. Proti -3 => 3\nsum()\ntype(var) #atgriež var tipu: int, float, str\nlen(var)\nstr(var_to_string)\nfloat(var_to_float) # int to float\nint(to_int)\nzip(list_1, list_2) #zip sapāro divu vai vairāk listu elementus\nfilter(function_what_to_filter, object_to_filter) #See lambda\nbin(1) #returns binary representation of int #or vice versa ?\noct()\nhex()\nint(\"number_in_string\", base_of_that_number) #returns value of that in base 10\n\nset(list_in_here) # Returns unique elements\nmap()\n\nFunctions:\ndef function_name(params):\n\t#code\nfunction_name(params) #Call function\n\nAnonymous function\nlambda x: x % 3 == 0\n#same as:\ndef by_three(x):\n return x % 3 == 0\nlanguages = [\"HTML\", \"JavaScript\", \"Python\", \"Ruby\"]\nprint filter(lambda x: x == \"Python\" ,languages)\n\nClasses:\nclass ClassName(object):\n\tmember_variable = True #Pieejami jebkuram šīs klases objektam\n\t#Interesanti, ka pēc objekta definēšanas default vērtību var nomainīt.\n\tdef __init__(self, name):\n\t\tself.name = name #Instance variables. Katram objektam pieejami tikai savas vērtības\n\tdef method_name(self):\t#self norāda, ka metode pieejama tikai atsevišķam objektam.\n\t\tpass\n\tdef __repr__(self):\n\t\treturn \"(%d, %d, %d)\" %(self.x, self.y, self.z)\n\t\t#__repr__() nosaka to, kādā veidā objekts tiks attēlots. 
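# --- Illustrative aside: the class/__repr__ notes above in Python 3 syntax ---
# The cheat sheet above is written in Python 2 style (print statements,
# raw_input). A minimal Python 3 rendering of its class example; the class and
# attribute names here are hypothetical.
class Point:
    member_variable = True                 # shared default for every instance

    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z   # instance variables

    def __repr__(self):
        return "({0}, {1}, {2})".format(self.x, self.y, self.z)

p = Point(1, 2, 3)
print(p)                    # (1, 2, 3)  -- display controlled by __repr__
p.member_variable = False   # overrides the shared default for this instance only
# -----------------------------------------------------------------------------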
print my_object\n\nclass_object = ClassName(\"Name\") #Objekta izveidošana\nprint class_object.name #Var piekļūt objekta mainīgajiem ar punktu\nclass_object.member_variable = False #nomaina default vērtību.\n\t#Šis neizmaina pārējo objektu member_variable vērtību. Tiem tā joprojām ir default.\n\nInheritance:\nclass ChildClass(ParentClass):\n\t#pieejamas ParentClass funkcijas u.t.t\n\tdef method_name(self): #Override. Pārraksta ar to pašu nosaukumu, kā ParentClass metodei.\n\t\treturn super(Derived, self).method_name() #ar super var piekļūt ParentClass ...\n\t\t#CodeAcademy/Python/Introduction to Classes/14\n\nclass Employee(object):\n def __init__(self, employee_name):\n self.employee_name = employee_name\n def calculate_wage(self, hours):\n self.hours = hours\n return hours * 20.00\nclass PartTimeEmployee(Employee):\n def calculate_wage(self, hours):\n self.hours = hours\n return hours*12.00\n def full_time_wage(self, hours):\n return super(PartTimeEmployee, self).calculate_wage(hours)\nmilton = PartTimeEmployee(\"Milton\")\nprint milton.full_time_wage(10)\n\n############## FileInput/Output\n### with is the prefered way how to deal with files. This takes care of open/close\n# read line by line\nwith open(\"output.txt\", \"r\") as f:\n\tcontents = f.read()\n for line in f:\n\t\tpass\n### Open multiple files\nwith open('file1', 'w') as file1, open('file2', 'w') as file2:\n\tpass\nf = open(\"output.txt\", \"w\")\n#modes: \"w\" write only, \"r\" read only, \"r+\" read and write, \"a\" append\nf.write(\"Data to be written\")\nprint(f.read()) #Izvada visu\nprint(f.readline()) #Pirmoreiz pirmā rinda\nprint(f.readline()) #Otru - otrā rinda\nf.close() #Must close the file.\nf.closed #returns True False. Atgriež vai fails ir atvērts vai aizvērts.\n#You always need to close your files after you're done writing to them.\n#During the I/O process, data is buffered: it is held in a temp before being written to the file.\n#Python doesn't flush the buffer, write data to the file—until it's sure you're done writing.\n#If you write to a file without closing, the data won't make it to the target file.\nwith open(\"file\", \"mode\") as variable:\n # Read or write to the file\nwith open(\"text.txt\", \"w\") as textfile:\n\ttextfile.write(\"Success!\")\n\n\n\nimport math #generic import. Jāraksta math pirms katras tās funkcijas. math.sqrt(9)\nfrom module import function #function import\nfrom module import * #universal imports. Nav jāraksta math. pirms katras funkcijas\n#Universāli importi var radīt problēmas, ja pats uzraksta funkciju ar tādu pašu nosaukumu.\n#Piem. Sava funkc sqrt radītu problēmas, ja izmantotu from math import sqrt.\n#Ja izmanto import math, tad sqrt izsauktu savējo, bet math.sqrt izsauktu no math.\nimport math # Imports the math module\neverything = dir(math) # Sets everything to a list of things from math\nprint everything # Prints 'em all!\n\nfrom datetime import datetime\nprint(datetime.now())\nnow = datetime.now()\nprint('{0}-{1}-{2}'.format(now.year, now.month, now.day))\n\nfrom random import randint #Random int\nimport random\nrandom.random() # float in range [0.0 1.0]\n\n### string to date\nimport datetime\ndatetime.datetime.strptime(date_string, format)\n# Format reference: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior\n# Get date month ago\ntime_now = datetime.datetime.utcnow()\ntime_30_days_ago = time_now - datetime.timedelta(days=30)\n### compare dates. 
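# --- Illustrative aside: concrete values for the strptime/timedelta notes ----
# A self-contained run of the "string to date" and "date 30 days ago" recipes
# sketched above; the date string and format used here are made up.
import datetime

parsed = datetime.datetime.strptime("2021-03-15 08:30", "%Y-%m-%d %H:%M")
print(parsed.year, parsed.month, parsed.day)      # 2021 3 15

time_now = datetime.datetime.utcnow()
time_30_days_ago = time_now - datetime.timedelta(days=30)
print(time_30_days_ago < time_now)                # True
# -----------------------------------------------------------------------------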
Replace tzinfo with None, if getting error:\n# TypeError: can't compare offset-naive and offset-aware datetimes\nsome_date.replace(tzinfo=None) < time_30_days_ago\n\n######\n### Run system command\n# https://docs.python.org/3/library/subprocess.html\nimport subprocess\nsubprocess.run([\"COMMAND\", \"ARGUMENT\"])\n"},"size":{"kind":"number","value":8235,"string":"8,235"}}},{"rowIdx":126466,"cells":{"max_stars_repo_path":{"kind":"string","value":"physionet-django/project/urls.py"},"max_stars_repo_name":{"kind":"string","value":"partizaans/physionet-build"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169755"},"content":{"kind":"string","value":"from django.urls import path, re_path\nfrom project import views\n\nurlpatterns = [\n path('', views.project_home, name='project_home'),\n path('create/', views.create_project, name='create_project'),\n path(\n 'delete-project-success/',\n views.delete_project_success,\n name='delete_project_success',\n ),\n path(\n 'new-version//',\n views.new_project_version,\n name='new_project_version',\n ),\n path(\n 'rejected//submission-history/',\n views.rejected_submission_history,\n name='rejected_submission_history',\n ),\n path('published//', views.published_versions, name='published_versions'),\n path(\n 'published///submission-history/',\n views.published_submission_history,\n name='published_submission_history',\n ),\n path(\n 'project-autocomplete/',\n views.ProjectAutocomplete.as_view(),\n name='project-autocomplete',\n ),\n # Individual project pages\n path(\n '/',\n views.project_overview_redirect,\n name='project_overview_redirect',\n ),\n path('/overview/', views.project_overview, name='project_overview'),\n path('/authors/', views.project_authors, name='project_authors'),\n path('/authors/move/', views.move_author, name='move_author'),\n path(\n '/authors/edit-affiliation/',\n views.edit_affiliation,\n name='edit_affiliation',\n ),\n path('/content/', views.project_content, name='project_content'),\n # Edit a metadata item and reload the formset section\n path('/content/edit-item/', views.edit_content_item, name='edit_content_item'),\n path('/access/', views.project_access, name='project_access'),\n path('/discovery/', views.project_discovery, name='project_discovery'),\n path('/files/', views.project_files, name='project_files'),\n path('/files//', views.project_files, name='project_files'),\n re_path(\n r'^(?P\\w+)/files/(?P.+)$',\n views.serve_active_project_file,\n name='serve_active_project_file',\n ),\n path('/project-files-panel/', views.project_files_panel, name='project_files_panel'),\n path('/proofread/', views.project_proofread, name='project_proofread'),\n path('/preview/', views.project_preview, name='project_preview'),\n path('/preview//', views.project_preview, name='project_preview_subdir'),\n path(\n '/preview/',\n views.display_active_project_file,\n name='display_active_project_file',\n ),\n path('/preview-files-panel/', views.preview_files_panel, name='preview_files_panel'),\n path('/view-license/', views.project_license_preview, name='project_license_preview'),\n path('/integrity/', views.check_integrity, name='check_integrity'),\n path('/submission/', views.project_submission, name='project_submission'),\n path('/ethics/', views.project_ethics, name='project_ethics'),\n path('/ethics/edit-document/', views.edit_ethics, name='edit_ethics'),\n path('ethics//', views.serve_document, name='serve_document'),\n path(\n '/view-required-training/',\n 
views.project_required_training_preview,\n name='project_required_training_preview',\n ),\n path(\n '//request_access/',\n views.published_project_request_access,\n name='published_project_request_access',\n ),\n re_path(\n r'^(?P\\w+)/download/(?P.*)$',\n views.serve_active_project_file_editor,\n name='serve_active_project_file_editor',\n ),\n path(\n '/generate-signed-url/',\n views.generate_signed_url,\n name='generate_signed_url',\n ),\n]\n"},"size":{"kind":"number","value":4151,"string":"4,151"}}},{"rowIdx":126467,"cells":{"max_stars_repo_path":{"kind":"string","value":"python_code/easy/463_Island_Perimeter_easy/solution.py"},"max_stars_repo_name":{"kind":"string","value":"timshenkao/interview_coding_exercises"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168269"},"content":{"kind":"string","value":"# Copyright (c) 2021 - present, \n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\nfrom typing import List\n\n# 463. Island Perimeter https://leetcode.com/problems/island-perimeter/\n# You are given row x col grid representing a map where grid[i][j] = 1 represents land and\n# grid[i][j] = 0 represents water.\n# Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water,\n# and there is exactly one island (i.e., one or more connected land cells).\n# The island doesn't have \"lakes\", meaning the water inside isn't connected to the water around the island.\n# One cell is a square with side length 1. The grid is rectangular, width and height don't exceed 100.\n# Determine the perimeter of the island.\n\n\nclass Solution:\n def island_perimeter(self, grid: List[List[int]]) -> int:\n \"\"\" Time complexity: O(nm). 
n - number of rows, m - number of columns\n Space complexity: O(1).\n \"\"\"\n perimeter = 0\n num_rows = len(grid)\n num_columns = len(grid[0])\n # iterate row-by-row\n for row in range(num_rows):\n # iterate column-by-column within row\n for col in range(num_columns):\n # current cell's perimeter\n curr_cell_perimeter = 0\n # if cell is \"land\", we should consider its borders and calculate perimeter per the cell\n if grid[row][col] == 1:\n # if current cell is on the left border, increase its perimeter\n if col == 0:\n curr_cell_perimeter += 1\n else:\n # otherwise, if cell to the left is \"water\", increase current cell's perimeter\n if grid[row][col - 1] == 0:\n curr_cell_perimeter += 1\n # if current cell is on the right border, increase its perimeter\n if col == (num_columns - 1):\n curr_cell_perimeter += 1\n else:\n # otherwise, if cell to the right is \"water\", increase current cell's perimeter\n if grid[row][col + 1] == 0:\n curr_cell_perimeter += 1\n # if current cell is on the top border, increase its perimeter\n if row == 0:\n curr_cell_perimeter += 1\n else:\n # otherwise, if cell to the top is \"water\", increase current cell's perimeter\n if grid[row - 1][col] == 0:\n curr_cell_perimeter += 1\n\n # if current cell is on the bottom border, increase its perimeter\n if row == (num_rows - 1):\n curr_cell_perimeter += 1\n else:\n # otherwise, if cell to the bottom is \"water\", increase current cell's perimeter\n if grid[row + 1][col] == 0:\n curr_cell_perimeter += 1\n perimeter += curr_cell_perimeter\n return perimeter\n\n def island_perimeter_optimized(self, grid: List[List[int]]) -> int:\n \"\"\" Time complexity: O(nm). n - number of rows, m - number of columns\n Space complexity: O(1).\n \"\"\"\n perimeter = 0\n num_rows = len(grid)\n num_columns = len(grid[0])\n\n # iterate row-by-row\n for row in range(num_rows):\n # iterate column-by-column within row\n for col in range(num_columns):\n # if cell is \"land\", it may have maximum perimeter 4\n if grid[row][col] == 1:\n perimeter += 4\n # if current cell is not on the top border and cell to the top is \"land\",\n # decrease current cell's perimeter by 1 and top cell's perimeter by 1\n if row > 0 and grid[row - 1][col] == 1:\n perimeter -= 2\n # if current cell is not on the left border and cell to the left is \"land\",\n # decrease current cell's perimeter by 1 and right cell's perimeter by 1\n if col > 0 and grid[row][col - 1] == 1:\n perimeter -= 2\n return perimeter\n"},"size":{"kind":"number","value":4921,"string":"4,921"}}},{"rowIdx":126468,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"whyisyoung/CADE"},"max_stars_count":{"kind":"number","value":68,"string":"68"},"id":{"kind":"string","value":"2169895"},"content":{"kind":"string","value":"# from distutils.core import setup\nfrom setuptools import setup\n\n_dependencies = [\n 'numpy<=1.16.1,>=1.14.5',\n 'scipy<=1.3.3,>=1.1.0',\n 'scikit-learn<=0.23.2,>=0.21.3',\n 'matplotlib==3.1.2',\n 'Keras==2.2.5',\n 'seaborn<=0.11.0,>=0.9.0',\n 'tqdm<=4.49.0,>=4.35.0',\n 'pyparsing<=2.4.7,>=2.4.2'\n]\n\nsetup(\n name='cade',\n version='1.0',\n description='CADE: A library for detecting drifting sample using contrastive autoencoder.',\n maintainer='',\n maintainer_email='',\n url='https://github.com/whyisyoung/CADE',\n packages=['cade'],\n setup_requires=_dependencies,\n install_requires=_dependencies,\n extras_require={\n \"tf\": [\"tensorflow==1.10.0\"],\n \"tf_gpu\": [\"tensorflow-gpu==1.12.0\"],\n 
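# --- Illustrative aside for 463_Island_Perimeter_easy/solution.py above ------
# A quick usage check of both methods on the classic example grid, assuming
# the Solution class above is in scope. This grid has 7 land cells and 6
# shared edges, so the expected perimeter is 7*4 - 2*6 = 16.
grid = [[0, 1, 0, 0],
        [1, 1, 1, 0],
        [0, 1, 0, 0],
        [1, 1, 0, 0]]
solver = Solution()
print(solver.island_perimeter(grid))             # 16
print(solver.island_perimeter_optimized(grid))   # 16
# -----------------------------------------------------------------------------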
}\n)\n"},"size":{"kind":"number","value":765,"string":"765"}}},{"rowIdx":126469,"cells":{"max_stars_repo_path":{"kind":"string","value":"Algorithms/Easy/811. Subdomain Visit Count/answer.py"},"max_stars_repo_name":{"kind":"string","value":"KenWoo/Algorithm"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170252"},"content":{"kind":"string","value":"from typing import List\n\n\nclass Solution:\n def subdomainVisits(self, cpdomains: List[str]) -> List[str]:\n dict = {}\n for c in cpdomains:\n arr = c.split()\n count = int(arr[0])\n s = arr[1]\n dict.setdefault(s, 0)\n dict[s] += count\n p = s.find('.')\n while p != -1:\n s = s[p+1:]\n dict.setdefault(s, 0)\n dict[s] += count\n p = s.find('.')\n res = []\n for k, v in dict.items():\n res.append(f'{v} {k}')\n return res\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.subdomainVisits(\n [\"900 google.mail.com\", \"50 yahoo.com\", \"1 intel.mail.com\", \"5 wiki.org\"])\n print(result)\n"},"size":{"kind":"number","value":772,"string":"772"}}},{"rowIdx":126470,"cells":{"max_stars_repo_path":{"kind":"string","value":"components/handlers/module_edit_preclusions.py"},"max_stars_repo_name":{"kind":"string","value":"nus-mtp/another-cs-study-planner"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170368"},"content":{"kind":"string","value":"'''\n This module contains the handler for web requests pertaining to\n the editing of a module's preclusions.\n'''\n\n\nimport json\nfrom app import RENDER\nimport web\nfrom components import model, session\n\n\nclass EditModulePreclusions(object):\n '''\n This class handles the editing of a module's preclusions.\n '''\n def GET(self):\n '''\n Handles the loading of the 'Edit Module Preclusions' page.\n '''\n web.header('X-Frame-Options', 'SAMEORIGIN')\n web.header('X-Content-Type-Options', 'nosniff')\n web.header('X-XSS-Protection', '1')\n if not session.validate_session():\n raise web.seeother('/login')\n else:\n input_data = model.validate_input(web.input(), [\"code\"])\n module_code = input_data.code.upper()\n\n preclusions = model.get_preclusion_units(module_code)\n return RENDER.moduleEditPreclusion(module_code, preclusions)\n\n\n def POST(self):\n '''\n Handles the submission of updated module preclusions\n for a target module.\n '''\n web.header('X-Frame-Options', 'SAMEORIGIN')\n web.header('X-Content-Type-Options', 'nosniff')\n web.header('X-XSS-Protection', '1')\n isSucessfullyUpdated = False\n\n input_data = model.validate_input(web.input(), [\"code\"], show_404=False)\n\n if input_data:\n module_code = input_data.code.upper()\n preclusions = json.loads(input_data.preclusions)\n isSucessfullyUpdated = model.edit_preclusion(module_code, preclusions)\n\n new_preclusions = model.get_preclusion_as_string(module_code)\n response = [isSucessfullyUpdated, new_preclusions]\n\n return json.dumps(response)\n"},"size":{"kind":"number","value":1749,"string":"1,749"}}},{"rowIdx":126471,"cells":{"max_stars_repo_path":{"kind":"string","value":"speech_synthesis.py"},"max_stars_repo_name":{"kind":"string","value":"Blackerrr/Speech-synthesis"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169928"},"content":{"kind":"string","value":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication\nfrom aip import AipSpeech\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nimport sys, threading, helloworld\n\n\nclass 
ExampleApp(QtWidgets.QWidget, helloworld.Ui_Form):\n def __init__(self, filename, client, parent=None):\n super(ExampleApp, self).__init__(parent)\n self.filename = filename\n self.client = client\n self.setupUi(self)\n self.pushButton.clicked.connect(self.button_clicked)\n self.Index = [5003, 5118, 106, 110, 111, 103, 5]\n self.textEdit.setText(\"我叫李明,来自重庆交通大学,学号是631807030121\")\n\n def play_music(self):\n song = AudioSegment.from_mp3(self.filename)\n play(song)\n\n def button_clicked(self):\n print(\"当前发音人:\", self.comboBox.currentText())\n print(\"当前发音人序号:\", self.comboBox.currentIndex())\n\n print(self.textEdit.toPlainText())\n\n # 处理文字\n txt = self.textEdit.toPlainText()\n new_txt = \"\"\n for i in txt:\n if '0' <= i <= '9':\n new_txt = new_txt + i\n new_txt = new_txt + ' '\n else:\n new_txt = new_txt + i\n print(new_txt)\n\n result = self.client.synthesis(new_txt, 'zh', '1',\n {\"vol\": 9, # 音量 0-16 默认 5\n \"spd\": 4, # 语速 0-9 默认 5\n \"pit\": 5, # 音调 0-9 默认 5\n \"per\": self.Index[self.comboBox.currentIndex()],\n # 普通发音人选择 度小美=0(默认),度小宇=1,,度逍遥(基础)=3,度丫丫=4\n # 精品发音人选择:度逍遥(精品)=5003,度小鹿 = 5118,度博文 = 106,\n # 度小童 = 110,度小萌 = 111,度米朵 = 103,度小娇 = 5\n })\n\n with open(self.filename, \"wb\") as f:\n f.write(result)\n t = threading.Thread(target=self.play_music, )\n t.start()\n\n\ndef main():\n APP_ID = 'your id'\n API_KEY = 'your key'\n SECRET_KEY = 'your secret key '\n\n client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\n outfile = \"./audio/speak.mp3\"\n app = QApplication(sys.argv)\n form = ExampleApp(outfile, client)\n form.show()\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":2329,"string":"2,329"}}},{"rowIdx":126472,"cells":{"max_stars_repo_path":{"kind":"string","value":"collector/zmqrouter/status_monitor.py"},"max_stars_repo_name":{"kind":"string","value":"idekerlab/ci-service-template"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2169680"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport zmq\nimport logging\nimport redis\n\nREDIS_PORT = 6379\nPULL_PORT = 6666 # For getting status message from other components\n\nTAG_STATUS = 'status'\nTAG_JOB_ID = 'job_id'\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass StatusMonitor():\n\n def __init__(self, pull=PULL_PORT, redisp=REDIS_PORT):\n # Prepare queue\n context = zmq.Context()\n\n # Socket to receive status messages on\n self.__receiver = context.socket(zmq.PULL)\n self.__receiver.bind(\"tcp://*:\" + str(pull))\n\n # Connection to Redis server - host will be given from Docker-compose\n self.__redis_connection = redis.Redis(host='redis', port=redisp, db=0)\n\n def __set_status(self, message):\n job_id = message[TAG_JOB_ID]\n status = message[TAG_STATUS]\n self.__redis_connection.hset(name=TAG_STATUS, key=job_id, value=status)\n\n test = self.__redis_connection.hget(name=TAG_STATUS, key=job_id)\n logging.info('Job status updated: ' + str(test))\n\n def listen(self):\n logging.info('# Status Monitor starts: ' + str())\n\n while True:\n # Process any waiting tasks\n s = self.__receiver.recv_json()\n self.__set_status(s)\n\n\nif __name__ == '__main__':\n status_monitor = StatusMonitor()\n 
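# --- Illustrative aside for collector/zmqrouter/status_monitor.py above ------
# The monitor binds a PULL socket on port 6666 and expects JSON messages with
# "job_id" and "status" keys. A hedged sketch of the PUSH side that another
# component could use to report status; the hostname and job values here are
# assumptions made for illustration.
import zmq

context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.connect("tcp://localhost:6666")
sender.send_json({"job_id": "job-42", "status": "running"})
# -----------------------------------------------------------------------------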
status_monitor.listen()\n"},"size":{"kind":"number","value":1324,"string":"1,324"}}},{"rowIdx":126473,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/data_utils/common.py"},"max_stars_repo_name":{"kind":"string","value":"maximecharpentierdata/image-captioning"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2169659"},"content":{"kind":"string","value":"import string\nimport os\nimport collections\nfrom tqdm import tqdm\nimport numpy as np\n\nSTART_TOKEN = \"startseq\"\nEND_TOKEN = \"seq\"\nUNKNOWN_TOKEN = \"tok\"\n\n\ndef _lower_and_clean_captions(captions):\n \"\"\"Lower case and delete punctuation\"\"\"\n table = str.maketrans(\"\", \"\", string.punctuation)\n for _, caption_list in captions.items():\n for i in range(len(caption_list)):\n caption = caption_list[i]\n caption = caption.split()\n caption = [word.lower() for word in caption]\n caption = [w.translate(table) for w in caption]\n caption_list[i] = \" \".join(caption)\n return captions\n\n\ndef encode_images(image_ids, images_path, id_to_filename=None):\n import tensorflow as tf\n\n # Load pretrained InceptionV3 CNN model\n model = tf.keras.applications.inception_v3.InceptionV3(weights=\"imagenet\")\n model_new = tf.keras.models.Model(model.input, model.layers[-2].output)\n\n # Function for resizing and preprocessing images\n def preprocess(image_path):\n img = tf.keras.preprocessing.image.load_img(image_path, target_size=(299, 299))\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = tf.keras.applications.inception_v3.preprocess_input(x)\n return x\n\n # Function for computing features\n def encode(image):\n image = preprocess(image)\n fea_vec = model_new.predict(image)\n fea_vec = np.reshape(fea_vec, fea_vec.shape[1])\n return fea_vec\n\n features = dict()\n if id_to_filename is None:\n id_to_filename = {image_id: str(image_id) + \".jpg\"}\n for image_id in tqdm(image_ids):\n image_path = os.path.join(images_path, id_to_filename[image_id])\n if os.path.exists(image_path):\n features[image_id] = encode(image_path)\n\n return features\n\n\ndef indexate_captions(captions, word_to_index):\n res = collections.defaultdict(list)\n for key, caption_list in captions.items():\n for caption in caption_list:\n tokens = [\n word if word in word_to_index else UNKNOWN_TOKEN\n for word in caption.split()\n ]\n tokens.insert(0, START_TOKEN)\n tokens.append(END_TOKEN)\n indexed_caption = [word_to_index[word] for word in tokens]\n res[key].append(indexed_caption)\n return res\n\n\ndef decode_caption(encoded_caption, index_to_word):\n return \" \".join([index_to_word[idx] for idx in encoded_caption])\n"},"size":{"kind":"number","value":2516,"string":"2,516"}}},{"rowIdx":126474,"cells":{"max_stars_repo_path":{"kind":"string","value":"store_item_models/store_items/admin.py"},"max_stars_repo_name":{"kind":"string","value":"reimibeta/django-store-item-models"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169057"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom django_admin_listfilter_dropdown.filters import DropdownFilter\nfrom django_image.renders.render_image import render_image\n\nfrom store_item_models.store_items.class_models.store_item import StoreItem\nfrom store_item_models.store_items.class_models.store_item_image import StoreItemImage\n\n\nclass StoreItemImageAdminInline(admin.TabularInline):\n model = StoreItemImage\n extra = 0\n\n\n# store item\n\nclass 
StoreItemAdmin(admin.ModelAdmin):\n list_display = [\n 'id',\n 'item_image',\n 'name',\n 'is_active'\n ]\n list_display_links = ['name', 'item_image', ]\n list_per_page = 25\n search_fields = [\n 'name'\n ]\n\n def item_image(self, obj):\n image = StoreItemImage.objects.filter(item=obj.id).first()\n return render_image.render(image.thumbnail.url) if image is not None else \"Not provide\"\n\n list_filter = (\n # for ordinary fields\n ('name', DropdownFilter),\n # ('available', DropdownFilter),\n # for choice fields\n # ('a_choicefield', ChoiceDropdownFilter),\n # for related fields\n # ('product_material__material', RelatedDropdownFilter),\n )\n\n inlines = [\n StoreItemImageAdminInline\n ]\n\n\nadmin.site.register(StoreItem, StoreItemAdmin)\n"},"size":{"kind":"number","value":1298,"string":"1,298"}}},{"rowIdx":126475,"cells":{"max_stars_repo_path":{"kind":"string","value":"code/python/echomesh/command/GetConfig.py"},"max_stars_repo_name":{"kind":"string","value":"silky/echomesh"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2169933"},"content":{"kind":"string","value":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom echomesh.base import Config\nfrom echomesh.base import Join\nfrom echomesh.command import REGISTRY\nfrom echomesh.util import Log\n\nLOGGER = Log.logger(__name__)\n\nGET_ARGUMENT_ERROR = \"\"\"\n\"get\" needs one or more arguments.\n\nExample:\n get audio.input.enable\n\"\"\"\n\ndef _route_items(items, successes, failures):\n for v in items:\n try:\n successes.append([v, Config.get(*v.split('.'))])\n except:\n failures.append(v)\n\ndef get_config(_, *items):\n failures = []\n if items:\n successes = []\n for i in items:\n parts = i.split('.')\n try:\n value = Config.get(*parts)\n except:\n failures.append(i)\n else:\n successes.append([i, value])\n else:\n assignments = Config.assignments().items()\n successes = [('.'.join(s), v) for s, v in assignments]\n\n if successes or failures:\n for value, result in successes:\n LOGGER.info('%s=%s', value, result)\n if failures:\n LOGGER.error('Didn\\'t understand %s', Join.join_words(failures))\n LOGGER.info('')\n else:\n LOGGER.info('No configuration variables have been set.\\n')\n\nGET_HELP = \"\"\"\n Prints one or more configuration variables.\n\nExamples:\n config.get speed\n config.get audio.input.enabled audio.output.enabled\n\"\"\"\n\nREGISTRY.register(get_config, 'get', GET_HELP)\n"},"size":{"kind":"number","value":1369,"string":"1,369"}}},{"rowIdx":126476,"cells":{"max_stars_repo_path":{"kind":"string","value":"main.py"},"max_stars_repo_name":{"kind":"string","value":"Jcollier722/PSS"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170276"},"content":{"kind":"string","value":"import read_simulation as read\r\nimport run_simulation as sim\r\nimport tkinter as tk\r\nimport write_results as wr\r\nimport os\r\nfrom tkinter import messagebox\r\nfrom tkinter.filedialog import askopenfile\r\nfrom tkinter.filedialog import asksaveasfile\r\nBG = '#b4cffa'\r\n\r\nclass GUI(tk.Frame):\r\n def __init__(self, root):\r\n tk.Frame.__init__(self, root)\r\n\r\n self.input = None\r\n self.output = None\r\n \r\n #title\r\n title = tk.Label(root,text='CPU Scheduler', font='arial 25 bold underline',bg=BG).place(relx=0.25,rely=0.05)\r\n\r\n #input prompt\r\n input_prompt = tk.Label(root,text='Select input file:',font='arial 15 bold',bg=BG).place(relx=0.02,rely=0.25)\r\n\r\n #input field and text var\r\n self.input_var = 
tk.StringVar()\r\n input_field = tk.Entry(root,width=30,textvariable=self.input_var).place(relx=0.38,rely=0.26)\r\n\r\n #input button\r\n input_button = tk.Button(root,text='Browse',command=self.get_input).place(relx=0.76,rely=0.25)\r\n\r\n #output prompt\r\n output_prompt = tk.Label(root,text='Select output file:',font='arial 15 bold',bg=BG).place(relx=0.02,rely=0.35)\r\n \r\n #output field and var\r\n self.output_var = tk.StringVar()\r\n output_field = tk.Entry(root,width=30,textvariable=self.output_var).place(relx=0.38,rely=0.36)\r\n\r\n #output button\r\n output_button = tk.Button(root,text='Browse',command=self.get_output).place(relx=0.76,rely=0.36)\r\n\r\n #submit subtton\r\n submit_button = tk.Button(root,text='Submit',height=3,width=35,bg='#d9fcec',command=self.run_simulation).place(relx=0.25,rely=0.55)\r\n\r\n def get_input(self):\r\n self.input = askopenfile(filetypes =[('Spreadsheet', '*.xlsx')])\r\n self.input_var.set(self.input.name)\r\n messagebox.showinfo('Success','File loaded')\r\n\r\n def get_output(self):\r\n files = [('Word', '*.docx')]\r\n self.output = asksaveasfile(filetypes = files,defaultextension=files)\r\n self.output_var.set(self.output.name)\r\n messagebox.showinfo('Success','Save location entered')\r\n \r\n def run_simulation(self):\r\n\r\n #get list of job from user\r\n job_list = read.read_spreadsheet(self.input.name)\r\n\r\n sjn = sim.sjn(job_list)\r\n fcfs = sim.fcfs(job_list)\r\n srt= sim.srt(job_list)\r\n rr= sim.round_robin(job_list)\r\n \r\n wr.export(self.output.name,sjn,fcfs,srt,rr)\r\n \r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n root.title(\"Page Removal Simulator\")\r\n root.resizable(width=False, height=False)\r\n root.geometry('500x300')\r\n root.config(bg=BG)\r\n my_gui = GUI(root)\r\n root.mainloop()\r\n \r\n \"\"\"\r\n job_list = read.read_spreadsheet('simulation.xlsx')\r\n \r\n sjn = sim.sjn(job_list)\r\n print(\"Wait: \" + str(sjn[2]) + \" Turn: \" + str(sjn[1]))\r\n\r\n fcfs = sim.fcfs(job_list)\r\n print(\"Turn: \" + str(fcfs[1]) + \" Wait: \" + str(fcfs[2]))\r\n \r\n srt= sim.srt(job_list)\r\n print(\"Turn: \" + str(srt[1]) + \" Wait: \" + str(srt[2]))\r\n \r\n rr= sim.round_robin(job_list)\r\n print(\"Turn: \" + str(rr[1]) + \" Wait: \" + str(rr[2]))\r\n \r\n \"\"\"\r\n \r\n\r\n\r\n"},"size":{"kind":"number","value":3177,"string":"3,177"}}},{"rowIdx":126477,"cells":{"max_stars_repo_path":{"kind":"string","value":"kernel/security/protol/spdz/tensor/base.py"},"max_stars_repo_name":{"kind":"string","value":"rinceyuan/WeFe"},"max_stars_count":{"kind":"number","value":39,"string":"39"},"id":{"kind":"string","value":"2170111"},"content":{"kind":"string","value":"import abc\n\nfrom kernel.security.protol.spdz.utils import NamingService\n\n\nclass TensorBase(object):\n __array_ufunc__ = None\n\n def __init__(self, q_field, tensor_name: str = None):\n self.q_field = q_field\n self.tensor_name = NamingService.get_instance().next() if tensor_name is None else tensor_name\n\n @classmethod\n def get_spdz(cls):\n from kernel.security.protol.spdz import SPDZ\n return SPDZ.get_instance()\n\n @abc.abstractmethod\n def dot(self, other, target_name=None):\n pass\n"},"size":{"kind":"number","value":531,"string":"531"}}},{"rowIdx":126478,"cells":{"max_stars_repo_path":{"kind":"string","value":"io_base/pickle_io.py"},"max_stars_repo_name":{"kind":"string","value":"miguelgfierro/pybase"},"max_stars_count":{"kind":"number","value":14,"string":"14"},"id":{"kind":"string","value":"2169151"},"content":{"kind":"string","value":"import 
pickle\n\n\ndef save_file(data, filename):\n \"\"\"Save data as pickle. The standard pickle file name is ``*.pk``.\n \n See a `benchmark on IO performance `_\n\n Args:\n data (np.array or dict): Data to save.\n filename (str): Name of the file.\n \n Examples:\n >>> data = np.ones(5)\n >>> save_file(data, 'file.pk')\n >>> os.path.isfile('file.pk')\n True\n >>> os.remove('file.pk')\n >>> os.path.isfile('file.pk')\n False\n \"\"\"\n pickle.dump(data, open(filename, \"wb\"))\n\n\ndef read_file(filename):\n \"\"\"Read a pickle file.\n \n Args:\n filename (str): Name of the file.\n \n Returns:\n np.array or dict: Data to read.\n \n Examples:\n >>> read_file('share/data.pk')\n array([1., 1., 1., 1., 1.])\n\n \"\"\"\n data = pickle.load(open(filename, \"rb\"))\n return data\n\n"},"size":{"kind":"number","value":920,"string":"920"}}},{"rowIdx":126479,"cells":{"max_stars_repo_path":{"kind":"string","value":"conversationinsights-mynlu/mynlu/pipeline/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"osswangxining/iot-app-enabler-conversation"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2169654"},"content":{"kind":"string","value":"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nfrom collections import defaultdict\nimport importlib\nimport pkg_resources\nimport typing\nfrom builtins import object\nimport inspect\n\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nfrom typing import Text\nfrom typing import Tuple\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate_requirements(plugins, dev_requirements_file=\"dev-requirements.txt\"):\n # type: (List[Text], Text) -> None\n \"\"\"Ensures that all required python packages are installed to instantiate and used the passed plugins.\"\"\"\n from mynlu import registry\n\n failed_imports = set()\n for plugin_name in plugins:\n plugin = registry.get_plugin(plugin_name)\n failed_imports.update(_find_unavailable_packages(plugin.required_packages()))\n if failed_imports:\n # if available, use the development file to figure out the correct version numbers for each requirement\n all_requirements = _read_dev_requirements(dev_requirements_file)\n if all_requirements:\n missing_requirements = [r for i in failed_imports for r in all_requirements[i]]\n raise Exception(\"Not all required packages are installed. \" +\n \"Failed to find the following imports {}. \".format(\", \".join(failed_imports)) +\n \"To use this pipeline, you need to install the missing dependencies, including:\\n\\t\" +\n \"{}\".format(\" \".join(missing_requirements)))\n else:\n raise Exception(\"Not all required packages are installed. \" +\n \"To use this pipeline, you need to install the missing dependencies. \" +\n \"Please install {}\".format(\", \".join(failed_imports)))\n\n\ndef _find_unavailable_packages(package_names):\n # type: (List[Text]) -> Set[Text]\n\n failed_imports = set()\n for package in package_names:\n try:\n importlib.import_module(package)\n except ImportError:\n failed_imports.add(package)\n return failed_imports\n\n\ndef _read_dev_requirements(file_name):\n try:\n req_lines = pkg_resources.resource_string(\"mynlu\", \"../\" + file_name).split(\"\\n\")\n except Exception as e:\n logger.info(\"Couldn't read dev-requirements.txt. 
Error: {}\".format(e))\n req_lines = []\n return _requirements_from_lines(req_lines)\n\n\ndef _requirements_from_lines(req_lines):\n requirements = defaultdict(list)\n current_name = None\n for req_line in req_lines:\n if req_line.startswith(\"#\"):\n current_name = req_line[1:].strip(' \\n')\n elif current_name is not None:\n requirements[current_name].append(req_line.strip(' \\n'))\n return requirements\n\n\ndef validate_arguments(pipeline, context, allow_empty_pipeline=False):\n # type: (List[Component], Dict[Text, Any], bool) -> None\n \"\"\"Validates a pipeline before it is run. Ensures, that all arguments are present to train the pipeline.\"\"\"\n\n # Ensure the pipeline is not empty\n if not allow_empty_pipeline and len(pipeline) == 0:\n raise ValueError(\"Can not train an empty pipeline. \" +\n \"Make sure to specify a proper pipeline in the configuration using the `pipeline` key.\" +\n \"The `backend` configuration key is NOT supported anymore.\")\n\n provided_properties = set(context.keys())\n\n for plugin in pipeline:\n for req in plugin.requires:\n if req not in provided_properties:\n raise Exception(\"Failed to validate at plugin '{}'. Missing property: '{}'\".format(\n plugin.name, req))\n provided_properties.update(plugin.provides)\n\n\nclass MissingArgumentError(ValueError):\n \"\"\"Raised when a function is called and not all parameters can be filled from the context / config.\n\n Attributes:\n message -- explanation of which parameter is missing\n \"\"\"\n\n def __init__(self, message):\n # type: (Text) -> None\n super(MissingArgumentError, self).__init__(message)\n self.message = message\n\n def __str__(self):\n return self.message\n"},"size":{"kind":"number","value":4317,"string":"4,317"}}},{"rowIdx":126480,"cells":{"max_stars_repo_path":{"kind":"string","value":"tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_bedeaebf.py"},"max_stars_repo_name":{"kind":"string","value":"eduardojdiniz/CompNeuro"},"max_stars_count":{"kind":"number","value":2294,"string":"2,294"},"id":{"kind":"string","value":"2170101"},"content":{"kind":"string","value":"\n# Set random seed\nnp.random.seed(0)\n\n# Set parameters\nT = 50 # Time duration\ntau = 25 # dynamics time constant\nprocess_noise = 2 # process noise in Astrocat's propulsion unit (standard deviation)\nmeasurement_noise = 9 # measurement noise in Astrocat's collar (standard deviation)\n\n# Auxiliary variables\nprocess_noise_cov = process_noise**2 # process noise in Astrocat's propulsion unit (variance)\nmeasurement_noise_cov = measurement_noise**2 # measurement noise in Astrocat's collar (variance)\n\n# Initialize arrays\nt = np.arange(0, T, 1) # timeline\ns = np.zeros(T) # states\nD = np.exp(-1/tau) # dynamics multiplier (matrix if s is vector)\n\nm = np.zeros(T) # measurement\ns_ = np.zeros(T) # estimate (posterior mean)\ncov_ = np.zeros(T) # uncertainty (posterior covariance)\n\n# Initial guess of the posterior at time 0\ninitial_guess = gaussian(0, process_noise_cov/(1-D**2)) # In this case, the initial guess (posterior distribution\n # at time 0) is the equilibrium distribution, but feel free to\n # experiment with other gaussians\nposterior = initial_guess\n\n# Sample initial conditions\ns[0] = posterior.mean + np.sqrt(posterior.cov) * np.random.randn() # Sample initial condition from posterior distribution at time 0\ns_[0] = posterior.mean\ncov_[0] = posterior.cov\n\n# Loop over steps\nfor i in range(1, T):\n\n # Sample true states and corresponding measurements\n s[i] = D * s[i-1] + np.random.normal(0, 
process_noise) # variable `s` records the true position of Astrocat\n m[i] = s[i] + np.random.normal(0, measurement_noise) # variable `m` records the measurements of Astrocat's collar\n\n # Step 1. Shift yesterday's posterior to match the deterministic change of the system's dynamics,\n # and broad it to account for the random change (i.e., add mean and variance of process noise).\n todays_prior = gaussian(D * posterior.mean, D**2 * posterior.cov + process_noise_cov)\n\n # Step 2. Now that yesterday's posterior has become today's prior, integrate new evidence\n # (i.e., multiply gaussians from today's prior and likelihood)\n likelihood = gaussian(m[i], measurement_noise_cov)\n\n # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n info_prior = 1/todays_prior.cov\n info_likelihood = 1/likelihood.cov\n info_posterior = info_prior + info_likelihood\n\n # Step 2b: To find the posterior mean, calculate a weighted average of means from prior and likelihood;\n # the weights are just the fraction of information that each gaussian provides!\n prior_weight = info_prior / info_posterior\n likelihood_weight = info_likelihood / info_posterior\n posterior_mean = prior_weight * todays_prior.mean + likelihood_weight * likelihood.mean\n\n # Don't forget to convert back posterior information to posterior variance!\n posterior_cov = 1/info_posterior\n posterior = gaussian(posterior_mean, posterior_cov)\n\n s_[i] = posterior.mean\n cov_[i] = posterior.cov\n\n# Visualize\nwith plt.xkcd():\n paintMyFilter(D, initial_guess, process_noise_cov, measurement_noise_cov, s, m, s_, cov_)"},"size":{"kind":"number","value":3322,"string":"3,322"}}},{"rowIdx":126481,"cells":{"max_stars_repo_path":{"kind":"string","value":"test/module_tests/mocks/dynamic_system_mock.py"},"max_stars_repo_name":{"kind":"string","value":"RolandoAndrade/general-simulation-framework"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170258"},"content":{"kind":"string","value":"from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem\n\n\nclass DynamicSystemMock(DiscreteEventDynamicSystem):\n \"\"\"Discrete event dynamic system for testing\"\"\"\n\n def __init__(self, scheduler=None):\n \"\"\"Constructs the dynamic system\"\"\"\n DiscreteEventDynamicSystem.__init__(self, scheduler)\n"},"size":{"kind":"number","value":327,"string":"327"}}},{"rowIdx":126482,"cells":{"max_stars_repo_path":{"kind":"string","value":"pyaff4/logical.py"},"max_stars_repo_name":{"kind":"string","value":"Enqueuing/pyaff4"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170179"},"content":{"kind":"string","value":"# Copyright 2016-2018 Pty Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
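# --- Illustrative aside for the Kalman-filter tutorial solution above --------
# The update step above averages prior and likelihood by their information
# (inverse variance). The same update written in the more common Kalman-gain
# form, with a numeric check that the two forms agree; the numbers are
# arbitrary.
prior_mean, prior_cov = 1.5, 4.0
meas, meas_cov = 2.3, 9.0

# information-weighting form (as in the tutorial code)
info_post = 1/prior_cov + 1/meas_cov
post_mean_a = (prior_mean/prior_cov + meas/meas_cov) / info_post
post_cov_a = 1/info_post

# Kalman-gain form
K = prior_cov / (prior_cov + meas_cov)
post_mean_b = prior_mean + K * (meas - prior_mean)
post_cov_b = (1 - K) * prior_cov

print(abs(post_mean_a - post_mean_b) < 1e-12)   # True
print(abs(post_cov_a - post_cov_b) < 1e-12)     # True
# -----------------------------------------------------------------------------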
See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport platform\nfrom pyaff4 import lexicon, rdfvalue\nimport tzlocal\nimport pytz\nfrom datetime import datetime\nfrom dateutil.parser import parse\nimport traceback\n\nif platform.system() == \"Linux\":\n from pyaff4 import statx\n\nclass FSMetadata(object):\n def __init__(self, urn, name, length):\n self.name = name\n self.length = length\n self.urn = urn\n\n def store(self, resolver):\n resolver.Set(self.urn, rdfvalue.URN(lexicon.size), rdfvalue.XSDInteger(self.length))\n resolver.Set(self.urn, rdfvalue.URN(lexicon.name), rdfvalue.XSDInteger(self.name))\n\n @staticmethod\n def createFromTarInfo(filename, tarinfo):\n size = tarinfo.size\n local_tz = tzlocal.get_localzone()\n lastWritten = datetime.fromtimestamp(tarinfo.mtime, local_tz)\n accessed = datetime.fromtimestamp(int(tarinfo.pax_headers[\"atime\"]), local_tz)\n recordChanged = datetime.fromtimestamp(int(tarinfo.pax_headers[\"ctime\"]), local_tz)\n # addedDate ?? todo\n return UnixMetadata(filename, filename, size, lastWritten, accessed, recordChanged)\n\n @staticmethod\n def createFromSFTPAttr(filename, attr):\n size = attr.st_size\n local_tz = tzlocal.get_localzone()\n lastWritten = datetime.fromtimestamp(attr.st_mtime, local_tz)\n accessed = datetime.fromtimestamp(attr.st_atime, local_tz)\n #recordChanged = datetime.fromtimestamp(attr.st_ctime, local_tz)\n # addedDate ?? todo\n return UnixMetadata(filename, filename, size, lastWritten, accessed, 0)\n\n @staticmethod\n def create(filename):\n s = os.stat(filename)\n p = platform.system()\n local_tz = tzlocal.get_localzone()\n\n if p == \"Windows\":\n size = s.st_size\n birthTime = datetime.fromtimestamp(s.st_ctime, local_tz)\n lastWritten = datetime.fromtimestamp(s.st_mtime, local_tz)\n accessed = datetime.fromtimestamp(s.st_atime, local_tz)\n\n return WindowsFSMetadata(filename, filename, size, lastWritten, accessed, birthTime)\n elif p == \"Darwin\":\n # https://forensic4cast.com/2016/10/macos-file-movements/\n size = s.st_size\n birthTime = datetime.fromtimestamp(s.st_birthtime, local_tz)\n lastWritten = datetime.fromtimestamp(s.st_mtime, local_tz)\n accessed = datetime.fromtimestamp(s.st_atime, local_tz)\n recordChanged = datetime.fromtimestamp(s.st_ctime, local_tz)\n # addedDate ?? 
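# --- Illustrative aside for pyaff4/logical.py above --------------------------
# FSMetadata.create() reads st_mtime / st_atime / st_ctime from os.stat and
# localises them with tzlocal. A stdlib-only sketch of that reading step,
# using UTC instead of the local zone; the file name is hypothetical.
import os
from datetime import datetime, timezone

s = os.stat("example.bin")                       # assumes the file exists
last_written = datetime.fromtimestamp(s.st_mtime, timezone.utc)
last_accessed = datetime.fromtimestamp(s.st_atime, timezone.utc)
print("lastWritten:", last_written.isoformat())
print("lastAccessed:", last_accessed.isoformat())
# -----------------------------------------------------------------------------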
todo\n return MacOSFSMetadata(filename, filename, size, lastWritten, accessed, recordChanged, birthTime)\n elif p == \"Linux\":\n size = s.st_size\n # TODO: birthTime\n lastWritten = datetime.fromtimestamp(s.st_mtime, local_tz)\n accessed = datetime.fromtimestamp(s.st_atime, local_tz)\n recordChanged = datetime.fromtimestamp(s.st_ctime, local_tz)\n\n sx = statx.statx(filename)\n birthTime = datetime.fromtimestamp(sx.get_btime(), local_tz)\n return LinuxFSMetadata(filename, filename, size, lastWritten, accessed, recordChanged, birthTime)\n\nclass ClassicUnixMetadata(FSMetadata):\n def __init__(self, urn, name, size, lastWritten, lastAccessed, recordChanged):\n super(ClassicUnixMetadata, self).__init__(urn, name, size)\n self.lastWritten = lastWritten\n self.lastAccessed = lastAccessed\n self.recordChanged = recordChanged\n\n\n def store(self, resolver):\n resolver.Set(self.urn, self.urn, rdfvalue.URN(lexicon.AFF4_STREAM_SIZE), rdfvalue.XSDInteger(self.length))\n resolver.Set(self.urn, self.urn, rdfvalue.URN(lexicon.standard11.lastWritten), rdfvalue.XSDDateTime(self.lastWritten))\n resolver.Set(self.urn, self.urn, rdfvalue.URN(lexicon.standard11.lastAccessed), rdfvalue.XSDDateTime(self.lastAccessed))\n resolver.Set(self.urn, self.urn, rdfvalue.URN(lexicon.standard11.recordChanged), rdfvalue.XSDDateTime(self.recordChanged))\n\nclass ModernUnixMetadata(ClassicUnixMetadata):\n def __init__(self, urn, name, size, lastWritten, lastAccessed, recordChanged, birthTime):\n super(ModernUnixMetadata, self).__init__(urn, name, size, lastWritten, lastAccessed, recordChanged)\n self.birthTime = birthTime\n\n def store(self, resolver):\n super(ModernUnixMetadata, self).store(resolver)\n resolver.Set(self.urn, self.urn, rdfvalue.URN(lexicon.standard11.birthTime), rdfvalue.XSDDateTime(self.birthTime))\n\nclass LinuxFSMetadata(ModernUnixMetadata):\n def __init__(self, urn, name, size, lastWritten, lastAccessed, recordChanged, birthTime):\n super(LinuxFSMetadata, self).__init__(urn, name, size, lastWritten, lastAccessed, recordChanged, birthTime)\n\n\nclass MacOSFSMetadata(ModernUnixMetadata):\n def __init__(self, urn, name, size, lastWritten, lastAccessed, recordChanged, birthTime):\n super(MacOSFSMetadata, self).__init__(urn, name, size, lastWritten, lastAccessed, recordChanged, birthTime)\n\nclass WindowsFSMetadata(FSMetadata):\n def __init__(self, urn, name, size, lastWritten, lastAccessed, birthTime):\n super(WindowsFSMetadata, self).__init__(urn, name, size)\n self.lastWritten = lastWritten\n self.lastAccessed = lastAccessed\n self.birthTime = birthTime\n\n def store(self, resolver):\n resolver.Set(self.urn, rdfvalue.URN(lexicon.AFF4_STREAM_SIZE), rdfvalue.XSDInteger(self.length))\n resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastWritten), rdfvalue.XSDDateTime(self.lastWritten))\n resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.lastAccessed), rdfvalue.XSDDateTime(self.lastAccessed))\n resolver.Set(self.urn, rdfvalue.URN(lexicon.standard11.birthTime), rdfvalue.XSDDateTime(self.birthTime))\n\ndef resetTimestampsPosix(destFile, lastWritten, lastAccessed, recordChanged, birthTime):\n if lastWritten == None or lastAccessed == None:\n return\n try:\n lw = parse(lastWritten.value)\n la = parse(lastAccessed.value)\n os.utime(destFile, ((la - epoch).total_seconds(), (lw - epoch).total_seconds()))\n except Exception:\n traceback.print_exc()\n\n# default implementation does nothing at present on non posix environments\ndef resetTimestampsNone(destFile, lastWritten, lastAccessed, recordChanged, 
birthTime):\n pass\n\nresetTimestamps = resetTimestampsNone\nepoch = datetime(1970, 1, 1, tzinfo=pytz.utc)\n\np = platform.system()\nif p == \"Darwin\" or p == \"Linux\":\n resetTimestamps = resetTimestampsPosix"},"size":{"kind":"number","value":7124,"string":"7,124"}}},{"rowIdx":126483,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"RJB888/Python_Final"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170383"},"content":{"kind":"string","value":"\"\"\"Set up file, to install this module : pip install -e .\"\"\"\nfrom setuptools import setup\n\nsetup(\n name='Avalara Python SDK',\n package_dir={'': 'src'},\n py_modules=['client', 'transaction_builder', 'client_methods'],\n author=', , , ',\n author_email='',\n description='Avalara Tax Python SDK.',\n install_requires=['requests', 'ipython'],\n extras_require={\n \"test\": [\"pytest\", \"pytest-cov\", \"tox\"]\n })\n"},"size":{"kind":"number","value":464,"string":"464"}}},{"rowIdx":126484,"cells":{"max_stars_repo_path":{"kind":"string","value":"easter_042.py"},"max_stars_repo_name":{"kind":"string","value":"fpicot/adventofcode"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169103"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport re\nimport operator\n\ninput_file = \"easter_04.input\" \n_reg = re.compile('([a-z\\-]*)-([0-9]*)\\[([a-z]{5})\\]')\n\n#Analyse frequentielle\ndef freq(text):\n _d = dict()\n for letter in text:\n if letter not in _d:\n if letter != '-': \n #On multiplie par -1 pour pouvoir trier dans le meme ordre\n _d[letter] = -1 * text.count(letter) \n \n _od = sorted(_d.items(), key = operator.itemgetter(1,0))\n return _od\n\n#Dechiffrement du nom\ndef decypher(id,text):\n _key = list(\"\")\n _result = ''\n for l in text:\n _result += _key[(_key.index(l) + id) % 26] if not l == \"-\" else ' '\n return _result\n \n#Parcours du fichier\nfor line in open(input_file):\n #Extraction des elements\n match = _reg.match(line)\n name = match.group(1)\n id = int(match.group(2))\n checksum = match.group(3)\n \n #Calcul du checksum\n _od = freq(name)\n checksum_calc = '{}{}{}{}{}'.format(_od[0][0],_od[1][0],_od[2][0],_od[3][0],_od[4][0])\n\n #Comparaison \n if checksum_calc == checksum:\n cleartext = decypher(id,name)\n if cleartext == \"northpole object storage\":\n print(\"L'ID est : {}\".format(id))\n\n\n\n\n"},"size":{"kind":"number","value":1167,"string":"1,167"}}},{"rowIdx":126485,"cells":{"max_stars_repo_path":{"kind":"string","value":"library/sensors/Gps.py"},"max_stars_repo_name":{"kind":"string","value":"OpenSpaceProgram/pyOSP"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170263"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom gps import *\n#add the project folder to pythpath\nsys.path.append('../../')\n\nfrom library.components.SensorModule import SensorModule as Sensor\nfrom library.components.MetaData import MetaData as MetaData\n\n\nclass Gps(Sensor):\n session = None\n\n def __init__(self):\n super(Gps, self).__init__()\n # Create the GPSD client instance\n self.session = gps(mode=WATCH_ENABLE)\n\n # Lat\n latitudeMetaData = MetaData(\"Latitude\")\n latitudeMetaData.setUnit(\"Degrees Lat\")\n latitudeMetaData.setValueCallback(self.getLatitude)\n\n self.addMetaData(latitudeMetaData)\n\n # Lon\n logitudeMetaData = MetaData(\"Longitude\")\n 
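# --- Illustrative aside for resetTimestampsPosix in pyaff4/logical.py above --
# The reset helper converts the stored datetimes back to epoch seconds and
# applies them with os.utime((atime, mtime)). A stdlib-only sketch of that
# conversion for one hypothetical file.
import os
from datetime import datetime, timezone

epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
last_written = datetime(2021, 6, 1, 12, 0, tzinfo=timezone.utc)
last_accessed = datetime(2021, 6, 2, 8, 30, tzinfo=timezone.utc)

os.utime("example.bin", ((last_accessed - epoch).total_seconds(),
                         (last_written - epoch).total_seconds()))
# -----------------------------------------------------------------------------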

--- max_stars_repo_path: library/sensors/Gps.py | max_stars_repo_name: OpenSpaceProgram/pyOSP | max_stars_count: 0 | id: 2170263 | size: 3031 ---

# -*- coding: utf-8 -*-

import sys
from gps import *
# add the project folder to pythonpath
sys.path.append('../../')

from library.components.SensorModule import SensorModule as Sensor
from library.components.MetaData import MetaData as MetaData


class Gps(Sensor):
    session = None

    def __init__(self):
        super(Gps, self).__init__()
        # Create the GPSD client instance
        self.session = gps(mode=WATCH_ENABLE)

        # Lat
        latitudeMetaData = MetaData("Latitude")
        latitudeMetaData.setUnit("Degrees Lat")
        latitudeMetaData.setValueCallback(self.getLatitude)

        self.addMetaData(latitudeMetaData)

        # Lon
        longitudeMetaData = MetaData("Longitude")
        longitudeMetaData.setUnit("Degrees Lon")
        longitudeMetaData.setValueCallback(self.getLongitude)

        self.addMetaData(longitudeMetaData)

        # UTC
        utcMetaData = MetaData("UTC")
        utcMetaData.setUnit(" UTC")
        utcMetaData.setValueCallback(self.getUtc)

        self.addMetaData(utcMetaData)

        # Altitude
        altitudeMetaData = MetaData("Altitude")
        altitudeMetaData.setUnit("m ")
        altitudeMetaData.setValueCallback(self.getAltitude)

        self.addMetaData(altitudeMetaData)

        # Speed
        speedMetaData = MetaData("Speed")
        speedMetaData.setUnit("m/s")
        speedMetaData.setValueCallback(self.getSpeed)

        self.addMetaData(speedMetaData)

    def getLatitude(self):
        # Loop through the reports from the GPSD client
        for report in self.session:
            # If we have lat as a key in the report
            # we can get the reading for lat!
            if ('lat' in report.keys()):
                return str(report['lat'])

    def getLongitude(self):
        # Loop through the reports from the GPSD client
        for report in self.session:
            # If we have lon as a key in the report
            # we can get the reading for lon!
            if ('lon' in report.keys()):
                return str(report['lon'])

    def getUtc(self):
        # Loop through the reports from the GPSD client
        for report in self.session:
            # If we have time as a key in the report
            # we can get the reading for time!
            if ('time' in report.keys()):
                return str(report['time'])

    def getAltitude(self):
        # Loop through the reports from the GPSD client
        for report in self.session:
            # If we have alt as a key in the report
            # we can get the reading for altitude!
            if ('alt' in report.keys()):
                return str(report['alt'])

    def getSpeed(self):
        # Loop through the reports from the GPSD client
        for report in self.session:
            # If we have speed as a key in the report
            # we can get the reading for speed!
            if ('speed' in report.keys()):
                return str(report['speed'])

    def getMetaData(self):
        return super(Gps, self).getMetaData()
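
A minimal usage sketch for the sensor above, assuming a local gpsd daemon with a fix and that the pyOSP package layout used in the imports is available; only methods defined in the class are called, and each getter blocks on gpsd reports until the requested key appears:

# Hypothetical smoke test; requires gpsd to be running with a position fix.
if __name__ == "__main__":
    sensor = Gps()
    print("lat:", sensor.getLatitude())
    print("lon:", sensor.getLongitude())
    print("alt:", sensor.getAltitude())
    print("speed:", sensor.getSpeed())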

--- max_stars_repo_path: dfvfs/file_io/modi_file_io.py | max_stars_repo_name: dfjxs/dfvfs | max_stars_count: 176 | id: 2169969 | size: 1856 ---

# -*- coding: utf-8 -*-
"""The Mac OS disk image file-like object."""

import pymodi

from dfvfs.file_io import file_object_io
from dfvfs.lib import errors
from dfvfs.resolver import resolver


class MODIFile(file_object_io.FileObjectIO):
  """File input/output (IO) object using pymodi."""

  def __init__(self, resolver_context, path_spec):
    """Initializes a file input/output (IO) object.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.
    """
    super(MODIFile, self).__init__(resolver_context, path_spec)
    self._sub_file_objects = []

  def _Close(self):
    """Closes the file-like object."""
    super(MODIFile, self)._Close()
    self._sub_file_objects = []

  def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      pymodi.handle: a file-like object.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    if not path_spec.HasParent():
      raise errors.PathSpecError(
          'Unsupported path specification without parent.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)

    modi_file = pymodi.handle()
    modi_file.open_file_object(file_object)

    self._sub_file_objects.append(file_object)
    self._sub_file_objects.reverse()

    return modi_file

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the file-like object data.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    return self._file_object.get_media_size()

--- max_stars_repo_path: bot/commands/editcommandmods.py | max_stars_repo_name: NMisko/monkalot | max_stars_count: 20 | id: 2168959 | size: 1547 ---

"""Commands: "!addmod", "!delmod"."""

from bot.commands.abstract.command import Command
from bot.utilities.permission import Permission
from bot.utilities.tools import replace_vars


class EditCommandMods(Command):
    """Command for owners to add or delete mods to list of trusted mods."""

    perm = Permission.Admin

    def __init__(self, _):
        """Initialize variables."""
        self.responses = {}

    def match(self, bot, user, msg, tag_info):
        """Match if !addmod or !delmod."""
        return (msg.startswith("!addmod ") or msg.startswith("!delmod ")) and len(
            msg.split(" ")
        ) == 2

    def run(self, bot, user, msg, tag_info):
        """Add or delete a mod."""
        self.responses = bot.config.responses["EditCommandMods"]
        mod = msg.split(" ")[1].lower()
        if msg.startswith("!addmod "):
            if mod not in bot.config.trusted_mods:
                bot.config.trusted_mods.append(mod)
                bot.write(self.responses["mod_added"]["msg"])
            else:
                var = {"": mod}
                bot.write(replace_vars(self.responses["already_mod"]["msg"], var))
        elif msg.startswith("!delmod "):
            if mod in bot.config.trusted_mods:
                bot.config.trusted_mods.remove(mod)
                bot.write(self.responses["mod_deleted"]["msg"])
            else:
                var = {"": mod}
                bot.write(replace_vars(self.responses["user_not_in_list"]["msg"], var))

        bot.config.write_trusted_mods()
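
A small illustration of the matcher above; match() ignores its bot, user and tag_info arguments, so None placeholders are passed here (in monkalot itself the framework supplies real objects), and the message strings are hypothetical:

# Hypothetical quick check of EditCommandMods.match().
cmd = EditCommandMods(None)
print(cmd.match(None, "streamer", "!addmod some_mod", None))        # True
print(cmd.match(None, "streamer", "!delmod some_mod", None))        # True
print(cmd.match(None, "streamer", "!addmod too many words", None))  # False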

--- max_stars_repo_path: geocamUtil/gpx.py | max_stars_repo_name: finleyexp/georef_geocamutilweb | max_stars_count: 0 | id: 2169984 ---

#__BEGIN_LICENSE__
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The GeoRef platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__

import datetime
from xml.dom import minidom

import iso8601

from geocamUtil import anyjson as json


class RaiseValueError(object):
    pass
RAISE_VALUE_ERROR = RaiseValueError()


def getChild(node, name, dflt=RAISE_VALUE_ERROR, ns=None):
    # getElementsByTagName() returns a list of descendant nodes with
    # tags that match the specified name. getChild() returns the first
    # such descendant that is a direct child. (e.g. it gets the
    # that is a direct child rather than the one that is buried inside
    # the