observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \")\nprint('''''')\nprint('''''')\n\nprint(\" \")\n#print(\"\")\n"},"size":{"kind":"number","value":3174,"string":"3,174"}}},{"rowIdx":126980,"cells":{"max_stars_repo_path":{"kind":"string","value":"api/test_songs.py"},"max_stars_repo_name":{"kind":"string","value":"olefrank/ngsongslist"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170839"},"content":{"kind":"string","value":"import unittest, json\nfrom songs import Songs\n\nclass SongsTests(unittest.TestCase):\n\n def test_get_songs(self):\n songs_library = Songs(Songs.TEST_FILE)\n songs = json.loads(songs_library.get_songs())\n #check the count\n self.assertEqual(6, len(songs))\n #check ordering\n self.assertEqual(\"Lycanthropic Metamorphosis\", songs[0]['title'])\n self.assertEqual(\"The Yousicians\", songs[0]['artist'])\n self.assertEqual(\"Wishing In The Night\", songs[1]['title'])\n self.assertEqual(\"You've Got The Power\", songs[2]['title'])\n self.assertEqual(\"Opa Opa Ta Bouzoukia\", songs[3]['title'])\n self.assertEqual(5, songs[3]['rating'])\n self.assertEqual(\"Awaki-Waki\", songs[4]['title'])\n self.assertEqual(4.8, songs[4]['rating'])\n self.assertEqual(\"Mr Fastfinger\", songs[4]['artist'])\n self.assertEqual(\"A New Kennel\", songs[5]['title'])\n\n def test_get_songs_paging(self):\n songs_library = Songs(Songs.TEST_FILE)\n songs = json.loads(songs_library.get_songs(skip=1, count=2))\n self.assertEqual(2, len(songs))\n self.assertEqual(\"Wishing In The Night\", songs[0]['title'])\n self.assertEqual(\"You've Got The Power\", songs[1]['title'])\n\n songs = json.loads(songs_library.get_songs(skip=5, count=2))\n self.assertEqual(1, len(songs))\n self.assertEqual(\"A New Kennel\", songs[0]['title'])\n\n songs = json.loads(songs_library.get_songs(skip=6, count=20))\n self.assertEqual(0, len(songs))\n\n songs = json.loads(songs_library.get_songs(skip=0, count=0))\n self.assertEqual(0, len(songs))\n\n def test_get_average_difficulty(self):\n songs_library = Songs(Songs.TEST_FILE)\n avg_difficulty = json.loads(songs_library.get_average_difficulty())['avg_difficulty']\n self.assertEqual(12.93, avg_difficulty)\n\n def test_search(self):\n songs_library = Songs(Songs.TEST_FILE)\n songs = json.loads(songs_library.search_songs('Lycanthropic'))\n self.assertEqual(\"Lycanthropic Metamorphosis\", songs[0]['title'])\n self.assertEqual(1, len(songs))\n results = json.loads(songs_library.search_songs('metamorphosis'))\n self.assertEqual(1, len(songs))\n self.assertEqual(\"Lycanthropic Metamorphosis\", songs[0]['title'])\n\n songs = json.loads(songs_library.search_songs('The YOUsicians'))\n self.assertEqual(5, len(songs))\n self.assertEqual(\"Lycanthropic Metamorphosis\", songs[0]['title'])\n self.assertEqual(\"Wishing In The Night\", songs[1]['title'])\n self.assertEqual(\"You've Got The Power\", songs[2]['title'])\n self.assertEqual(\"Opa Opa Ta Bouzoukia\", songs[3]['title'])\n self.assertEqual(\"A New Kennel\", songs[4]['title'])\n\n songs = json.loads(songs_library.search_songs('xx'))\n self.assertEqual(0, len(songs))\n\n songs = json.loads(songs_library.search_songs(''))\n self.assertEqual(6, len(songs))\n\n\n#execute the tests\nif __name__ == '__main__':\n 
unittest.main()\n"},"size":{"kind":"number","value":3048,"string":"3,048"}}},{"rowIdx":126981,"cells":{"max_stars_repo_path":{"kind":"string","value":"PyHive/VcfIntegration/SNPTools_poprob.py"},"max_stars_repo_name":{"kind":"string","value":"elowy01/igsr_analysis"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2170326"},"content":{"kind":"string","value":"import eHive\nimport os\nfrom VCFIntegration.SNPTools import SNPTools\n\nclass SNPTools_poprob(eHive.BaseRunnable):\n \"\"\"Run SNPTools poprob on a VCF containing biallelic SNPs\"\"\"\n\n def run(self):\n vcf_g = SNPTools(vcf=self.param_required('vcf_file'),\n snptools_folder=self.param_required('snptools_folder'))\n\n outprefix = os.path.split(self.param_required('outprefix'))[1]\n\n if self.param_is_defined('work_dir'):\n if not os.path.isdir(self.param('work_dir')):\n os.makedirs(self.param('work_dir'))\n\n prob_f = \"\"\n if self.param_is_defined('verbose'):\n prob_f = vcf_g.run_poprob(outprefix=outprefix,\n rawlist=self.param_required('rawlist'),\n outdir=self.param_required('work_dir'),\n verbose=True)\n else:\n prob_f = vcf_g.run_poprob(outprefix=outprefix,\n rawlist=self.param_required('rawlist'),\n outdir=self.param_required('work_dir'),\n verbose=False)\n\n self.param('prob_f', prob_f)\n\n def write_output(self):\n self.warning('Work is done!')\n\n self.dataflow({'prob_f': self.param('prob_f')}, 1)\n"},"size":{"kind":"number","value":1349,"string":"1,349"}}},{"rowIdx":126982,"cells":{"max_stars_repo_path":{"kind":"string","value":"conversion_tools/speclib_to_mgf.py"},"max_stars_repo_name":{"kind":"string","value":"xiaoping-yang/ms2pip_c"},"max_stars_count":{"kind":"number","value":14,"string":"14"},"id":{"kind":"string","value":"2170879"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\"\"\"\nConvert MSP and SPTXT spectral library files.\n\nWrites three files: mgf with the spectra; PEPREC with the peptide sequences;\nmeta with additional metainformation.\n\nArguments:\n arg1 path to spectral library file\n arg2 prefix for spec_id\n\n\"\"\"\n\nimport re\nimport sys\nimport logging\n\n\nAMINO_MASSES = {\n \"A\": 71.037114,\n \"C\": 103.009185,\n \"D\": 115.026943,\n \"E\": 129.042593,\n \"F\": 147.068414,\n \"G\": 57.021464,\n \"H\": 137.058912,\n \"I\": 113.084064,\n \"K\": 128.094963,\n \"L\": 113.084064,\n \"M\": 131.040485,\n \"N\": 114.042927,\n \"P\": 97.052764,\n \"Q\": 128.058578,\n \"R\": 156.101111,\n \"S\": 87.032028,\n \"T\": 101.047679,\n \"V\": 99.068414,\n \"W\": 186.079313,\n \"Y\": 163.063329,\n}\nPROTON_MASS = 1.007825035\nWATER_MASS = 18.010601\n\n\ndef setup_logging():\n \"\"\"Initiate logging.\"\"\"\n root_logger = logging.getLogger()\n handler = logging.StreamHandler()\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s %(levelname)s %(module)s %(message)s\")\n )\n root_logger.addHandler(handler)\n root_logger.setLevel(logging.INFO)\n\n\ndef parse_peprec_mods(mods, ptm_list):\n \"\"\"Parse PEPREC modification string out of MSP Mod string.\"\"\"\n if mods.split(\"/\")[0] != \"0\":\n num_mods = mods[0]\n mod_list = [mod.split(\",\") for mod in mods.split(\"/\")[1:]]\n\n peprec_mods = []\n for location, aa, name in mod_list:\n if not (location == \"0\" and name == \"iTRAQ\"):\n location = str(int(location) + 1)\n peprec_mods.append(location)\n peprec_mods.append(name)\n\n if name not in ptm_list:\n ptm_list[name] = 1\n else:\n ptm_list[name] += 1\n\n peprec_mods = \"|\".join(peprec_mods)\n\n else:\n peprec_mods = \"-\"\n\n return 
peprec_mods\n\n\ndef validate(spec_id, peptide, charge, mods, reported_mw):\n \"\"\"Validate amino acids and reported peptide mass.\"\"\"\n invalid_aas = [\"B\", \"J\", \"O\", \"U\", \"X\", \"Z\"]\n if any(aa in invalid_aas for aa in peptide):\n logging.warning(\"Peptide with non-canonical amino acid found: %s\", peptide)\n\n elif (\n mods.split(\"/\")[0] == \"0\"\n ): # Cannot validate mass of peptide with unknown modification\n calculated = WATER_MASS + sum([AMINO_MASSES[x] for x in peptide])\n reported = float(reported_mw) * float(charge) - float(charge) * PROTON_MASS\n if abs(calculated - reported) > 0.5:\n logging.warning(\n \"Reported MW does not match calculated mass for spectrum %s\", spec_id\n )\n\n\ndef parse_speclib(speclib_filename, title_prefix, speclib_format=\"msp\"):\n \"\"\"Parse MSP file.\"\"\"\n filename = \".\".join(speclib_filename.split(\".\")[:-1])\n fpip = open(filename + \".peprec\", \"w\")\n fpip.write(\"spec_id modifications peptide charge\\n\")\n fmgf = open(filename + \".mgf\", \"w\")\n fmeta = open(filename + \".meta\", \"w\")\n\n with open(speclib_filename) as f:\n mod_dict = {}\n spec_id = 1\n peak_sep = None\n peptide = None\n charge = None\n parentmz = None\n mods = None\n purity = None\n HCDenergy = None\n read_spec = False\n mgf = \"\"\n\n for row in f:\n if read_spec:\n # Infer peak int/mz separator\n if not peak_sep:\n if \"\\t\" in row:\n peak_sep = \"\\t\"\n elif \" \" in row:\n peak_sep = \" \"\n else:\n raise ValueError(\"Invalid peak separator\")\n\n line = row.rstrip().split(peak_sep)\n\n # Read all peaks, so save to output files and set read_spec to False\n if row[0].isdigit():\n # Continue reading spectrum\n mgf += \" \".join([line[0], line[1]]) + \"\\n\"\n continue\n\n # Last peak reached, finish up spectrum\n else:\n validate(spec_id, peptide, charge, mods, parentmz)\n peprec_mods = parse_peprec_mods(mods, mod_dict)\n fpip.write(\n \"{}{} {} {} {}\\n\".format(\n title_prefix, spec_id, peprec_mods, peptide, charge\n )\n )\n fmeta.write(\n \"{}{} {} {} {} {} {}\\n\".format(\n title_prefix,\n spec_id,\n charge,\n peptide,\n parentmz,\n purity,\n HCDenergy,\n )\n )\n\n buf = \"BEGIN IONS\\n\"\n buf += \"TITLE=\" + title_prefix + str(spec_id) + \"\\n\"\n buf += \"CHARGE=\" + str(charge) + \"\\n\"\n buf += \"PEPMASS=\" + parentmz + \"\\n\"\n fmgf.write(\"{}{}END IONS\\n\\n\".format(buf, mgf))\n\n spec_id += 1\n read_spec = False\n mgf = \"\"\n\n if row.startswith(\"Name:\"):\n line = row.rstrip().split(\" \")\n tmp = line[1].split(\"/\")\n peptide = tmp[0].replace(\"(O)\", \"\")\n if speclib_format == \"sptxt\":\n peptide = re.sub(r\"\\[\\d*\\]|[a-z]\", \"\", peptide)\n charge = tmp[1].split(\"_\")[0]\n continue\n\n elif row.startswith(\"Comment:\"):\n line = row.rstrip().split(\" \")\n for i in range(1, len(line)):\n if line[i].startswith(\"Mods=\"):\n tmp = line[i].split(\"=\")\n mods = tmp[1]\n if line[i].startswith(\"Parent=\"):\n tmp = line[i].split(\"=\")\n parentmz = tmp[1]\n if line[i].startswith(\"Purity=\"):\n tmp = line[i].split(\"=\")\n purity = tmp[1]\n if line[i].startswith(\"HCD=\"):\n tmp = line[i].split(\"=\")\n HCDenergy = tmp[1].replace(\"eV\", \"\")\n continue\n\n elif row.startswith(\"Num peaks:\") or row.startswith(\"NumPeaks:\"):\n read_spec = True\n continue\n\n fmgf.close()\n fpip.close()\n fmeta.close()\n\n return spec_id, mod_dict\n\n\ndef main():\n \"\"\"Run CLI.\"\"\"\n # Get arguments\n speclib_filename = sys.argv[1]\n title_prefix = sys.argv[2]\n\n speclib_ext = speclib_filename.split(\".\")[-1]\n if 
speclib_ext.lower() == \"sptxt\":\n speclib_format = \"sptxt\"\n elif speclib_ext.lower() == \"msp\":\n speclib_format = \"msp\"\n else:\n raise ValueError(\"Unknown spectral library format: `%s`\" % speclib_ext)\n\n logging.info(\"Converting %s to MGF, PEPREC and meta file\", speclib_filename)\n\n num_peptides, mod_dict = parse_speclib(\n speclib_filename, title_prefix, speclib_format=speclib_format\n )\n\n logging.info(\n \"Finished!\\nSpectral library contains %i peptides and the following modifications: %s\",\n num_peptides,\n mod_dict,\n )\n\n\nif __name__ == \"__main__\":\n setup_logging()\n main()\n"},"size":{"kind":"number","value":7284,"string":"7,284"}}},{"rowIdx":126983,"cells":{"max_stars_repo_path":{"kind":"string","value":"AVAPy/data_wizard/utils/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"antvis/AVAPy"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2170254"},"content":{"kind":"string","value":"# pylint: disable=C0103\n# flake8: noqa\n\"\"\"\nUtil functions for data_wizard\n\"\"\"\n\nfrom AVAPy.data_wizard.utils.json import *\nfrom AVAPy.data_wizard.utils.typeinfer import *\n"},"size":{"kind":"number","value":170,"string":"170"}}},{"rowIdx":126984,"cells":{"max_stars_repo_path":{"kind":"string","value":"source/emp_evaluation_system/migrations/0022_algorithm.py"},"max_stars_repo_name":{"kind":"string","value":"LukasLandwich/energy_management_panel"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169212"},"content":{"kind":"string","value":"# Generated by Django 3.1.3 on 2021-02-03 07:16\n\nimport datetime\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('emp_evaluation_system', '0021_auto_20210202_1539'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Algorithm',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(help_text='The name of the algorithm. Shown in admin page overview and at the frontend algorithm coparison page.', max_length=128)),\n ('backend_identifier', models.SlugField(help_text='The identifier is used to call the simulation API. Therefore, it has to be the exact same as the algorithm identifier at the backend!', max_length=64)),\n ('start_time', models.DateTimeField(default=datetime.datetime(2021, 2, 4, 7, 16, 30, 112370, tzinfo=utc), help_text='A starting time for the algorithm simulation.')),\n ('has_end_time', models.BooleanField(help_text='If the algorithm simulation has no specified end time, the simulation will use the acutual time when running as end time.')),\n ('end_time', models.DateTimeField(default=django.utils.timezone.now, help_text=\"A end time for the algorithm simulation. Only used when 'has end time' is checked.\")),\n ('description', models.TextField(blank=True, default=None, help_text='Give a description for other users. 
Will only be shown in admin context.', null=True)),\n ],\n ),\n ]\n"},"size":{"kind":"number","value":1679,"string":"1,679"}}},{"rowIdx":126985,"cells":{"max_stars_repo_path":{"kind":"string","value":"modules/controls.py"},"max_stars_repo_name":{"kind":"string","value":"mattmaniak/Termyy"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170177"},"content":{"kind":"string","value":"import sys\nimport termios\nimport tty\n\nimport modules.menu as menu\nimport modules.render as render\n\n\nclass Chars:\n w = ('w', 119)\n s = ('s', 115)\n a = ('a', 97)\n d = ('d', 100)\n capital_p = ('P', 80)\n enter = 13\n\n\ndef menu_event(pressed_key):\n if pressed_key in Chars.w: # Y axis is inverted in comparison to math.\n if menu.selected_button <= 1:\n menu.selected_button = 1\n else:\n menu.selected_button -= 1\n\n elif pressed_key in Chars.s:\n if menu.selected_button >= 2:\n menu.selected_button = 2\n else:\n menu.selected_button += 1\n\n elif menu.selected_button == 1 and ord(pressed_key) == Chars.enter:\n render.flushFrame() # New game button.\n termios.tcflush(sys.stdin, termios.TCIOFLUSH) # Flush input buffer.\n return 1\n\n elif menu.selected_button == 2 and ord(pressed_key) == Chars.enter:\n render.flushFrame() # Exit button.\n exit(0)\n\n\ndef game_event(pressed_key):\n if pressed_key in Chars.w:\n if render.Player.y <= 0:\n render.Player.y = 0\n else:\n render.Player.y -= 1\n\n elif pressed_key in Chars.s:\n if render.Player.y >= render.Map.height - render.Player.height:\n render.Player.y = render.Map.height - render.Player.height\n else:\n render.Player.y += 1\n\n elif pressed_key in Chars.a:\n if render.Player.x <= 0:\n render.Player.x = 0\n else:\n render.Player.x -= 1\n\n elif pressed_key in Chars.d:\n if render.Player.x >= render.Map.width - render.Player.width:\n render.Player.x = render.Map.width - render.Player.width\n else:\n render.Player.x += 1\n\n elif pressed_key in Chars.capital_p: # Exit key.\n render.flushFrame()\n return True\n\n\ndef key_event(type): # https://code.activestate.com/recipes/134892/\n file_descriptor = sys.stdin.fileno()\n old_settings = termios.tcgetattr(file_descriptor)\n try:\n tty.setraw(sys.stdin.fileno())\n pressed_key = sys.stdin.read(1)\n\n finally:\n termios.tcsetattr(file_descriptor, termios.TCSADRAIN, old_settings)\n\n if type == \"menu\":\n return menu_event(pressed_key)\n elif type == \"game\":\n return game_event(pressed_key)\n"},"size":{"kind":"number","value":2310,"string":"2,310"}}},{"rowIdx":126986,"cells":{"max_stars_repo_path":{"kind":"string","value":"docs/tutorial/pytorch/alexnet_fashion_mnist/fashion_mnist.py"},"max_stars_repo_name":{"kind":"string","value":"intel/neural-compressor"},"max_stars_count":{"kind":"number","value":172,"string":"172"},"id":{"kind":"string","value":"2170199"},"content":{"kind":"string","value":"import torch\nfrom torchvision import datasets, transforms\n\ndef download_dataset():\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5), (0.5))\n ])\n train_dataset = datasets.FashionMNIST('./data', train=True, download=True,\n transform=transform)\n test_dataset = datasets.FashionMNIST('./data', train=False,\n transform=transform)\n return train_dataset, test_dataset\n\ndef data_loader(batch_size=200): \n train_dataset, test_dataset = download_dataset()\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size = batch_size, shuffle = True)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size = batch_size, shuffle 
= True)\n return train_loader, test_loader\n\ndef main():\n train_loader, test_loader = data_loader(batch_size=100)\n print(train_loader.batch_size* len(train_loader))\n print(test_loader.batch_size* len(test_loader))\n \n \nif __name__ == \"__main__\":\n main()\n"},"size":{"kind":"number","value":1036,"string":"1,036"}}},{"rowIdx":126987,"cells":{"max_stars_repo_path":{"kind":"string","value":"grokking-the-coding-interview/bfs/Reverse-Level-Order-Traversal-(easy).py"},"max_stars_repo_name":{"kind":"string","value":"huandrew99/LeetCode"},"max_stars_count":{"kind":"number","value":36,"string":"36"},"id":{"kind":"string","value":"2170307"},"content":{"kind":"string","value":"\"\"\"\nLC 107\nGiven a binary tree, populate an array to represent its level-by-level traversal in reverse order, i.e., the lowest level comes first. You should populate the values of all nodes in each level from left to right in separate sub-arrays.\n\"\"\"\nfrom collections import deque\n\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\n\ndef traverse(root):\n if not root:\n return []\n\n res = []\n levels = [[root]]\n # traverse all\n while levels[-1]:\n levels.append([])\n for node in levels[-2]: # now it's -2\n if node.left:\n levels[-1].append(node.left)\n if node.right:\n levels[-1].append(node.right)\n # backward \n levels.pop()\n while levels:\n nodes = levels.pop()\n res.append([node.val for node in nodes])\n\n return res\n \n\ndef main():\n # [[9, 10, 5], [7, 1], [12]]\n root = TreeNode(12)\n root.left = TreeNode(7)\n root.right = TreeNode(1)\n root.left.left = TreeNode(9)\n root.right.left = TreeNode(10)\n root.right.right = TreeNode(5)\n print(\"Reverse level order traversal: \" + str(traverse(root)))\n\n\nmain()\n\n\n\"\"\"\nTime O(N)\nSpace O(N)\n\"\"\"\n\n"},"size":{"kind":"number","value":1141,"string":"1,141"}}},{"rowIdx":126988,"cells":{"max_stars_repo_path":{"kind":"string","value":"astronex/directions.py"},"max_stars_repo_name":{"kind":"string","value":"jaratma/astro-nex"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170026"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport math\nfrom datetime import datetime, timedelta, date, time\nfrom pytz import timezone\nimport pysw\nfrom utils import parsestrtime\n\ndef solar_rev(boss):\n date, time = parsestrtime(boss.state.curr_chart.date)\n d,m,y = [int(i) for i in date.split(\"/\")]\n nowyear = boss.state.date.dt.year\n julday = pysw.julday(nowyear,m,d,0.0)\n sun = boss.state.curr_chart.planets[0]\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow > sun:\n julday -= 0.1\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow < sun:\n julday += 0.01\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n julday -= 0.01\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow < sun:\n julday += 0.001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n julday -= 0.001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow < sun:\n julday += 0.0001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n julday -= 0.0001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow < sun:\n julday += 0.00001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n julday -= 0.00001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n while sunnow < sun:\n julday += 0.000001\n s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n julday -= 0.000001\n 
s,sunnow,e = pysw.calc(julday,0,boss.state.epheflag)\n\n sol = pysw.revjul(julday)\n zone = boss.state.curr_chart.zone\n dt = boss.state.date.getnewdt(sol)\n boss.da.panel.set_date_only(dt)\n\ndef sec_prog(boss):\n chart = boss.state.curr_chart\n if not chart.date:\n chart = boss.state.now\n\n date = strdate_to_date(chart.date)\n nowyear = boss.state.date.dt.year\n birthyear = date.year\n yearsfrombirth = nowyear - birthyear\n progdate = date + timedelta(yearsfrombirth)\n\n if not boss.da.sec_alltimes:\n dt = combine_date(progdate)\n boss.state.calcdt.setdt(dt)\n boss.state.setprogchart(chart)\n birthday = synthbirthday(date,nowyear)\n boss.da.panel.set_date_only(birthday)\n else:\n nowdate = boss.state.date.dt\n prev_birthday = synthbirthday(date,nowyear)\n next_birthday = synthbirthday(date,nowyear+1)\n delta = nowdate - prev_birthday\n if delta.days < 0:\n next_birthday = prev_birthday\n prev_birthday = synthbirthday(date,nowyear-1)\n delta = nowdate - prev_birthday\n yearsfrombirth -= 1\n yeardelta = next_birthday - prev_birthday\n wholedelta = delta.days*86400+delta.seconds\n wholeyeardelta = yeardelta.days*86400+yeardelta.seconds\n frac = wholedelta/float(wholeyeardelta)\n oneday_ahead = date + timedelta(yearsfrombirth+1)\n daydelta = (oneday_ahead - progdate)\n daydelta = timedelta(daydelta.days*frac,daydelta.seconds*frac)\n inbetween_progdate = progdate + daydelta\n dt = combine_date(inbetween_progdate)\n boss.state.calcdt.setdt(dt)\n boss.state.setprogchart(chart)\n\n#curr.setloc(city,code)\n#curr.calcdt.setdt(datetime.datetime.combine(self.date,self.time))\n#curr.setchart()\n\ndef strdate_to_date(strdate):\n date,_,time = strdate.partition('T')\n try:\n y,mo,d = [ int(x) for x in date.split('-')]\n except ValueError:\n print date\n zone, time = time[8:], time[:5]\n try:\n zone.index(':')\n delta, zone = zone[:6], zone[6:]\n d1, d2 = delta[1:3], delta[4:6]\n tot = int(d1)+int(d2)/60.0\n except ValueError:\n delta, zone = zone[:5], zone[5:]\n d1, d2 = delta[1:3], delta[3:5]\n tot = int(d1)+int(d2)\n sign = {'+': 1, '-': -1}[delta[0]]\n delta = tot*sign\n h,m = [int(x) for x in time.split(':')]\n #h = (h + m/60.0) - delta\n #m = int((h - int(h))*60)\n return datetime(y,mo,d,int(h),m,0,tzinfo=timezone('UTC'))\n\ndef combine_date(dt):\n newdate = date(dt.year,dt.month,dt.day)\n newtime = time(dt.hour,dt.minute,dt.second)\n return datetime.combine(newdate,newtime)\n\ndef synthbirthday(date,nowyear):\n h = date.hour\n m = date.minute\n s = date.second\n y = nowyear\n mo = date.month\n d = date.day\n return datetime(y,mo,d,h,m,s,tzinfo=timezone('UTC'))\n"},"size":{"kind":"number","value":4347,"string":"4,347"}}},{"rowIdx":126989,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/when_ml_pipeline_meets_hydra/api/deployment.py"},"max_stars_repo_name":{"kind":"string","value":"omry/When-ML-pipeline-meets-Hydra"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170228"},"content":{"kind":"string","value":"import json\n\n\ndef foo(cluster_info):\n print(\"========== Run deployment's 'foo' subcommand ==========\")\n print(f\"cluster_info:\\n{json.dumps(dict(cluster_info), indent=2)}\")\n print(\"Do something here!\")\n\n\ndef bar(cluster_info):\n print(\"========== Run deployment's 'bar' subcommand ==========\")\n print(f\"cluster_info:\\n{json.dumps(dict(cluster_info), indent=2)}\")\n print(\"Do something 
here!\")\n"},"size":{"kind":"number","value":408,"string":"408"}}},{"rowIdx":126990,"cells":{"max_stars_repo_path":{"kind":"string","value":"bot/tests/conftest.py"},"max_stars_repo_name":{"kind":"string","value":"sh4rpy/volodya"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170219"},"content":{"kind":"string","value":"import os\n\nimport pytest\nfrom django.conf import settings\nfrom dotenv import load_dotenv\n\nfrom users.models import TelegramUser\n\n\nload_dotenv()\n\n\n@pytest.fixture(scope='session')\ndef django_db_setup():\n settings.DATABASES['default'] = {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.getenv('DB_NAME', 'postgres'),\n 'USER': os.getenv('DB_USER', 'postgres'),\n 'HOST': os.getenv('DB_HOST', 'db'),\n 'PORT': os.getenv('DB_PORT', 5432),\n 'PASSWORD': os.getenv('DB_PASSWORD', ''),\n }\n\n\n@pytest.fixture\ndef get_telegram_admin_user_id():\n return TelegramUser.objects.filter(is_admin=True).first().telegram_id\n\n\n@pytest.fixture\ndef get_telegram_user_id():\n return TelegramUser.objects.filter(is_admin=False).first().telegram_id\n"},"size":{"kind":"number","value":792,"string":"792"}}},{"rowIdx":126991,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/views.py"},"max_stars_repo_name":{"kind":"string","value":"patrickbeeson/has-it-ever-been"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170789"},"content":{"kind":"string","value":"import os\nimport requests\nfrom geopy.geocoders import Nominatim\n\nfrom flask import Flask, render_template, flash\n\nfrom . import app\nfrom .forms import LocationForm\n\napp.config.from_object(os.environ['APP_SETTINGS'])\n\nWUNDERGROUND_BASE_URL = app.config['WUNDERGROUND_BASE_URL']\nWUNDERGROUND_API_KEY = app.config['WUNDERGROUND_API_KEY']\n\n# base urls\nCONDITIONS_BASE_URL = '{}{}/conditions/q/'.format(\n WUNDERGROUND_BASE_URL,\n WUNDERGROUND_API_KEY\n)\nALMANAC_BASE_URL = '{}{}/almanac/q/'.format(\n WUNDERGROUND_BASE_URL,\n WUNDERGROUND_API_KEY\n)\n\n\ndef geocode_location(location):\n \"Get lat and lon coordinates for a zip code\"\n try:\n geolocator = Nominatim()\n location = geolocator.geocode(location)\n except Exception as e:\n print('There was a problem geocoding this address: {}'.format(e))\n\n return location\n\n\ndef get_current_temp(lat, lon):\n \"Get the current temp for a given location\"\n r = requests.get('{base}{lat},{lon}.json'.format(\n base=CONDITIONS_BASE_URL,\n lat=lat,\n lon=lon)\n )\n json_string = r.json()\n current_temp = json_string['current_observation']['temp_f']\n\n return int(current_temp)\n\n\ndef get_almanac_data(lat, lon):\n \"Get the almanac data for a given location\"\n r = requests.get('{base}{lat},{lon}.json'.format(\n base=ALMANAC_BASE_URL,\n lat=lat,\n lon=lon)\n )\n json_string = r.json()\n almanac_data = {}\n almanac_data['record_high'] = json_string['almanac']['temp_high']['record']['F']\n almanac_data['record_low'] = json_string['almanac']['temp_low']['record']['F']\n almanac_data['record_high_year'] = json_string['almanac']['temp_high']['recordyear']\n almanac_data['record_low_year'] = json_string['almanac']['temp_low']['recordyear']\n\n return almanac_data\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n \"\"\"\n Homepage view\n \"\"\"\n form = LocationForm()\n if form.validate_on_submit():\n temp_choice = form.temp_choice.data\n location = geocode_location(form.location.data)\n lat = location.latitude\n lon = location.longitude\n print(lat, lon)\n current_temp = 
get_current_temp(lat, lon)\n almanac_data = get_almanac_data(lat, lon)\n record_high = int(almanac_data['record_high'])\n record_low = int(almanac_data['record_low'])\n record_high_year = int(almanac_data['record_high_year'])\n record_low_year = int(almanac_data['record_low_year'])\n temp_diff_high_above = current_temp - record_high\n temp_diff_high_below = record_high - current_temp\n temp_diff_low_above = current_temp - record_low\n temp_diff_low_below = record_low - current_temp\n\n if temp_choice == 'hot':\n if current_temp >= record_high:\n flash(\n \"\"\"It's never been this hot!\n Currently, it's {} degrees, which is {} degrees above the\n record of {}, set in {}.\"\"\".format(\n current_temp,\n temp_diff_high_above,\n record_high,\n record_high_year)\n )\n else:\n flash(\n \"\"\"It's been this hot before.\n Currently, it's {} degrees, which is {} degrees below the\n record of {}, set in {}.\"\"\".format(\n current_temp,\n temp_diff_high_below,\n record_high,\n record_high_year)\n )\n else:\n if current_temp <= record_low:\n flash(\n \"\"\"It's never been this cold before.\n Currently, it's {} degrees, which is {} degrees below the\n record of {}, set in {}.\"\"\".format(\n current_temp,\n temp_diff_low_below,\n record_low,\n record_low_year)\n )\n else:\n flash(\n \"\"\"It's been this cold before.\n Currently, it's {} degrees, which is {} degrees above the\n record of {}, set in {}.\"\"\".format(\n current_temp,\n temp_diff_low_above,\n record_low,\n record_low_year)\n )\n return render_template(\n 'index.html',\n form=form,\n current_temp=current_temp,\n record_high=record_high,\n record_low=record_low\n )\n return render_template('index.html', form=form)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('404.html'), 404\n"},"size":{"kind":"number","value":4730,"string":"4,730"}}},{"rowIdx":126992,"cells":{"max_stars_repo_path":{"kind":"string","value":"chart/chart/python/docs/extract_tutorial_cmds.py"},"max_stars_repo_name":{"kind":"string","value":"JoeyBF/sseq"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"id":{"kind":"string","value":"2170584"},"content":{"kind":"string","value":"import sys\nimport pathlib\nimport re\nr = re.compile(\"\\n\\n>>> (.|\\n)*?\\n\\n\", flags=re.MULTILINE)\ntext = pathlib.Path(sys.argv[1]).read_text()\ngroups = [m.group(0)[2:-2].split(\"\\n\") for m in r.finditer(text)]\n\n\ndef join_continue_lines(lines):\n lines = [line for line in lines if line[:4] in (\">>> \", \"... 
\")]\n result = []\n cur_line = \"\"\n for line in lines:\n if line.startswith(\">>> \") and cur_line:\n result.append(cur_line[1:])\n cur_line = \"\"\n cur_line += f\"\\n{line[4:]}\"\n result.append(cur_line[1:])\n return result\n\nresult = [join_continue_lines(group) for group in groups]\n\nimport json\nprint(json.dumps(result))"},"size":{"kind":"number","value":664,"string":"664"}}},{"rowIdx":126993,"cells":{"max_stars_repo_path":{"kind":"string","value":"galaxyZooNet/utils.py"},"max_stars_repo_name":{"kind":"string","value":"hungjinh/galaxyZooNet"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2170945"},"content":{"kind":"string","value":"import yaml\nfrom easydict import EasyDict\n\n\ndef get_config_from_yaml(file_yaml):\n '''Get the config from a yaml file\n Args:\n file_yaml: path to the config yaml file\n Return:\n config (EasyDict)\n '''\n\n with open(file_yaml, 'r') as file_config:\n try:\n config = EasyDict(yaml.safe_load(file_config))\n return config\n except ValueError:\n print(\"INVALID yaml file format.\")\n exit(-1)\n\n\n\nif __name__=='__main__':\n config = get_config_from_yaml('../configs/resnet50_test.yaml')"},"size":{"kind":"number","value":574,"string":"574"}}},{"rowIdx":126994,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_randommock.py"},"max_stars_repo_name":{"kind":"string","value":"GodspeedYouBlackEmperor/pyalcs"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"id":{"kind":"string","value":"2170960"},"content":{"kind":"string","value":"from tests.randommock import RandomMock, SampleMock\n\n\nclass TestRandomTest:\n\n def test_randommock_returns_values_in_a_given_sequence(self):\n f = RandomMock([0.1, 0.2, 0.3])\n assert 0.1 == f()\n assert 0.2 == f()\n assert 0.3 == f()\n\n def test_samplemock_returns_list_elements_in_a_given_sequence_1(self):\n sample_func = SampleMock([2, 0, 1])\n assert [15, 3, 14] == sample_func([3, 14, 15], 3)\n\n def test_samplemock_returns_list_elements_in_a_given_sequence_2(self):\n sample_func = SampleMock([2, 0, 1])\n assert [15, 3, 14] == sample_func([3, 14, 15, 92, 6], 3)\n\n def test_samplemock_returns_list_elements_in_a_given_sequence_3(self):\n sample_func = SampleMock([1, 15, 2, 15])\n assert [14, 3, 15] == sample_func([3, 14, 15], 3)\n\n def test_testsample4(self):\n sample_func = SampleMock([10, 2, 1, 15])\n assert [3, 15, 14] == sample_func([3, 14, 15, 92, 6], 3)\n"},"size":{"kind":"number","value":954,"string":"954"}}},{"rowIdx":126995,"cells":{"max_stars_repo_path":{"kind":"string","value":"preprocess.py"},"max_stars_repo_name":{"kind":"string","value":"swetha-sundar/sentiment-analysis"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169815"},"content":{"kind":"string","value":"import pandas as pd\nimport numpy as np\n\ntrain_file = 'data/train.csv'\ntest_file = 'data/test.csv'\n\n#Explore the training dataset\n#Note: For some reason, if you dont specify the encoding as latin-1, the interpreter will throw an UTF-8 encoding error\ndata = pd.read_csv(train_file, error_bad_lines=False, encoding='latin-1')\ndata.columns = ['id', 'sentiment', 'text']\nprint(data.head(2), \"\\n\\n\")\n\n#Id information is not useful. So let's remove it\n#axis=1 indicates columns\ndata = data.drop(labels=['id'], axis=1)\nprint(data.head(10), \"\\n\\n\")\n\n'''\nObservations:\n1. Data has a mix of alphabets, numbers and symbols\n2. Mix of words with uppercase and lowercase letters\n3. 
We need to normalize the words to their base word. Leaving capitalizaed words in the middle of the tweet can be\n experimented with as they may hold different feature space like name of the person, country, etc.. \n4. No particular order of sentiment and tweets. If data is not randomly distributed then it can introduce bias to a learning model\n5. Need to split and shuffle the data to reduce variance (makes sure the model can generalize better on the data) \n and does not lead to overfitting \n6. Need to get an idea of the distribution of data\n'''\n\n#calculate the number of positive and negative tweets\npositives = data['sentiment'][data.sentiment == 1]\nnegatives = data['sentiment'][data.sentiment == 0]\n\nprint('Number of postive tweets {}' .format(len(positives)))\nprint('Number of negative tweets {}' .format(len(negatives)))\nprint('Total Length of the data is: {}' .format(data.shape[0]))\n\n#Are there any duplicates in the data? Get the unique counts to identify this\nprint(data.groupby('sentiment').describe())\n"},"size":{"kind":"number","value":1688,"string":"1,688"}}},{"rowIdx":126996,"cells":{"max_stars_repo_path":{"kind":"string","value":"talking_heads/hyperparams.py"},"max_stars_repo_name":{"kind":"string","value":"BUVANEASH/Talking_Heads"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"id":{"kind":"string","value":"2170905"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#/usr/bin/python3\n\nimport os\nimport re\nfrom ast import literal_eval\n\nclass hyperparameters():\n\n def __init__(self): \n self.use_dlib = True\n # Dataset\n self.dataset = \"/media/new_hdd1/VoxCeleb-2/Video/dev/mp4\"\n self.data = \"/media/new_hdd1/Face_Morp_2.0/Talking_Heads/data\"\n self.preprocessed = os.path.join(self.data,\"preprocessed\")\n \n # logdir\n self.model = \"AK\"\n self.modeldir = \"/media/new_hdd1/Face_Morp_2.0/Talking_Heads/results/model/\"\n self.logdir = os.path.join(self.modeldir, \"meta\")\n self.fine_logdir = os.path.join(self.modeldir, self.model)\n \n # No of training videos\n self.train_videos = len(os.listdir(self.preprocessed))\n \n # Network Architecture parameters\n # Encoder channels and self-attention channel\n self.enc_down_ch = [64,128,256,512]\n self.enc_self_att_ch = 256\n \n # Decoder channels and self-attention channel\n self.dec_down_ch = [256,128,64,3]\n self.dec_self_att_ch = 256\n \n # Residual Block channel\n self.res_blk_ch = 512\n \n # Embedding Vector\n self.N_Vec = 512\n \n # Considering input and output channel in a residual block, multiple of 2 because beta and gamma affine parameter.\n self.split_lens = [self.res_blk_ch]*11*2 + \\\n [self.res_blk_ch]*2*2 + \\\n [self.res_blk_ch]*2*2 + \\\n [self.dec_down_ch[0]]*2*2 + \\\n [self.dec_down_ch[1]]*2*2 + \\\n [self.dec_down_ch[2]]*2*2 + \\\n [self.dec_down_ch[3]]*2\n \n # Activation outputs from VGGFace and VGG19\n self.vggface_feat_layers = ['conv1_1','conv2_1','conv3_1','conv4_1','conv5_1']\n self.vgg19_feat_layers = ['block1_conv1','block2_conv1','block3_conv1','block4_conv1','block5_conv1']\n \n # Training hyperparameters\n # Image Size\n self.img_size = (256, 256, 3)\n \n # K-shot learning,\n self.K = 8#8\n\n # batch size\n self.batch = 1\n \n # Loss weights\n self.loss_vgg19_wt = 1e-2\n self.loss_vggface_wt = 2e-3\n self.loss_fm_wt = 1e1\n self.loss_mch_wt = 8e1\n self.learning_rate_EG = 5e-5\n self.learning_rate_D = 2e-4\n self.num_iterations = 10000000\n \n # Logging\n self.log_step = 10\n self.save_step = 1000\n self.summary_step = 100\n \n # hyperparams json and 
resourceconfig json\n self.hp_json = \"/opt/ml/input/config/hyperparameters.json\"\n self.resource_json = \"/opt/ml/input/config/resourceConfig.json\"\n \n def update(self,newdata):\n for key,value in newdata.items():\n setattr(self,key,value)\n \nHyperparams = hyperparameters()\n\ndef hp_json(hp_json):\n '''Overrides hyperparams from hyperparameters.json'''\n print(\"READING \",Hyperparams.hp_json)\n with open(hp_json) as f:\n text = f.read()\n str_dict = re.sub(r\"\\\"(-?\\d+(?:[\\.,]\\d+)?)\\\"\", r'\\1', text)\n str_dict = str_dict.replace(\"\\\"True\\\"\",\"True\").replace(\"\\\"False\\\"\",\"False\")\n return literal_eval(str_dict)\n\ndef resource_json(resource_json):\n '''Overrides hyperparams from resourceConfig.json'''\n print(\"READING \",Hyperparams.resource_json)\n with open(resource_json) as f:\n text = f.read()\n str_dict = re.sub(r\"\\\"(-?\\d+(?:[\\.,]\\d+)?)\\\"\", r'\\1', text)\n str_dict = str_dict.replace(\"\\\"True\\\"\",\"True\").replace(\"\\\"False\\\"\",\"False\")\n return literal_eval(str_dict)\n\nif os.path.exists(Hyperparams.hp_json):\n Hyperparams.update(hp_json(Hyperparams.hp_json))\nelse:\n Hyperparams.hp_json = 'hyperparameters.json'\n if os.path.exists(Hyperparams.hp_json):\n Hyperparams.update(hp_json(Hyperparams.hp_json))\n \nif os.path.exists(Hyperparams.resource_json):\n Hyperparams.update(resource_json(Hyperparams.hp_json))\nelse:\n Hyperparams.resource_json = 'resourceConfig.json'\n if os.path.exists(Hyperparams.resource_json):\n Hyperparams.update(resource_json(Hyperparams.hp_json))\n\nHyperparams.logdir = os.path.join(Hyperparams.modeldir, \"meta\") \nHyperparams.fine_logdir = os.path.join(Hyperparams.modeldir, Hyperparams.model)"},"size":{"kind":"number","value":4411,"string":"4,411"}}},{"rowIdx":126997,"cells":{"max_stars_repo_path":{"kind":"string","value":"official/projects/qat/vision/quantization/helper.py"},"max_stars_repo_name":{"kind":"string","value":"wnorris/models"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2168541"},"content":{"kind":"string","value":"# Copyright 2022 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantization helpers.\"\"\"\nfrom typing import Any, Dict\n\nimport tensorflow as tf\n\nimport tensorflow_model_optimization as tfmot\nfrom official.projects.qat.vision.quantization import configs\n\n\n_QUANTIZATION_WEIGHT_NAMES = [\n 'output_max', 'output_min', 'optimizer_step', 'kernel_min', 'kernel_max',\n 'add_three_min', 'add_three_max', 'divide_six_min', 'divide_six_max',\n 'depthwise_kernel_min', 'depthwise_kernel_max',\n 'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max',\n 'quantize_layer_min', 'quantize_layer_max',\n 'quantize_layer_1_min', 'quantize_layer_1_max',\n 'quantize_layer_2_min', 'quantize_layer_2_max',\n 'quantize_layer_3_min', 'quantize_layer_3_max',\n 'post_activation_min', 'post_activation_max',\n]\n\n_ORIGINAL_WEIGHT_NAME = [\n 'kernel', 'depthwise_kernel', 'gamma', 'beta', 'moving_mean',\n 'moving_variance', 'bias'\n]\n\n\ndef is_quantization_weight_name(name: str) -> bool:\n simple_name = name.split('/')[-1].split(':')[0]\n if simple_name in _QUANTIZATION_WEIGHT_NAMES:\n return True\n if simple_name in _ORIGINAL_WEIGHT_NAME:\n return False\n raise ValueError('Variable name {} is not supported.'.format(simple_name))\n\n\ndef copy_original_weights(original_model: tf.keras.Model,\n quantized_model: tf.keras.Model):\n \"\"\"Helper function that copy the original model weights to quantized model.\"\"\"\n original_weight_value = original_model.get_weights()\n weight_values = quantized_model.get_weights()\n\n original_idx = 0\n for idx, weight in enumerate(quantized_model.weights):\n if not is_quantization_weight_name(weight.name):\n if original_idx >= len(original_weight_value):\n raise ValueError('Not enought original model weights.')\n weight_values[idx] = original_weight_value[original_idx]\n original_idx = original_idx + 1\n\n if original_idx < len(original_weight_value):\n raise ValueError('Not enought quantized model weights.')\n\n quantized_model.set_weights(weight_values)\n\n\nclass LayerQuantizerHelper(object):\n \"\"\"Helper class that handles quantizers.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._quantizers = {}\n self._quantizer_vars = {}\n super().__init__(*args, **kwargs)\n\n def _all_value_quantizer(self):\n return tfmot.quantization.keras.quantizers.AllValuesQuantizer(\n num_bits=8, per_axis=False, symmetric=False, narrow_range=False)\n\n def _moving_average_quantizer(self):\n return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(\n num_bits=8, per_axis=False, symmetric=False, narrow_range=False)\n\n def _add_quantizer(self, name, all_value_quantizer=False):\n if all_value_quantizer:\n self._quantizers[name] = self._all_value_quantizer()\n else:\n self._quantizers[name] = self._moving_average_quantizer()\n\n def _apply_quantizer(self, name, inputs, training, **kwargs):\n return self._quantizers[name](\n inputs, training, self._quantizer_vars[name], **kwargs)\n\n def _build_quantizer_vars(self):\n for name in self._quantizers:\n 
self._quantizer_vars[name] = self._quantizers[name].build(\n tensor_shape=None, name=name, layer=self)\n\n\nclass NoOpActivation:\n \"\"\"No-op activation which simply returns the incoming tensor.\n\n This activation is required to distinguish between `keras.activations.linear`\n which does the same thing. The main difference is that NoOpActivation should\n not have any quantize operation applied to it.\n \"\"\"\n\n def __call__(self, x: tf.Tensor) -> tf.Tensor:\n return x\n\n def get_config(self) -> Dict[str, Any]:\n \"\"\"Get a config of this object.\"\"\"\n return {}\n\n def __eq__(self, other: Any) -> bool:\n if not other or not isinstance(other, NoOpActivation):\n return False\n\n return True\n\n def __ne__(self, other: Any) -> bool:\n return not self.__eq__(other)\n\n\ndef quantize_wrapped_layer(cls, quantize_config):\n\n def constructor(*arg, **kwargs):\n return tfmot.quantization.keras.QuantizeWrapperV2(\n cls(*arg, **kwargs), quantize_config)\n\n return constructor\n\n\ndef norm_by_activation(activation, norm_quantized, norm_no_quantized):\n if activation not in ['relu', 'relu6']:\n return norm_quantized\n else:\n return norm_no_quantized\n\n\nConv2DQuantized = quantize_wrapped_layer(\n tf.keras.layers.Conv2D,\n configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))\nConv2DOutputQuantized = quantize_wrapped_layer(\n tf.keras.layers.Conv2D,\n configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))\nDepthwiseConv2DQuantized = quantize_wrapped_layer(\n tf.keras.layers.DepthwiseConv2D,\n configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],\n False))\nDepthwiseConv2DOutputQuantized = quantize_wrapped_layer(\n tf.keras.layers.DepthwiseConv2D,\n configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'],\n True))\nGlobalAveragePooling2DQuantized = quantize_wrapped_layer(\n tf.keras.layers.GlobalAveragePooling2D,\n configs.Default8BitQuantizeConfig([], [], True))\nAveragePooling2DQuantized = quantize_wrapped_layer(\n tf.keras.layers.AveragePooling2D,\n configs.Default8BitQuantizeConfig([], [], True))\nResizingQuantized = quantize_wrapped_layer(\n tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))\nConcatenateQuantized = quantize_wrapped_layer(\n tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [],\n True))\nUpSampling2DQuantized = quantize_wrapped_layer(\n tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [],\n True))\nReshapeQuantized = quantize_wrapped_layer(\n tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))\n\n# pylint:disable=g-long-lambda\nBatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(\n norm_layer, configs.Default8BitOutputQuantizeConfig())\nBatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(\n norm_layer, configs.NoOpQuantizeConfig())\n"},"size":{"kind":"number","value":6782,"string":"6,782"}}},{"rowIdx":126998,"cells":{"max_stars_repo_path":{"kind":"string","value":"astrosql/deprecated/update.py"},"max_stars_repo_name":{"kind":"string","value":"ketozhang/astroSQL"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170624"},"content":{"kind":"string","value":"\"\"\"\nUpdate Procedure:\n1. Check if the entry exist by basename\n2. Check if updated is needed by WCS\n3. 
Write to database\n\"\"\"\n\nimport os\nimport peeweedb\nfrom config import config\nfrom pyzaphot import PhotFitsImage\nfrom sqlconnector import connect\nfrom writer import dict2sql\n\nconfig = config()\nstorepath = config['store']\nTABLE = config['mysql']['images_table']\n\n\ndef updater(data, table):\n db = connect()\n table = peeweedb.tables[table]\n\n try:\n wcsed = table.get(table.basename == data['basename']).WCSED\n if not wcsed == 'T':\n dict2sql(db, table, data)\n return None\n\n except table.DoesNotExist as e:\n print(e)\n dict2sql(db, table, data)\n return None\n\n\ndef zaphot_add_one_image_to_db(image, skip=False, delete=False, update=False, table=TABLE):\n # check image processed or not, if yes, return\n print(\"dealing with\", image)\n print(\"processing image : \" + image)\n imagetmp = PhotFitsImage(image)\n processed = os.path.isfile(storepath + imagetmp.savepath + imagetmp.uniformname) or os.path.isfile(\n storepath + imagetmp.savepath + imagetmp.uniformname + '.gz')\n if processed and not update:\n print(\"this image has already been processed\")\n if delete:\n print(\"Deleting this image!!!\")\n command = \"rm -f {0}\".format(image)\n print(command)\n # os.system(command)\n\n # only do WCS if it is not WCSED\n if not skip and imagetmp.WCSED != 'T':\n print('doing wcs here ...')\n # currently only works for KAIT images\n command = \"Ssolve-field-kait \".format(image)\n print(command)\n # os.system(command)\n\n imagetmp.extract_zeromagphotinfo()\n dbinf = imagetmp.get_databaseinfo()\n\n updater(dbinf, table)\n\n if delete:\n command = \"mv {0} {1}\".format(\n image, storepath + imagetmp.savepath)\n print(command)\n # os.system(command)\n else:\n command = \"cp {0} {1}\".format(\n image, storepath + imagetmp.savepath)\n print(command)\n # os.system(command)\n command = \"gzip {0}\".format(\n storepath + imagetmp.savepath + imagetmp.uniformname)\n print(command)\n # os.system(command)\n\n\ndef main(args):\n zaphot_add_one_image_to_db(args.image, skip=args.skip, delete=args.delete, update=args.update)\n"},"size":{"kind":"number","value":2347,"string":"2,347"}}},{"rowIdx":126999,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/unit/multi_dimensional_RNN/_test_mdgru_on_2d_grid.py"},"max_stars_repo_name":{"kind":"string","value":"X-rayLaser/multi-directional-mdrnn"},"max_stars_count":{"kind":"number","value":12,"string":"12"},"id":{"kind":"string","value":"2168866"},"content":{"kind":"string","value":"from .test_mdrnn_on_2d_grid import Degenerate2DInputToMDRNNTests, \\\n OutputShapeGiven2DTests, OutputShapeGiven6DInputTests\nimport tensorflow as tf\nfrom mdrnn import MDGRU\n\n\nclass Degenerate2DInputToMDGRUTests(Degenerate2DInputToMDRNNTests):\n def create_mdrnn(self, **kwargs):\n return MDGRU(**kwargs)\n\n def create_keras_rnn(self, **kwargs):\n return tf.keras.layers.GRU(implementation=1, reset_after=False, **kwargs)\n\n\nclass MDGRUOutputShapeGiven2DTests(OutputShapeGiven2DTests):\n def get_rnn_class(self):\n return MDGRU\n\n\nclass MDGRUOutputShapeGiven6DInputTests(OutputShapeGiven6DInputTests):\n def get_rnn_class(self):\n return 
MDGRU\n"},"size":{"kind":"number","value":674,"string":"674"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1269,"numItemsPerPage":100,"numTotalItems":129320,"offset":126900,"length":100}}
max_stars_repo_path: string, length 4 to 182
max_stars_repo_name: string, length 6 to 116
max_stars_count: int64, 0 to 191k
id: string, length 7 to 7
content: string, length 100 to 10k
size: int64, 100 to 10k
2020/04/code.py
ahriley/advent-of-code
0
2170444
with open('input.txt') as f:
    lines = f.readlines()

# process file
passports = []
passport = {}
for line in lines:
    if line == '\n':
        passports.append(passport)
        passport = {}
        continue
    items = line.split(' ')
    for item in items:
        key, val = item.split(':')
        passport[key] = val.strip('\n')

# since last line is not empty, add final passport
passports.append(passport)

# part 1
ans1 = 0
for passport in passports:
    valid = len(passport) == 8
    valid |= (len(passport) == 7) and ('cid' not in passport.keys())
    ans1 += valid

# part 2
def valid_year(val, low, high):
    return len(val) == 4 and low <= int(val) <= high

def valid_haircolor(val):
    check = val[0] == '#'
    check &= len(val) == 7
    check &= all([char.isdigit() or char in 'abcdef' for char in val[1:]])
    return check

def valid_pid(val):
    return len(val) == 9 and all([char.isdigit() for char in val])

ans2 = 0
eyecolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
for passport in passports:
    valid = len(passport) == 8
    valid |= (len(passport) == 7) and ('cid' not in passport.keys())
    if valid:
        # check years
        valid &= valid_year(passport['byr'], 1920, 2002)
        valid &= valid_year(passport['iyr'], 2010, 2020)
        valid &= valid_year(passport['eyr'], 2020, 2030)

        # check height
        if 'in' in passport['hgt']:
            valid &= 59 <= float(passport['hgt'].strip('in')) <= 76
        elif 'cm' in passport['hgt']:
            valid &= 150 <= float(passport['hgt'].strip('cm')) <= 193
        else:
            valid &= False

        # check hair color
        valid &= valid_haircolor(passport['hcl'])

        # check eye color
        valid &= passport['ecl'] in eyecolors

        # check passport number
        valid &= valid_pid(passport['pid'])

    ans2 += valid

# output
answer = []
answer.append('Part 1: {}'.format(ans1))
answer.append('Part 2: {}'.format(ans2))
with open('solution.txt', 'w') as f:
    f.writelines('\n'.join(answer))
2,050
ssi/migrations/0001_initial.py
unt-libraries/serial-set-inventory
0
2170777
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import localflavor.us.models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Congress', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('congress_number', models.IntegerField()), ('begin_date', models.DateField(null=True, blank=True)), ('end_date', models.DateField(null=True, blank=True)), ], ), migrations.CreateModel( name='DocumentType', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('house', models.CharField(max_length=2, choices=[(b'House', b'H'), (b'Senate', b'S')])), ('document_type', models.CharField(max_length=100)), ('document_name', models.CharField(unique=True, max_length=106)), ], ), migrations.CreateModel( name='Institution', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('institution_name', models.CharField(max_length=255)), ('library_name', models.CharField(max_length=255)), ('address_2', models.CharField(max_length=255, blank=True)), ('city', models.CharField(max_length=255)), ('state', localflavor.us.models.USStateField()), ('zip_code', localflavor.us.models.USZipCodeField()), ('latitude', models.DecimalField(max_digits=9, decimal_places=6, blank=True)), ('longitude', models.DecimalField(max_digits=9, decimal_places=6, blank=True)), ('depository_number', models.CharField(max_length=255)), ('phone_number', localflavor.us.models.PhoneNumberField()), ('email_address', models.EmailField(max_length=254)), ('date_inventoried', models.DateField()), ('hidden', models.BooleanField(default=False)), ], ), migrations.CreateModel( name='Inventory', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('departmental_edition', models.BooleanField(default=False)), ('note', models.CharField(max_length=255)), ('institution', models.ForeignKey(to='ssi.Institution')), ], ), migrations.CreateModel( name='Session', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('session_number', models.CharField(max_length=2, choices=[(b'1', b'1st'), (b'2', b'2nd'), (b'3', b'3rd'), (b'4', b'4th'), (b'5', b'5th'), (b'S', b'Special')])), ('begin_date', models.DateField(null=True, blank=True)), ('end_date', models.DateField(null=True, blank=True)), ('congress', models.ForeignKey(to='ssi.Congress')), ], ), migrations.CreateModel( name='Volume', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('serial_number', models.CharField(max_length=255)), ('title', models.CharField(max_length=255, blank=True)), ('publication_numbers', models.CharField(max_length=255, blank=True)), ('annotation', models.CharField(max_length=255, blank=True)), ('not_issued', models.BooleanField(default=False)), ('document_type', models.ForeignKey(to='ssi.DocumentType')), ('session', models.ForeignKey(to='ssi.Session')), ], ), migrations.AddField( model_name='inventory', name='volume', field=models.ForeignKey(to='ssi.Volume'), ), migrations.AddField( model_name='institution', name='volumes', field=models.ManyToManyField(to='ssi.Volume', through='ssi.Inventory'), ), ]
4,445
tests/test_unit.py
GlezSeoane/monoshape
0
2169062
# -*- coding: utf-8 -*- """monoshape unit test suite""" import unittest from unittest import TestCase from PIL import Image from PIL import ImageChops from monoshape.__main__ import extract_shape __author__ = '<NAME>' __copyright__ = 'Copyright 2019, <NAME>' __credits__ = '<NAME>' __license__ = 'LICENSE' __version__ = '1.2' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' __status__ = 'Production' class UnitTestSuite(TestCase): """Unit test container class for monoshape library.""" @staticmethod def are_equals(img1, img2): """Determine if two images are equals in contents.""" return ImageChops.difference(img1, img2).getbbox() is None def test_simple_draw(self): source = 'test_src/original_pear.png' drawn = extract_shape(path=source, black_background=False, white_shape=False, rgb_shape=False, red=None, green=None, blue=None) expected = Image.open(source.replace('original', 'expected')) self.assertTrue(self.are_equals(drawn, expected)) def test_draw_with_white_output(self): source = 'test_src/original_plane.png' drawn = extract_shape(path=source, black_background=False, white_shape=True, rgb_shape=False, red=None, green=None, blue=None) expected = Image.open(source.replace('original', 'expected')) self.assertTrue(self.are_equals(drawn, expected)) def test_draw_with_rgb_output(self): source = 'test_src/original_udc.png' drawn = extract_shape(path=source, black_background=False, white_shape=False, rgb_shape=True, red=159, green=36, blue=110) expected = Image.open(source.replace('original', 'expected')) self.assertTrue(self.are_equals(drawn, expected)) def test_draw_with_black_background(self): source = 'test_src/original_spaceship.png' drawn = extract_shape(path=source, black_background=True, white_shape=False, rgb_shape=False, red=None, green=None, blue=None) expected = Image.open(source.replace('original', 'expected')) self.assertTrue(self.are_equals(drawn, expected)) if __name__ == '__main__': unittest.main()
2,864
3.1.4 - Project - Sliding Window Object Detection/Raspberry Pi/solution-live-sliding-window-object-detection.py
neubrom/computer-vision-with-embedded-machine-learning
27
2170155
#!/usr/bin/env python """ Pi Camera Sliding Window Object Detection Continuously captures images and performs inference on a sliding window to detect objects. Author: EdgeImpulse, Inc. Date: August 5, 2021 License: Apache-2.0 (apache.org/licenses/LICENSE-2.0) """ import os, sys, time, math import cv2 from picamera import PiCamera from picamera.array import PiRGBArray from edge_impulse_linux.image import ImageImpulseRunner # Settings model_file = "modelfile.eim" # Trained ML model from Edge Impulse target_label = "dog" # Which label we're looking for target_threshold = 0.6 # Draw box if output prob. >= this value cam_width = 320 # Width of frame (pixels) cam_height = 240 # Height of frame (pixels) rotation = 0 # Camera rotation (0, 90, 180, or 270) window_width = 96 # Window width (input to CNN) window_height = 96 # Window height (input to CNN) stride = 24 # How many pixels to move the window # The ImpulseRunner module will attempt to load files relative to its location, # so we make it load files relative to this program instead dir_path = os.path.dirname(os.path.realpath(__file__)) model_path = os.path.join(dir_path, model_file) # Load the model file runner = ImageImpulseRunner(model_path) # Initialize model (and print information if it loads) try: model_info = runner.init() labels = model_info['model_parameters']['labels'] print("Model name:", model_info['project']['name']) print("Model owner:", model_info['project']['owner']) print("Labels:", labels) # Exit if we cannot initialize the model except Exception as e: print("ERROR: Could not initialize model") print("Exception:", e) if (runner): runner.stop() sys.exit(1) # Compute number of window steps num_horizontal_windows = math.floor((cam_width - window_width) / stride) + 1 num_vertical_windows = math.floor((cam_height - window_height) / stride) + 1 # Initial framerate value fps = 0 # Start the camera with PiCamera() as camera: # Configure camera settings camera.resolution = (cam_width, cam_height) camera.rotation = rotation # Container for our frames raw_capture = PiRGBArray(camera, size=(cam_width, cam_height)) # Continuously capture frames (this acts as our main "while True" loop) for frame in camera.capture_continuous(raw_capture, format='bgr', use_video_port=True): # Get timestamp for calculating actual framerate timestamp = cv2.getTickCount() # Get Numpy array that represents the image img = frame.array # Convert image to RGB img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # >>> ENTER YOUR CODE HERE <<< # Loop over all possible windows, crop/copy image under window, # perform inference on windowed image, compare output to threshould, # print out info (x, y, w, h) of all bounding boxes that meet or exceed # that threshold. # Slide window across image and perform inference on each sub-image bboxes = [] for vertical_window in range(num_vertical_windows): for horizontal_window in range(num_horizontal_windows): # Crop out image under window x = horizontal_window * stride y = vertical_window * stride window_img = img[y:(y + window_height), x:(x + window_width)] # Extract features from image (e.g. convert to grayscale, crop, etc.) features, cropped = runner.get_features_from_image(window_img) # Do inference on sub-image (cropped window portion) res = None try: res = runner.classify(features) except Exception as e: print("ERROR: Could not perform inference") print("Exception:", e) # The output probabilities are stored in the results predictions = res['result']['classification'] # Remember bounding box location if target inference >= thresh. 
if predictions[target_label] >= target_threshold: bboxes.append((x, y, window_width, window_height, predictions[target_label])) # Draw bounding boxes on preview image for bb in bboxes: cv2.rectangle(img, pt1=(bb[0], bb[1]), pt2=(bb[0] + bb[2], bb[1] + bb[3]), color=(255, 255, 255)) # Print bounding box locations print("---") print("Boxes:") for bb in bboxes: print(" " + "x:" + str(bb[0]) + " y:" + str(bb[1]) + " w:" + str(bb[2]) + " h:" + str(bb[3]) + " prob:" + str(bb[4])) print("FPS:", round(fps, 2)) # Show the frame cv2.imshow("Frame", img) # Clear the stream to prepare for next frame raw_capture.truncate(0) # Calculate framrate frame_time = (cv2.getTickCount() - timestamp) / cv2.getTickFrequency() fps = 1 / frame_time # Press 'q' to quit if cv2.waitKey(1) == ord('q'): break # Clean up cv2.destroyAllWindows()
5,780
prepare_project.py
normanjaeckel/FaoCaseList
0
2170462
""" Small helper script to create settings file and WSGI file from templates. """ import os from string import Template from textwrap import dedent from django.utils.crypto import get_random_string DATABASES_DEVELOPMENT = dedent( """ DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join( os.path.dirname(os.path.abspath(__file__)), "db.sqlite3" ), } } """ ).strip() DATABASES_PRODUCTION = dedent( """ DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql", "NAME": "fao_case_list", "USER": "fao_case_list", "PASSWORD": "", "HOST': "", "PORT': "", } } """ ).strip() def create_settings(): base_dir = os.path.abspath(os.path.dirname(__file__)) new_settings_file_path = os.path.join(base_dir, "fao_case_list_settings.py") if not os.path.exists(new_settings_file_path): default_settings_file_path = os.path.join( base_dir, "fao_case_list_settings.py.tpl" ) with open(default_settings_file_path) as default_settings_file: secret_key = get_random_string( 50, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)" ) host = os.environ.get("FAO_CASE_LIST_HOST") databases = DATABASES_PRODUCTION if host else DATABASES_DEVELOPMENT context = dict( secret_key=secret_key, debug=not host, host=host if host else "*", databases=databases, ) settings = Template(default_settings_file.read()).substitute(**context) with open(new_settings_file_path, "w") as new_settings_file: new_settings_file.write(settings) print("Settings file {} successfully created.".format(new_settings_file_path)) def create_wsgi(): base_dir = os.path.abspath(os.path.dirname(__file__)) new_wsgi_file_path = os.path.join(base_dir, "fao_case_list_wsgi.py") if not os.path.exists(new_wsgi_file_path): default_wsgi_file_path = os.path.join(base_dir, "fao_case_list_wsgi.py.tpl") with open(default_wsgi_file_path) as default_wsgi_file: with open(new_wsgi_file_path, "w") as new_wsgi_file: new_wsgi_file.write(default_wsgi_file.read()) print("WSGI file {} successfully created.".format(new_wsgi_file_path)) if __name__ == "__main__": create_settings() create_wsgi()
2,601
polls_api/polls/admin.py
max-belichenko/Django-Polls-API
0
2170639
from django.contrib import admin

from .models import Poll, Question, Choice, Answer

admin.site.register(Poll)
admin.site.register(Question)
admin.site.register(Choice)
admin.site.register(Answer)
199
PythonCode/Routing/RouteGeneration/plot_routes.py
NUIG-ROCSAFE/CBRNeVirtualEnvironment
3
2170231
import folium import sys import time import os from collections import namedtuple from selenium import webdriver from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By GPSCoordinate = namedtuple("GPSCoordinate", ["lat", "long"]) os.chdir('..\\..') #gps_coords_loc = 'MissionResolverCode\\work_resolver_runner.txt' gps_coords_loc = "D:\\IJCAIDemoCode\\CommsHubCode\\work_resolver_runner.txt" locations = open(gps_coords_loc, 'r') gps_coordinates = [] file_lines = locations.readlines() colors = ['blue','green','red', 'yellow', 'black'] marker_sides = [i for i in range(3, 3 + len(colors)+1)] color_counter = 0 map=folium.Map(location=[53.28174954176364, -9.061292838154545], tiles='Stamen Terrain',zoom_start=15) points = [] for location in file_lines: if location == '\n': folium.PolyLine(points, color = colors[color_counter], weight = 3).add_to(map) color_counter+=1 points = [] elif len(location.split(','))>1: print(location) location = location.replace('\n','') gps_coordinates.append(GPSCoordinate(location.split(',')[0], location.split(',')[1])) print('creating location: ', [location.split(',')[0], location.split(',')[1]]) folium.RegularPolygonMarker(location = [float(location.split(',')[0]), float(location.split(',')[1])], fill_color = colors[color_counter], radius = 5, number_of_sides=marker_sides[color_counter]).add_to(map) points.append((float(location.split(',')[0]), float(location.split(',')[1]))) folium.TileLayer('openstreetmap').add_to(map) map_loc = '\\PythonClientGPSMapping\\rav_route_mapping.html' #os.chdir('..\\..') print('current directory: ' + os.getcwd()) map.save(os.getcwd() + map_loc) browser = webdriver.Chrome() browser.get(os.getcwd() + map_loc) def show_path(driver, file_path): driver.get('https://www.darrinward.com/lat-long/') csv_input = driver.find_element_by_name('csv_file') csv_input.send_keys(os.getcwd() + file_path) wait = WebDriverWait(driver, 5) element = wait.until(EC.element_to_be_clickable((By.ID, 'labels_show'))) time.sleep(0.5) element = driver.find_element_by_id('labels_show') if element.is_selected(): print('clicking labels_show') time.sleep(1) try: element.click() except Exception as e: print(e) print('clicked labels_show') element = wait.until(EC.element_to_be_clickable((By.ID, 'line_show'))) time.sleep(0.5) element = driver.find_element_by_id('line_show') if element.is_selected(): print('clicking line_show') time.sleep(1) element.click() print('clicked line_show') driver.find_element_by_id('submitButton').click() second_browser = webdriver.Chrome() show_path(second_browser, '\\MissionResolverCode\\test_work_resolver31_all.csv') third_browser = webdriver.Chrome() show_path(third_browser, '\\MissionResolverCode\\test_work_resolver31_one.csv') fourth_browser = webdriver.Chrome() show_path(fourth_browser, '\\MissionResolverCode\\test_work_resolver31_two.csv') fifth_browser = webdriver.Chrome() show_path(fifth_browser, '\\MissionResolverCode\\test_work_resolver31_three.csv') import sys sys.exit(0) third_browser = webdriver.Chrome() third_browser.get('https://www.darrinward.com/lat-long/') csv_input = third_browser.find_element_by_name('csv_file') csv_input.send_keys(os.getcwd() + '\\MissionResolverCode\\test_work_resolver31_one.csv') wait = WebDriverWait(third_browser, 5) element = wait.until(EC.element_to_be_clickable((By.ID, 'labels_show'))) if element.is_selected(): element.click() element = wait.until(EC.element_to_be_clickable((By.ID, 'line_show'))) 
if third_browser.find_element_by_id('line_show').is_selected(): third_browser.find_element_by_id('line_show').click() time.sleep(2) third_browser.find_element_by_id('submitButton').click() fourth_browser = webdriver.Chrome() fourth_browser.get('https://www.darrinward.com/lat-long/') csv_input = fourth_browser.find_element_by_name('csv_file') csv_input.send_keys(os.getcwd() + '\\MissionResolverCode\\test_work_resolver31_one.csv') fourth_browser.find_element_by_id('labels_show').click() fourth_browser.find_element_by_id('line_show').click() fourth_browser.find_element_by_id('submitButton').click() fifth_browser = webdriver.Chrome() fifth_browser.get('https://www.darrinward.com/lat-long/') csv_input = fifth_browser.find_element_by_name('csv_file') csv_input.send_keys(os.getcwd() + '\\MissionResolverCode\\test_work_resolver31_one.csv') fifth_browser.find_element_by_id('labels_show').click() fifth_browser.find_element_by_id('line_show').click() fifth_browser.find_element_by_id('submitButton').click()
4,651
haiku/submissions/accepted/js.py
jsannemo/hiq-challenge-2017
0
2169621
#!/usr/bin/env python3
import sys

line = sys.stdin.readline()
S = int(line)

syllables = set()
for i in range(S):
    line = sys.stdin.readline()
    syllable = line.strip()
    syllables.add(syllable)

poem = [sys.stdin.readline() for _ in range(3)]
poem = list(map(lambda x: x.strip(), poem))

decomp = []
for line in poem:
    decomps = [set([0]) if i == len(line) else set() for i in range(len(line) + 1)]
    for pos in range(len(line) - 1, -1, -1):
        if line[pos] == ' ':
            decomps[pos] = decomps[pos + 1]
        for syllable_length in range(1, 8):
            new_pos = pos + syllable_length
            if new_pos <= len(line):
                syllable = line[pos:new_pos]
                if syllable in syllables:
                    decomps[pos] = decomps[pos] | set([s + 1 for s in decomps[new_pos]])
    decomp.append(decomps[0])

if 5 in decomp[0] and 7 in decomp[1] and 5 in decomp[2]:
    print("haiku")
else:
    print("come back next year")
965
notes.py
Daniel-mon-Goose/flask_notes_app
0
2170661
from app import app, db
from app.models import Note, PreviousNote


@app.shell_context_processor
def make_shell_context():
    return {'db': db, 'Note': Note, 'PreviousNote': PreviousNote}
189
smartmove/visuals/maps.py
ryanjdillon/smartmove
0
2170753
''' This module contains functions for retrieving Kartverket map data for specified areas. Notes ----- The retrieval of map data uses `owslib`. Read more in the `owslib image tutorial <https://geopython.github.io/OWSLib/index.html?highlight=webmapservice>`_. Karverket data is retrieved from the `Kartverket WMS <http://kartverket.no/data/API-og-WMS/>`_. Karverket WMS example: .. code:: bash http://openwms.statkart.no/skwms1/wms.topo3?version=1.1.1&styles=&service=wms&REQUEST=map&SRS=EPSG:32633&BBOX=210924.955,6668620.35,255289.776,6688292.32&LAYERS=topo3_WMS&WIDTH=1650&HEIGHT=1100&FORMAT=image/png&BGCOLOR=0xFFFFFF&TRANSPARENT=TRUE ''' def get_wms_dict(xml): '''An almost useful routine from creating a dict from a capabilities XML Args ---- xml: str Capabilities XML in string format Returns ------- d: OrderedDict Capabilities XML key/values in dict format ''' from collections import OrderedDict from bs4 import BeautifulSoup def get_attrs(layer, key): return layer.find(key).attrs soup = BeautifulSoup(xml, 'lxml') layers = soup.findAll('layer')[1:] d = OrderedDict() for l in layers: title = l.find('title').text d[title] = OrderedDict() boundingboxes = l.findAll('boundingbox') for srs in sorted([srs.text for srs in l.findAll('srs')]): for bb in boundingboxes: if bb['srs'] == srs: d[title][srs] = OrderedDict() for k in sorted(bb.attrs.keys()): if k != 'srs': d[title][srs][k] = bb.attrs[k] return d def project_bbox(srs, lon0, lat0, lon1, lat1): '''Project the bounding box for map extent coords from WGS84 to `srs` Args ---- srs: str Spatial Reference System for map output lon0: float Minimum longitude for map extent lat0: float Minimum latitude for map extent lon1: float Maximum longitude for map extent lat1: float Maximum latitude for map extent Returns ------- bbox: float tuple Bounding box for map extent. Value is `minx, miny, maxx, maxy` in units of the SRS ''' import pyproj wgs84 = pyproj.Proj(init='EPSG:4326') proj = pyproj.Proj('+init={}'.format(srs)) minx, miny = pyproj.transform(wgs84, proj, lon0, lat0) maxx, maxy = pyproj.transform(wgs84, proj, lon1, lat1) return (minx, miny, maxx, maxy) def get_size(bbox, width): '''Generate adjusted width and height from bounds and given width Args ---- bbox: float tuple Bounding box for map extent. Value is `minx, miny, maxx, maxy` in units of the SRS width: int Pixel width for Karverket WMS GetMap() query Return ------ width: int Adjusted pixel width for Karverket WMS GetMap() query height: int Adjusted pixel height for Karverket WMS GetMap() query ''' import pyproj # Maximum WIDTH/HEIGHT dimension for Kartveket WMS GetMap call maxdim = 4096 # Make width equal `maxdim` if too large width = min(width, maxdim) # Get ratio between projected dimensions xdiff = bbox[2] - bbox[0] ydiff = bbox[3] - bbox[1] yx_ratio = ydiff / xdiff # Calcuate height from projected dimension height = round(width * yx_ratio) # Adjust values if height too large if height > maxdim: height = maxdim width = round(height / yx_ratio) return width, height def get_wms_png(wms, bbox, layer, srs, width=1600, transparent=True): '''Get map data via WMS GetMap method for given bounding box, and width Args ---- wms: owslib.wms WMS object with getmap() class method to call bbox: float tuple Bounding box for map extent. 
Value is `minx, miny, maxx, maxy` in units of the SRS layer: str Name of WMS layer to retrieve srs: str Spatial reference system width: int Pixel width for Karverket WMS GetMap() query transparent: bool Switch to make background color transparent (Default: True) Returns ------- oswslib_img: owslib.image Image object with retrieved image data ''' img_fmt = 'image/png' # Generate size parameters from `bbox` and desired pixel width size = get_size(bbox, width) # Retrieve map data using WMS GetMap() call owslib_img = wms.getmap(layers=[layer], srs=srs, bbox=bbox, size=size, format=img_fmt, transparent=transparent) return owslib_img def png2geotiff(filename_png, srs, bbox): '''Read and convert png file to GEOTIFF file with GDAL Args ---- filename_png: str Path and filename of png file to be output srs: str Spatial reference system string (e.g. EPSG:4326) bbox: float tuple Bounding box for map extent. Value is `minx, miny, maxx, maxy` in units of the SRS ''' import os import subprocess filename_tif = '{}.tif'.format(os.path.splitext(filename_png)[0]) params = (srs, bbox[0], bbox[1], bbox[2], bbox[3], filename_png, filename_tif) call = 'gdal_translate -a_srs {} -a_ullr {} {} {} {} {} {}'.format(*params) subprocess.check_call(call.split(' ')) return None def map_ticks(pos0, pos1, n, nsew=False): '''Generate n tick positions and labels from given start and end position Args ---- pos0: float Lon or Lat starting point pos1: float Lon or Lat end point n: int Number of tick positions and labels to generate nsew: bool Switch to append N, S, E, or W to tick labels (Default: False) Returns ------- ticks: list of float Projected tick positions labels: list of str Labels in DPS for generated tick positions ''' import numpy def parse_degminsec(dec_degs, method=None, round_secs=False): '''Parse decimal degrees to degrees, minutes and seconds''' degs = numpy.floor(dec_degs) dec_mins = numpy.abs((dec_degs - degs) * 60) mins = numpy.floor(dec_mins) secs = numpy.abs((dec_mins - mins) * 60) if method == 'lon': if degs < 0: nsew = 'W' elif degs > 0: nsew = 'E' else: nsew = '' elif method == 'lat': if degs < 0: nsew = 'S' elif degs > 0: nsew = 'N' else: nsew = '' else: nsew = '' if round_secs: secs = numpy.round(secs) return degs, mins, secs, nsew ticks = numpy.linspace(pos0, pos1, n) print('lon lat', pos0, pos1) fmt = "{:.0f}$\degree$ {:.0f}$'$ {:.0f}$''$" degs, mins, secs, nsews = parse_degminsec(ticks, round_secs=True) if nsew: fmt += ' {}' values = zip(degs, mins, secs, nsews) labels = [fmt.format(d, m, s, ns) for d, m, s in values] else: values = zip(degs, mins, secs) labels = [fmt.format(d, m, s) for d, m, s in values] return ticks, labels
7,224
tests/start/test_environment_sense.py
spookey/shorter
1
2170214
from shorter.start.environment import (
    DELAY_DEF,
    DELAY_MAX,
    DELAY_MIN,
    DELAY_STP,
)


def test_delay_sense():
    assert DELAY_MIN <= DELAY_MAX
    assert DELAY_STP <= DELAY_MAX
    assert DELAY_DEF >= DELAY_MIN
    assert DELAY_DEF <= DELAY_MAX
    assert DELAY_DEF % DELAY_STP == 0
304
toiro/tokenizers/__init__.py
taishi-i/toiro
94
2170298
from .tokenizer_utils import ( is_nagisa_available, is_janome_available, is_mecab_available, is_sudachipy_available, is_spacy_available, is_ginza_available, is_kytea_available, is_jumanpp_available, is_sentencepiece_available, is_fugashi_ipadic_available, is_tinysegmenter_available, is_fugashi_unidic_available, available_tokenizers ) if is_nagisa_available(): from .tokenizer_nagisa import tokenize as tokenize_nagisa from .tokenizer_nagisa import original_usage as original_nagisa if is_janome_available(): from .tokenizer_janome import tokenize as tokenize_janome from .tokenizer_janome import original_usage as original_janome if is_mecab_available(): from .tokenizer_mecab_python3 import tokenize as tokenize_mecab from .tokenizer_mecab_python3 import original_usage as original_mecab if is_sudachipy_available(): from .tokenizer_sudachipy import tokenize as tokenize_sudachipy from .tokenizer_sudachipy import original_usage as original_sudachipy if is_spacy_available(): from .tokenizer_spacy import tokenize as tokenize_spacy from .tokenizer_spacy import original_usage as original_spacy if is_ginza_available(): from .tokenizer_ginza import tokenize as tokenize_ginza from .tokenizer_ginza import original_usage as original_ginza if is_kytea_available(): from .tokenizer_kytea import tokenize as tokenize_kytea from .tokenizer_kytea import original_usage as original_kytea if is_jumanpp_available(): from .tokenizer_jumanpp import tokenize as tokenize_jumanpp from .tokenizer_jumanpp import original_usage as original_jumanpp if is_sentencepiece_available(): from .tokenizer_sentencepiece import tokenize as tokenize_sentencepiece from .tokenizer_sentencepiece import original_usage as original_sentencepiece if is_fugashi_ipadic_available(): from .tokenizer_fugashi_ipadic import tokenize as tokenize_fugashi_ipadic from .tokenizer_fugashi_ipadic import original_usage as original_fugashi_ipadic if is_tinysegmenter_available(): from .tokenizer_tinysegmenter import tokenize as tokenize_tinysegmenter from .tokenizer_tinysegmenter import original_usage as original_tinysegmenter if is_fugashi_unidic_available(): from .tokenizer_fugashi_unidic import tokenize as tokenize_fugashi_unidic from .tokenizer_fugashi_unidic import original_usage as original_fugashi_unidic from .tokenizer_report import ( compare, compare_from_file, print_words, SelectTokenizer, get_avaiable_tokenizers )
2,588
Examples/Rendering/Python/TPlane.py
forestGzh/VTK
83
2168561
#!/usr/bin/env python

# This simple example shows how to do basic texture mapping.

import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Load in the texture map. A texture is any unsigned char image. If it
# is not of this type, you will have to map it through a lookup table
# or by using vtkImageShiftScale.
bmpReader = vtk.vtkBMPReader()
bmpReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
atext = vtk.vtkTexture()
atext.SetInputConnection(bmpReader.GetOutputPort())
atext.InterpolateOn()

# Create a plane source and actor. The vtkPlanesSource generates
# texture coordinates.
plane = vtk.vtkPlaneSource()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(plane.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
planeActor.SetTexture(atext)

# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
ren.AddActor(planeActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)

ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren.ResetCameraClippingRange()

iren.Initialize()
renWin.Render()
iren.Start()
1,344
Part_3_advanced/m08_abstract_protocol/abstract_class/example_2/tax_calculator/calculation.py
Mikma03/InfoShareacademy_Python_Courses
0
2170523
from typing import Sequence

from tax_calculator.income import Income


def calculate_overall_tax_value(incomes: Sequence[Income]) -> int:
    return sum([income.tax_value() for income in incomes])
199
setup.py
mlrequena78/hivemind
1
2170849
# coding=utf-8 from subprocess import check_output import sys import os import logging from setuptools import find_packages from setuptools import setup assert sys.version_info[0] == 3 and sys.version_info[1] >= 6, "hive requires Python 3.6 or newer" VERSION = 'notag' GIT_REVISION = 'nogitrev' GIT_DATE = 'nogitdate' class GitRevisionProvider(object): """ Static class to provide version and git revision information""" logger = logging.getLogger('GitRevisionProvider') @classmethod def is_git_sha(cls, s): from re import fullmatch return fullmatch('^g[0-9a-f]{8}$', s) is not None @classmethod def get_git_revision(cls, s): git_revision = str(GIT_REVISION) if cls.is_git_sha(s): git_revision = s.lstrip('g') return git_revision @classmethod def get_commits_count(cls, s): commits = None try: commits = int(s) except: pass return commits @classmethod def get_git_date(cls, commit): if commit == GIT_REVISION: return GIT_DATE command = "git show -s --format=%ci {}".format(commit) hivemind_git_date_string = check_output(command.split()).decode('utf-8').strip() return hivemind_git_date_string @classmethod def provide_git_revision(cls): """ Evaluate version and git revision and save it to a version file Evaluation is based on VERSION variable and git describe if .git directory is present in tree. In case when .git is not available version and git_revision is taken from get_distribution call """ version = str(VERSION) git_revision = str(GIT_REVISION) git_date = str(GIT_DATE) if os.path.exists(".git"): from subprocess import check_output command = 'git describe --tags --long --dirty' version_string = check_output(command.split()).decode('utf-8').strip() if version_string != 'fatal: No names found, cannot describe anything.': # git describe -> tag-commits-sha-dirty version_string = version_string.replace('-dirty', '') version_string = version_string.lstrip('v') parts = version_string.split('-') parts_len = len(parts) # only tag or git sha if parts_len == 1: if cls.is_git_sha(parts[0]): git_revision = parts[0] git_revision = git_revision.lstrip('g') else: version = parts[0] if parts_len == 2: version = parts[0] git_revision = cls.get_git_revision(parts[1]) if parts_len > 2: # git sha git_revision = cls.get_git_revision(parts[-1]) # commits after given tag commits = cls.get_commits_count(parts[-2]) # version based on tag version = ''.join(parts[:-1]) if commits is not None: version = ''.join(parts[:-2]) # normalize rc to rcN for PEP 440 compatibility version = version.lower() if version.endswith('rc'): version += '0' else: cls.logger.warning("Git describe command failed for current git repository") git_date = cls.get_git_date(git_revision) else: from pkg_resources import get_distribution try: version, git_revision = get_distribution("hivemind").version.split("+") except: cls.logger.warning("Unable to get version and git revision from package data") cls._save_version_file(version, git_revision, git_date) return version, git_revision @classmethod def _save_version_file(cls, hivemind_version, git_revision, git_date): """ Helper method to save version.py with current version and git_revision """ with open("hive/version.py", 'w') as version_file: version_file.write("# generated by setup.py\n") version_file.write("# contents will be overwritten\n") version_file.write("VERSION = '{}'\n".format(hivemind_version)) version_file.write("GIT_REVISION = '{}'\n".format(git_revision)) version_file.write("GIT_DATE = '{}'\n".format(git_date)) VERSION, GIT_REVISION = GitRevisionProvider.provide_git_revision() SQL_SCRIPTS_PATH = 
'hive/db/sql_scripts/' SQL_UPGRADE_PATH = 'hive/db/sql_scripts/upgrade/' def get_sql_scripts(dir): from os import listdir from os.path import isfile, join return [join(dir, f) for f in listdir(dir) if isfile(join(dir, f))] if __name__ == "__main__": setup( name='hivemind', version=VERSION + "+" + GIT_REVISION, description='Developer-friendly microservice powering social networks on the Steem blockchain.', long_description=open('README.md').read(), packages=find_packages(exclude=['scripts']), data_files=[(SQL_SCRIPTS_PATH, get_sql_scripts(SQL_SCRIPTS_PATH)), (SQL_UPGRADE_PATH, get_sql_scripts(SQL_UPGRADE_PATH))], setup_requires=[ 'pytest-runner' ], dependency_links=[ 'https://github.com/bcb/jsonrpcserver/tarball/8f3437a19b6d1a8f600ee2c9b112116c85f17827#egg=jsonrpcserver-4.1.3+8f3437a' ], install_requires=[ 'aiopg @ https://github.com/aio-libs/aiopg/tarball/862fff97e4ae465333451a4af2a838bfaa3dd0bc', 'jsonrpcserver @ https://github.com/bcb/jsonrpcserver/tarball/8f3437a19b6d1a8f600ee2c9b112116c85f17827#egg=jsonrpcserver', 'simplejson', 'aiohttp', 'certifi', 'sqlalchemy', 'funcy', 'toolz', 'maya', 'ujson', 'urllib3', 'psycopg2-binary', 'aiocache', 'configargparse', 'pdoc==0.3.2', 'diff-match-patch', 'prometheus-client', 'psutil', 'atomic', 'python-dateutil>=2.8.1', 'regex' ], extras_require={ 'dev': [ 'pyYAML', 'prettytable' ] }, entry_points={ 'console_scripts': [ 'hive=hive.cli:run', ] } )
6,600
rockPaperScissors.py
dedeogluhu/python-beginner-projects
0
2170709
import random

# 0 = rock 1 = paper 2 = scissors
x = random.randrange(0,3)
y = "oyunbaşı"
z = int(input("\n 0 = rock \n 1 = paper \n 2 = scissors"))

while True:
    if z == 0:
        if x == 0:
            print("berabere")
        elif x == 1:
            print("kağıt taşı sarar, you lose")
        elif x == 2:
            print("taş makası kırar, you win")
    elif z == 1:
        if x == 0:
            print("kağıt taşı sarar, you win")
        elif x == 1:
            print("berabere")
        elif x == 2:
            print("makas kağıdı keser, you lose")
    elif z == 2:
        if x == 0:
            print("taş makası kırar, you lose")
        elif x == 1:
            print("makas kağıdı keser, you win")
        elif x == 2:
            print("berabere")
    else:
        print("yanlış numara girdiniz")
    z = int(input("yeni bir satı giriniz"))
    x = random.randrange(0,3)
897
third_party/wrappers/sal_wrapper/sa_sal_api.py
ITh4cker/auto_tools
0
2170620
from ctypes import * import platform # **************************************** # Constants # **************************************** if platform.architecture()[0]=='32bit': TMSA_INVALID_HANDLE_VALUE = 0xffffffff else: TMSA_INVALID_HANDLE_VALUE = 0xffffffffffffffff # Return code TM_SA_SUCCESS = 0 TM_SA_ERR_BASE = TM_SA_SUCCESS TM_SA_ERR_INVALID_HANDLE = TM_SA_ERR_BASE - 1 TM_SA_ERR_FIND_PATH = TM_SA_ERR_BASE - 2 TM_SA_ERR_OPEN_FILE = TM_SA_ERR_BASE - 3 TM_SA_ERR_READ_FILE = TM_SA_ERR_BASE - 4 TM_SA_ERR_PATTERN_CONTENT = TM_SA_ERR_BASE - 5 TM_SA_ERR_INSUFFICIENT_BUF = TM_SA_ERR_BASE - 6 TM_SA_ERR_INVALID_PARAM = TM_SA_ERR_BASE - 7 TM_SA_ERR_MEM_ALLOC_FAIL = TM_SA_ERR_BASE - 8 TM_SA_ERR_UNKNOWN = TM_SA_ERR_BASE - 9 TM_SA_ERR_INVALID_HTTP_CONTENT = TM_SA_ERR_BASE - 10 TM_SA_ERR_UNSUPPORT_TYPE = TM_SA_ERR_BASE - 11 TM_SA_ERR_BUFFER_FULL = TM_SA_ERR_BASE - 12 TM_SA_ERR_CALL_SEQ = TM_SA_ERR_BASE - 13 TM_SA_ERR_INTERNAL = TM_SA_ERR_BASE - 14 TM_SA_ERR_EXCESSIVE_TRAFFIC = TM_SA_ERR_BASE - 15 TM_SA_ERR_INVALID_OPT_VALUE = TM_SA_ERR_BASE - 16 TM_SA_ERR_INVALID_EVENT_TYPE = TM_SA_ERR_BASE - 17 TM_SA_ERR_INVALID_EVENT_HANDLE = TM_SA_ERR_BASE - 18 TM_SA_ERR_FILTER_BY_RANK = TM_SA_ERR_BASE - 19 # TM_SA_DIAGNOSIS_TYPE TM_SA_DIAGNOSIS_INVALID = -1 TM_SA_DIAGNOSIS_LOWRISK = 1 TM_SA_DIAGNOSIS_UNDETERMINED = 2 TM_SA_DIAGNOSIS_MONITORING = 3 TM_SA_DIAGNOSIS_SUSPICIOUS = 4 TM_SA_DIAGNOSIS_MALICIOUS = 5 # TM_SA_OPTIONS TM_SA_OPT_LOGPATH = 1 TM_SA_OPT_LOGLEVEL = 2 TM_SA_OPT_UCONV_CALLBACK = 3 TM_SA_OPT_FEEDBACK_CALLBACK = 4 TM_SA_OPT_LOCAL_EVENT_STORE_CALLBACK = 5 TM_SA_OPT_NON_BLOCKING_MODE = 6 TM_SA_OPT_RANK_FOLDER = 7 TM_SA_OPT_NF_FOLDER = 8 # TM_SA_CONTENT_TYPE TM_SA_HTTP_REQ_HOST_IP = 1 TM_SA_HTTP_REQ_URL = 2 TM_SA_HTTP_REQ_HDR = 3 TM_SA_HTTP_REQ_BODY = 4 TM_SA_HTTP_RESP_HDR = 5 TM_SA_HTTP_RESP_BODY = 6 TM_SA_HTTP_SCRIPT_BODY = 7 # TM_SA_LOG_LEVEL TM_SA_LOG_OFF = 6 TM_SA_LOG_FATAL = 5 TM_SA_LOG_ERROR = 4 TM_SA_LOG_WARN = 3 TM_SA_LOG_INFO = 2 TM_SA_LOG_DEBUG = 1 TM_SA_LOG_TRACE = 0 TM_SA_LOG_ALL = TM_SA_LOG_TRACE # TM_SA_DESCRIPTION_TYPE TM_SA_DESCRIPTION_DECISION = 0 TM_SA_DESCRIPTION_BEHAVIOR = 1 TM_SA_DESCRIPTION_ANALYZER = 2 TM_SA_DESCRIPTION_MATCHED_RULES = 3 TM_SA_DESCRIPTION_USER_DEFINE = 4 TM_SA_DESCRIPTION_MAX = 5 # TM_SA_WEBPAGE_INFO_TYPE TM_SA_WEBPAGE_INFO_CHILDURLS = 0 TM_SA_WEBPAGE_INFO_FILETYPE = 1 # TM_SA_CONTEXT_TYPE TM_SA_CONTEXT_AUTO = 0 TM_SA_CONTEXT_AFFINITIVE = 1 # TM_SA_SCAN_TYPE TM_SCAN_TYPE_BROWSER_HTML_CONTENT = 0 TM_SCAN_TYPE_BROWSER_SCRIPT_EXECUTION = 1 TM_SCAN_TYPE_BROWSER_DOCUMENT_COMPLETE = 2 TM_SCAN_TYPE_BROWSER_HTML_SIGNATURE = 3 TM_SCAN_TYPE_PROXY_LINK = 4 TM_SCAN_TYPE_PROXY_ALL = 5 TMSA_TOTAL_SCAN_TYPE_COUNT = 6 # **************************************** # Interfaces # **************************************** # load library tmsadll = cdll.LoadLibrary('libtmsa.so') # alias TMSAEng_initialize = tmsadll.TMSAEng_initialize TMSAEng_uninitialize = tmsadll.TMSAEng_uninitialize TMSAEng_getEngineVersion = tmsadll.TMSAEng_getEngineVersion TMSAEng_getPatternVersion = tmsadll.TMSAEng_getPatternVersion TMSAEng_setOption = tmsadll.TMSAEng_setOption TMSAEng_getOption = tmsadll.TMSAEng_getOption TMSAEng_createContext = tmsadll.TMSAEng_createContext TMSAEng_createPage = tmsadll.TMSAEng_createPage TMSAEng_addContent = tmsadll.TMSAEng_addContent TMSAEng_scan = tmsadll.TMSAEng_scan TMSAEng_scanEx = tmsadll.TMSAEng_scanEx TMSAEng_getDiagnosis = tmsadll.TMSAEng_getDiagnosis TMSAEng_getDescription = tmsadll.TMSAEng_getDescription TMSAEng_getDescriptionEx = tmsadll.TMSAEng_getDescriptionEx 
TMSAEng_getWebPageInfo = tmsadll.TMSAEng_getWebPageInfo TMSAEng_freeHandle = tmsadll.TMSAEng_freeHandle # function prototypes def _define_prototypes(): prototypes = [ ( TMSAEng_initialize, c_long, [c_wchar_p, c_void_p] ), ( TMSAEng_uninitialize, c_long, [] ), ( TMSAEng_getEngineVersion, c_long, [c_void_p, c_void_p, c_void_p] ), ( TMSAEng_getPatternVersion, c_long, [c_void_p, c_void_p, c_void_p] ), ( TMSAEng_setOption, c_long, [c_int, c_void_p, c_uint32] ), ( TMSAEng_getOption, c_long, [c_int, c_void_p, c_void_p] ), ( TMSAEng_createContext, c_void_p, [c_wchar_p, c_int] ), ( TMSAEng_createPage, c_void_p, [c_void_p] ), ( TMSAEng_addContent, c_long, [c_void_p, c_int, c_char_p, c_uint32] ), ( TMSAEng_scan, c_void_p, [c_void_p, c_void_p, c_void_p] ), ( TMSAEng_scanEx, c_void_p, [c_void_p, c_int, c_void_p, c_void_p, c_void_p] ), ( TMSAEng_getDiagnosis, c_int, [c_void_p] ), ( TMSAEng_getDescription, c_long, [c_void_p, c_wchar_p, c_void_p] ), ( TMSAEng_getDescriptionEx, c_long, [c_void_p, c_int, c_wchar_p, c_void_p] ), ( TMSAEng_getWebPageInfo, c_long, [c_void_p, c_int, c_wchar_p, c_void_p] ), ( TMSAEng_freeHandle, c_long, [c_void_p] ), ] for prototype in prototypes: prototype[0].restype = prototype[1] prototype[0].argtypes = prototype[2] _define_prototypes()
6,740
packages/path.py
dlorenc/debsrc
0
2170689
Package: path.py
Binary: python3-path
Version: 15.1.0-1
Maintainer: Debian Python Modules Team <<EMAIL>>
Uploaders: <NAME> <<EMAIL>>
Build-Depends: debhelper-compat (= 13), dh-python (>= 2.20160609~), python3-all, python3-packaging, python3-pytest, python3-toml, python3-setuptools, python3-setuptools-scm
Architecture: all
Standards-Version: 4.5.1
Format: 3.0 (quilt)
Files:
 b6de09a2a7d212787bbb1f55e4ecba15 2216 path.py_15.1.0-1.dsc
 94594480b037f1ae442ff3711c2f92ea 43506 path.py_15.1.0.orig.tar.gz
 0ac5834d3e46828c0f2d485a62f99d2f 3444 path.py_15.1.0-1.debian.tar.xz
Vcs-Browser: https://salsa.debian.org/python-team/packages/path.py
Vcs-Git: https://salsa.debian.org/python-team/packages/path.py.git
Checksums-Sha256:
 a62eb976b953b598e285fc045d4c6169102414386a5b118e7a73e413e425dd33 2216 path.py_15.1.0-1.dsc
 122306c5bc0132002e6e3ad0c242d3e6f05901dc4af6d187d846c09dc19d4571 43506 path.py_15.1.0.orig.tar.gz
 37a69874e9fefee7d18ddc8793f9e988cd98a341396beff9844e068a3cfc3777 3444 path.py_15.1.0-1.debian.tar.xz
Homepage: https://github.com/jaraco/path.py
Package-List:
 python3-path deb python optional arch=all
Testsuite: autopkgtest, autopkgtest-pkg-python
Testsuite-Triggers: python3-packaging, python3-pytest, python3-setuptools
Directory: pool/main/p/path.py
Priority: optional
Section: misc
1,305
openstack_dashboard/dashboards/admin/access_and_security/download_sql/urls.py
xuweiliang/Codelibrary
0
2169586
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.conf.urls import url

from openstack_dashboard.dashboards.admin.access_and_security.\
    download_sql import views

urlpatterns = [
    url(r'^download_db/$', views.download_db_file, name='download_db'),
]
606
lucent/optvis/param/__init__.py
TomFrederik/lucent
2
2169921
from lucent.optvis.param.images import image
from lucent.optvis.param.spatial import *
from lucent.optvis.param.lowres import *
from lucent.optvis.param.cppn import *
from lucent.optvis.param.color import *
from lucent.optvis.param.gan import UpConvGAN
254
utils/torch.py
aycatakmaz/compass
15
2170741
import sys import torch def get_gpu_device(id): if torch.cuda.is_available(): device_gpu = torch.device("cuda:" + str(id)) else: raise RuntimeError('No gpu device found.') return device_gpu def get_cpu_device(): device_cpu = torch.device("cpu") return device_cpu def print_network_module(network, stream=sys.stdout): print(format(network), file=stream, flush=False) for name, param in network.named_parameters(): if 'kernel' in name: layer_str = "Name: {} - Shape {}".format(name, param.transpose(2, 1).shape) print(layer_str, file=stream, flush=False) def b_get_rotation_matrices_from_euler_angles_on_tensor(alphas, betas, gammas, device): zs = torch.zeros(alphas.shape, device=device, requires_grad=True) os = torch.ones(alphas.shape, device=device, requires_grad=True) def z(a): first_row = torch.stack([torch.cos(a), torch.sin(a), zs], dim=1) second_row = torch.stack([-torch.sin(a), torch.cos(a), zs], dim=1) third_row = torch.stack([zs, zs, os], dim=1) mat_z = torch.stack([first_row, second_row, third_row], dim=1) return mat_z def y(a): f_rw = torch.stack([torch.cos(a), zs, -torch.sin(a)], dim=1) s_rw = torch.stack([zs, os, zs], dim=1) t_rw = torch.stack([torch.sin(a), zs, torch.cos(a)], dim=1) mat_y = torch.stack([f_rw, s_rw, t_rw], dim=1) return mat_y return torch.bmm(torch.bmm(z(gammas), y(betas)), z(alphas)) def load_models_from_ckp(path_checkpoint, model): """ Function to load values in checkpoint file. :param path_checkpoint: path to ckp file :param model: model for which to load the weights :return: """ if path_checkpoint is not None: dict_ckp = torch.load(path_checkpoint, map_location=torch.device('cpu')) print("Loaded model from: {}".format(path_checkpoint)) for key in dict_ckp: print("{}".format(key)) model.load_state_dict(dict_ckp) return True return False def rotate_batch_cloud(b_points, b_mats): """ :param points: a tensor of point in row vector format [B X N X 3] :param mats: a tensor of 3 x 3 rotation matrices format [B X 3 X 3] :return: """ # Tranpose rotation matrices as we multiply row vector points return torch.bmm(b_points, b_mats.transpose(2, 1).contiguous())
2,426
store/migrations/0006_auto_20200712_1226.py
ai-nishant/django-ecommerce
0
2170301
# Generated by Django 3.0.8 on 2020-07-12 06:56

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('store', '0005_auto_20200712_1219'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='availability',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='shippingaddress',
            name='pincode',
            field=models.IntegerField(null=True),
        ),
    ]
555
main.py
JcQSteven/sectown_floor
1
2170656
#coding:utf-8 from tkinter import * import time from selenium import webdriver from selenium.webdriver.common.keys import Keys import threading import random class Automan(Frame): def __init__(self,master=None): Frame.__init__(self, master) self.pack() self.createWidgets() def createWidgets(self): self.username_var= StringVar(self,'') self.password_var=StringVar(self,'') self.login_url_var=StringVar(self,'http://dev3.securitytown.net/login') self.auto_url_var=StringVar(self,'http://dev3.securitytown.net/group/14/thread/33') #self.login_url_var=StringVar(self,'http://www.sectown.cn/login') #self.auto_url_var=StringVar(self,'https://www.sectown.cn/group/14/thread/') self.sleeptime_var=StringVar(self,'0.1') self.target_floor_var=StringVar(self,'0') self.con_var=StringVar(self,u'酷酷的我抢到了这一层!') self.info_var=StringVar(self,u'') self.check_box_var=IntVar(self) self.hint_var=StringVar(self,u'不要多开该程序,需要重新抢楼的话请终止抢楼先。\n有问题就联系金成强。不要提奇奇怪怪的需求!\n') self.hint_label=Label(self) self.hint_label['textvariable']=self.hint_var self.hint_label.pack() self.username_label = Label(self) self.username_label['text']=u'账号' self.username_label.pack() self.username_entry=Entry(self) self.username_entry['textvariable']=self.username_var self.username_entry.pack() self.password_label=Label(self) self.password_label['text']=u'密码' self.password_label.pack() self.password_entry = Entry(self) self.password_entry['textvariable'] = self.password_var self.password_entry.pack() self.login_url_label=Label(self) self.login_url_label['text']=u'登录账号网址' self.login_url_label.pack() self.login_url_entry = Entry(self) self.login_url_entry['textvariable'] = self.login_url_var self.login_url_entry['width']=30 self.login_url_entry.pack() self.auto_url_label=Label(self) self.auto_url_label['text']=u'抢楼网址' self.auto_url_label.pack() self.auto_url_entry = Entry(self) self.auto_url_entry['textvariable'] = self.auto_url_var self.auto_url_entry['width'] = 50 self.auto_url_entry.pack() self.sleeptime_label=Label(self) self.sleeptime_label['text']=u'刷新等待时间' self.sleeptime_label.pack() self.sleeptime_entry = Entry(self) self.sleeptime_entry['textvariable'] = self.sleeptime_var self.sleeptime_entry.pack() self.target_floor_label=Label(self) self.target_floor_label['text']=u'目标楼层' self.target_floor_label.pack() self.target_floor_entry = Entry(self) self.target_floor_entry['textvariable'] = self.target_floor_var self.target_floor_entry.pack() self.con_label = Label(self) self.con_label['text']='Content' self.con_label.pack() self.con_entry = Entry(self) self.con_entry['textvariable'] = self.con_var self.con_entry.pack() self.check_box=Checkbutton(self,text=u'是否自动刷楼',variable=self.check_box_var) self.check_box.pack() self.botton1=Button(self) self.botton1['text']=u'开始抢楼' self.botton1['command']=self.thread_control self.botton1.pack() self.botton2 = Button(self) self.botton2['text'] = u'停止抢楼' self.botton2['command'] = self.quit_auto self.botton2.pack() self.info_label=Label(self) self.info_label['textvariable']=self.info_var self.info_label['bg']='red' self.info_label.pack() self.botton3=Button(self,text=u'测试') self.botton3['command']=self.ceshi self.botton3.pack() self.thread_flag=True def ceshi(self): self.lang_list = [] f = open('random_lang.txt', 'r') lines = f.readlines() for line in lines: self.lang_list.append(line.decode('utf-8')) pass def login(self): self.username=self.username_var.get() self.password=<PASSWORD>() self.login_url=self.login_url_var.get() self.auto_url=self.auto_url_var.get() self.browser = webdriver.Chrome('./chromedriver') 
self.sleeptime=float(self.sleeptime_var.get()) self.target_floor=int(self.target_floor_var.get())-1 self.con=self.con_var.get() self.browser.get(self.login_url) user = self.browser.find_element_by_name('_username') pwd = self.browser.find_element_by_name('_password') user.send_keys(self.username) pwd.send_keys(self.password) pwd.send_keys(Keys.RETURN) self.auto_done() def auto_done(self): self.browser.execute_script('window.open("%s")'%self.auto_url) time.sleep(1) handles = self.browser.window_handles self.browser.switch_to_window(handles[-1]) floor=self.browser.find_elements_by_xpath('//*[@class="floor"]') test_floor=floor[-1].text test_floor_num=int(test_floor[:-1]) # last_page_ele = self.browser.find_element_by_xpath('//*[@class="pagination cd-pagination"]/li[last()]/a') # last_page = last_page_ele.get_attribute('href') last_page=self.auto_url_var.get()+'?page=99' self.browser.execute_script('window.open("%s")' % last_page) time.sleep(1) while 1: if self.thread_flag==False: self.info_var.set(u'您已终止程序') self.browser.quit() break if test_floor_num>31: last_page_ele_try = self.browser.find_element_by_xpath('//*[@class="pagination cd-pagination"]/li[last()]/a') last_page_try = last_page_ele_try.get_attribute('href') if last_page_try!=last_page: last_page=last_page_try self.browser.execute_script('window.open("%s")' % last_page) time.sleep(1) else: pass handles = self.browser.window_handles self.browser.switch_to_window(handles[-1]) floor = self.browser.find_elements_by_xpath('//*[@class="floor"]') last_floor = floor[-1].text self.info_var.set(u'目前楼层数' + last_floor) last_floor_num=int(last_floor[:-1]) if last_floor_num == self.target_floor: time.sleep(1) self.get_floor() # content = self.browser.find_element_by_tag_name('iframe') # self.browser.switch_to_frame(content) # p = self.browser.find_element_by_tag_name('body') # p.send_keys(self.con) # self.browser.switch_to_default_content() # self.browser.find_element_by_id('post-thread-btn').click() self.browser.quit() self.info_var.set(u'恭喜抢楼成功,抢到楼层%d'%(self.target_floor+1)) break else: if last_floor_num<self.target_floor: if self.check_box_var.get()==1: self.browser.switch_to_window(handles[-1]) self.get_floor() continue self.browser.refresh() time.sleep(self.sleeptime) else: self.browser.quit() self.info_var.set(u'抱歉,您要抢的楼层已经不存在,重新调整楼层位置') break #输入内容发送 def get_floor(self): content = self.browser.find_element_by_tag_name('iframe') self.browser.switch_to_frame(content) p = self.browser.find_element_by_tag_name('body') #p.send_keys(self.con) p.send_keys(self.lang_list[random.randint(0,len(self.lang_list)-1)]) self.browser.switch_to_default_content() self.browser.find_element_by_id('post-thread-btn').click() pass def quit_auto(self): self.thread_flag=False def read_lang(self): self.lang_list = [] f = open('random_lang.txt', 'r') lines = f.readlines() for line in lines: self.lang_list.append(line.decode('utf-8')) pass def thread_control(self): self.thread_flag=True self.t=threading.Thread(target=self.login) self.t.setDaemon(True) self.t.start() if __name__ == '__main__': root=Tk() root.title(u'安全通内部抢楼机器人') root.wm_attributes('-topmost', 1) root.geometry('400x600+30+30') auto_man=Automan(master=root) auto_man.mainloop()
8,654
fatima/utils/loggers/__init__.py
AmrMKayid/fatima
15
2170045
from .base import Logger
from .tensorboard import TensorBoardLogger

# from .wandb import WandBLogger
102
source/ConsoleLogger.py
BogyMitutoyoCTL/Sophy
0
2170402
from datetime import datetime


class ConsoleLogger:
    def __init__(self, what):
        self.what = what

    def start(self, time: float):
        d = datetime.fromtimestamp(time)
        print("Zur Uhrzeit {} ist {} gestartet.".format(d, self.what))

    def stop(self, time: float):
        x = datetime.fromtimestamp(time)
        print("Zur Uhrzeit {} ist {} gestoppt.".format(x, self.what))
395
cafeteria/users/urls.py
devGW/PostApp
0
2168913
from django.urls import path

from . import views

app_name = "users"

urlpatterns = [
    path("push-token/", view=views.PushToken.as_view(), name='push_token'),
    path("authentication/", view=views.StudentAuthentication.as_view(), name='authentication'),
    path("explore/", view=views.ExploreUser.as_view(), name="explore_user"),
    path("<int:user_id>/follow/", view=views.FollowUser.as_view(), name="flw_user"),
    path("<int:user_id>/unfollow/", view=views.UnFollowUser.as_view(), name="uflw_user"),
    path("<username>/password/", view=views.ChangePassword.as_view(), name="password"),
    path("<username>/", view=views.UserProfile.as_view(), name="user_propfile"),
    path("login/kakao/", view=views.KakaoLogin.as_view(), name='kakao_login'),
    path("<name>/already_nickname/", view=views.IsAlreadyName.as_view(), name='is_already_name'),
    path("<email>/already_email/", view=views.IsAlreadyEmail.as_view(), name='is_already_email'),
    path("<username>/already_id/", view=views.IsAlreadyId.as_view(), name='is_already_id'),
]
1,048
basics/pandas_basics.py
paulmorio/grusData
0
2167304
# previously we looked into numpy and its ndarrayobject in particular. Here
# we build on that knowledge by looking at the data structures provided by the Pandas Library.

# Pandas is a newer package built on top of NumPz and proveides an efficient
# implementation of a DataFrame.

# Here we will focus on the mechanics of using Series, DataFrame and related structures effectively.
# We will use examples drawn from real datasets where appropriate, but these examples are not
# necessarily the focus.

import numpy as np
import pandas as pd

# Series
# a pandas series is a one dimensional array of indeed data
data = pd.Series([0.25, 0.5, 0.75, 1.0])
data

# as attributes it has values and index attributes
data.values
data.index

# can be sliced
data[1:3]

# The big difference is that it has a specific index value in the series
data = pd.Series([0.25, 0.5, 0.75, 1.0], index=["a", "b", "c", "d"])
data

# The series data object can also be likened and used as a python dictionary.
# but for typed types only. So its a bit faster but a lot less general
population_dict = {'California': 38332521, 'Texas': 26448193, 'New York': 19651127,
                   'Florida': 19552860, 'Illinois': 12882135}
population = pd.Series(population_dict)
population

# Typical dictionary actions can be taken as well
population['California']

# However you can also slice a dictionary which is a bit weird
population['California':'Illinois']

# Turn a dictionary into a pd series
area_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297,
             'Florida': 170312, 'Illinois': 149995}
area = pd.Series(area_dict)
area

# lets take population and area and turn it into a dataframe
states = pd.DataFrame({'population': population, 'area': area})
states

# DataFrame as specialized dictionary
# Similarly, we can also think of a DataFrame as a specialization of a dictionary.
# Where a dictionary maps a key to a value, a DataFrame maps a column name to a Series
# of column data. For example, asking for the 'area' attribute returns the Series object
# containing the areas we saw earlier:
2,185
rack/version.py
tkaneko0204/rack
0
2170677
# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pbr.version

from rack.openstack.common.gettextutils import _

RACK_VENDOR = "OpenStack Foundation"
RACK_PRODUCT = "OpenStack Rack"
RACK_PACKAGE = None  # OS distro package version suffix

loaded = False
version_info = pbr.version.VersionInfo('rack')
version_string = version_info.version_string


def _load_config():
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo.config import cfg

    from rack.openstack.common import log as logging

    global loaded, RACK_VENDOR, RACK_PRODUCT, RACK_PACKAGE
    if loaded:
        return

    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return

    try:
        cfg = ConfigParser.RawConfigParser()
        cfg.read(cfgfile)

        RACK_VENDOR = cfg.get("Rack", "vendor")
        if cfg.has_option("Rack", "vendor"):
            RACK_VENDOR = cfg.get("Rack", "vendor")

        RACK_PRODUCT = cfg.get("Rack", "product")
        if cfg.has_option("Rack", "product"):
            RACK_PRODUCT = cfg.get("Rack", "product")

        RACK_PACKAGE = cfg.get("Rack", "package")
        if cfg.has_option("Rack", "package"):
            RACK_PACKAGE = cfg.get("Rack", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"),
                  {'cfgfile': cfgfile, 'ex': ex})


def vendor_string():
    _load_config()
    return RACK_VENDOR


def product_string():
    _load_config()
    return RACK_PRODUCT


def package_string():
    _load_config()
    return RACK_PACKAGE


def version_string_with_package():
    if package_string() is None:
        return version_info.version_string()
    else:
        return "%s-%s" % (version_info.version_string(), package_string())
2,502
apps/driving_pi/video_cam.py
neuralbotnetworks/ncappzoo
0
2166937
#! /usr/bin/env python3

# Copyright(c) 2019 Intel Corporation.
# License: MIT See LICENSE file in root directory.

from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import logging as log
import time
from itertools import count

res_width = 640
res_height = 480


class VideoCamera:
    """ This class controls the camera. Currently only a webcam and the Camera-Pi camera
        are supported; by default the 'usb' camera is used. """

    def __init__(self, cam_type):
        self.cam_width = res_width
        self.cam_height = res_height
        self.camera_type = cam_type
        self.camera = None
        self.rawCapture = None

        if cam_type == "camerapi":
            log.info("Loading Camera Pi")
            self.camera = PiCamera()
            self.camera.resolution = (self.cam_width, self.cam_height)
        elif cam_type == "usb":
            camera_id = 0
            log.info("Loading USB Camera id {}".format(camera_id))
            self.camera = cv2.VideoCapture(camera_id)
            self.camera.set(cv2.CAP_PROP_FPS, 30)
            self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.cam_width)
            self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.cam_height)

        log.info("Camera size {}x{}".format(self.cam_width, self.cam_height))

    def frame(self):
        """ Get frame from camera """
        if self.camera_type == "camerapi":
            self.rawCapture = PiRGBArray(self.camera, size=(self.cam_width, self.cam_height))
            framesArray = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port=True)
            return next(framesArray).array
        elif self.camera_type == "usb":
            assert self.camera.isOpened(), "Couldn't open Camera"
            success, orig_frame = self.camera.read()
            assert success, "Can't snap image"
            return orig_frame

    def clean_video(self):
        """ Clean the frame in each iteration of the loop.
            Use this function only when using the camerapi type. """
        if self.camera_type == "camerapi":
            self.rawCapture.truncate(0)
2,155
bentley_ottmann_api/crud.py
PawWitGit/bentley-ottmann-api
0
2170653
from io import BytesIO from itertools import chain from random import randint from typing import Any, Union from typing import Iterable import matplotlib.pyplot as plt from sqlalchemy import select from sqlalchemy.orm import Session from starlette.responses import StreamingResponse from bentley_ottmann_api.bentley_ottmann import find_intersections from bentley_ottmann_api.models import Lines, Boards, BentleyOttmannPoints from bentley_ottmann_api.schemas import RandomBoard, LineOut def commit_and_refresh_model_instance( session: Session, instance: Any, refresh: bool = True, commit: bool = True ) -> Any: """ Function commits and refreshes model instance. Args: session: db session instance: instance to save refresh: refresh instance if `True` default: `True` commit: if `True` data will be saved in the database otherwise an object not saved in the database will be returned Returns: new created object in db """ session.add(instance) if commit: session.commit() if refresh: session.refresh(instance=instance) return instance def get_query_result_without_nested( result: Union[tuple, list], many: bool = False ) -> Any: """ Function parsed query result. Args: result: db query result many: if `True` objects will be returned in the list otherwise single object Returns: parsed db query result """ return list(chain(*result)) if many else result[0] if result else None def get(db: Session, model: Any, many: bool = False, **query_data: Any) -> Any: """ Function makes query without join. Args: db: session db model: model class query_data: query data many: if `True` all objects in the database will be returned otherwise one object will be returned Returns: objects or object from db """ result = db.execute(select(model).filter_by(**query_data)) # type: ignore return get_query_result_without_nested( result=result.all() if many else result.one_or_none(), many=many ) def create( db: Session, model: Any, refresh: bool = True, commit: bool = True, **model_data: Any, ) -> Any: """ Function inserts new record to database. Args: db: session db model: model class refresh: refresh instance if `True` default: `True` commit: if `True` data will be saved in the database otherwise an object not saved in the database will be returned Returns: new created object in db """ instance_model = model(**model_data) return commit_and_refresh_model_instance( session=db, instance=instance_model, refresh=refresh, commit=commit ) def get_boards_by_name(db: Session, name: str) -> Boards: """ Function returns boards by name. Args: db: database session name: boards name Returns: board if exists otherwise None """ return get(db=db, model=Boards, name=name) def get_all_boards(db: Session) -> list[Boards]: """ Function returns all board. Args: db: database session Returns: all boards if exists otherwise empty list """ return get(db=db, model=Boards, many=True) def create_board(db: Session, name: str) -> Boards: """ Function creates board. Args: db: database session name: board name Returns: new created instances """ return create(db=db, model=Boards, name=name) def create_line_and_add_to_bord( db: Session, point_x: list[float], point_y: list[float], board: Boards ) -> Lines: """ Function creates new line and adds its to board. 
Args: db: database sessions point_x: coordinate point x point_y: coordinate point y board: board Returns: new line """ line = create(db=db, model=Lines, point_x=point_x, point_y=point_y) board.lines.append(line) commit_and_refresh_model_instance(session=db, instance=board, refresh=False) return line def _get_random_point(min_x: int, max_x: int, min_y: int, max_y: int) -> list[float]: """ Functions returns random point. Args: min_x: min x max_x: max x min_y: min y max_y: max y Returns: random point """ return [ randint(min_x, max_x), randint(min_y, max_y), ] def create_random_lines( db: Session, random_board_data: RandomBoard, board: Boards ) -> None: """ Function creates random lines. Args: db: database session random_board_data: random board data board: board Returns: `None` """ for _ in range(random_board_data.max_items): create_line_and_add_to_bord( db=db, point_x=_get_random_point( min_x=random_board_data.min_range_x, max_x=random_board_data.max_range_x, min_y=random_board_data.min_range_y, max_y=random_board_data.max_range_y, ), point_y=_get_random_point( min_x=random_board_data.min_range_x, max_x=random_board_data.max_range_x, min_y=random_board_data.min_range_y, max_y=random_board_data.max_range_y, ), board=board, ) def delete_lines(db: Session, lines: list[LineOut]) -> None: """ Function deletes lines Args: db: database session lines: lines Returns: `None` """ for line in lines: db.delete(instance=line) db.commit() def delete_boards(db: Session, board: Boards) -> None: """ Function deletes boards. Args: db: database session board: board Returns: `None` """ delete_lines(lines=board.lines, db=db) db.delete(board) db.commit() def draw_lines(lines: Iterable) -> plt: """ Function draw lines. Args: lines: lines to draw Returns: image """ plt.figure() plt.title("Lines") for x_point, y_point in lines: plt.plot([x_point[0], y_point[0]], [x_point[1], y_point[1]]) return plt def get_draw_response(plt_image: plt) -> StreamingResponse: """ Method gets response from plt image. Args: plt_image: plt image Returns: """ buf = BytesIO() plt_image.savefig(buf, format="png") buf.seek(0) return StreamingResponse(buf, media_type="image/png") def delete_old_intersection_points(db: Session, board: Boards) -> None: """ Method deletes old intersection points. Args: db: db session board: board Returns: `None` """ for point in board.bentley_ottmann_points: db.delete(point) db.commit() def find_intersection_for_board(db: Session, board: Boards) -> None: """ Method finds all intersections point for board. Args: db: db session board: board Returns: `None` """ delete_old_intersection_points(db=db, board=board) points = find_intersections( lines=[(line.point_x, line.point_y) for line in board.lines] ) for idx, point in enumerate(points, 1): create( db=db, model=BentleyOttmannPoints, point=point, board=board, name=f"P{idx}" ) def draw_lines_and_intersection_points(board: Boards) -> plt: """ Method draws lines and intersection points. Args: board: board Returns: plt image """ plt = draw_lines(lines=(line.get_coordinates() for line in board.lines)) plt.title("Intersection Lines") for idx, point_instance in enumerate(board.bentley_ottmann_points, 1): x, y = point_instance.point[0], point_instance.point[1] plt.scatter(x, y, color="black") plt.text(x + 0.2, y, point_instance.name, verticalalignment="top") return plt
8,073
Moudle/Joomla/joomla_config_find.py
WingsSec/Meppo
60
2169814
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_

import requests

from Config.config_requests import ua

requests.packages.urllib3.disable_warnings()

# Script info
######################################################
NAME = 'joomla_config_find'
AUTHOR = "Trans"
REMARK = 'joomla config文件查找'
FOFA_RULE = 'app="joomla"'
######################################################


def poc(target):
    result = {}
    configlist = ['configuration.php', 'configuration.php_old', 'configuration.php_new',
                  'configuration.php~', 'configuration.php.new', 'configuration.php.new~',
                  'configuration.php.old', 'configuration.php.old~', 'configuration.bak',
                  'configuration.php.bak', 'configuration.php.bkp', 'configuration.txt',
                  'configuration.php.txt', 'configuration - Copy.php', 'configuration.php.swo',
                  'configuration.php_bak', 'configuration.php#', 'configuration.orig',
                  'configuration.php.save', 'configuration.php.original', 'configuration.php.swp',
                  'configuration.save', '.configuration.php.swp', 'configuration.php1',
                  'configuration.php2', 'configuration.php3', 'configuration.php4',
                  'configuration.php4', 'configuration.php6', 'configuration.php7',
                  'configuration.phtml', 'configuration.php-dist']
    for filename in configlist:
        url = target + filename
        headers = {
            "User-Agent": ua,
            "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            "Connection": "close"
        }
        res = requests.get(url, headers=headers, timeout=5, verify=False)
        res = res.text
        # print(res)
        if "ftp_pass" in res or "dbtype" in res or "force_ssl" in res or "dbprefix" in res:
            result['message'] = res
            result['target_url'] = url
    return result


if __name__ == '__main__':
    # Invoke the PoC
    poc("http://127.0.0.1/")
1,755
tokenizecode/tokenizer.py
villmow/tokenizecode
0
2169935
from dataclasses import dataclass from pathlib import Path from typing import Optional, Union import tokenizers import torch import transformers from tensortree import TensorTree from tokenizecode.bpe import TokenizerBPE from tokenizecode.parser import CodeParser, CodeParsingOutput, Span from tokenizecode.utils import TensorTreeWithStrings, TensorTreeWithInts, is_tree_of_strings, get_project_root DEFAULT_TOKENIZER_BPE = get_project_root() / "trained_tokenizers/20211108_bpe30k-fpl40k-with-nonterminals.json" @dataclass class TokenizedCodeOutput(CodeParsingOutput): tree: TensorTreeWithInts # just changing the type def __post_init__(self): if len(self.positions) != len(self.tree): raise ValueError("Should have a position for every node in the tree.") class CodeTokenizer: """ Combines tokenizer and bpe. """ def __init__(self, tokenizer: Optional[TokenizerBPE] = None, parser: Optional[CodeParser] = None): if tokenizer is None: tokenizer = TokenizerBPE.from_pretrained(DEFAULT_TOKENIZER_BPE) self.tokenizer = tokenizer self._parser = parser if parser is not None else None @property def hf_tokenizer(self) -> transformers.PreTrainedTokenizerFast: return self.tokenizer.tokenizer @property def parser(self): if self._parser is None: self._parser = CodeParser() return self._parser def _inputs_to_tree(self, inputs: Union[str, TensorTreeWithStrings], lang: Optional[str] = None) -> TensorTreeWithStrings: """ Either parses a piece of code or uses the tree.""" if isinstance(inputs, str): code = inputs if lang is None: raise ValueError("Will parse code. Language needs to be known.") parsing_output = self.parse(code, lang) return parsing_output.tree elif isinstance(inputs, CodeParsingOutput): tree = inputs.tree else: tree = inputs if not is_tree_of_strings(tree): raise ValueError("Tree should consist of strings.") return tree @classmethod def from_file(cls, tokenizer_file_or_directory: Path): from tokenizecode.bpe import TokenizerBPE return cls(TokenizerBPE.from_pretrained(tokenizer_file_or_directory)) def parse(self, code: str, lang: str) -> CodeParsingOutput: """ Turns a piece code into a syntax tree. """ return self.parser.parse(code, lang) @staticmethod def unparse(tree: Union[TensorTreeWithStrings, CodeParsingOutput]) -> str: """ Turns a syntax_tree tree back into code. """ if isinstance(tree, CodeParsingOutput): tree = tree.tree if not is_tree_of_strings(tree): raise ValueError("Tree should consist of strings.") return CodeParser.unparse(tree) def encode(self, inputs: Union[str, TensorTreeWithStrings], lang: Optional[str] = None) -> tokenizers.Encoding: """ Encodes a piece of code or a syntax tree and returns an encoding for all tokens.""" tree = self._inputs_to_tree(inputs, lang) return self.tokenizer.encode_text(tree.leaves()) def encode_text(self, text: Union[str, list[str]]) -> tokenizers.Encoding: """ Encodes any piece of text or list of text. """ return self.tokenizer.encode_text(text) def encode_to_tree( self, inputs: Union[str, TensorTreeWithStrings], lang: Optional[str] = None ) -> TensorTreeWithInts: """ Encodes a piece of code or a syntax tree and returns **the full syntax tree** encoded (ie only ids as nodes).""" tree = self._inputs_to_tree(inputs, lang) return self.tokenizer.encode_tree(tree) def encode_lines(self, code: str, lang: str, line_start: int, line_end: int, mask_line_start: int = None, mask_line_end: int = None) -> Union[tokenizers.Encoding, tuple[tokenizers.Encoding, tokenizers.Encoding]]: """ Parses the whole file and then selects relevant lines. Lines start at 1. 
If mask line start is set, those lines will be cut out and the mask token is inserted. """ line_start -= 1 line_end -= 1 mask_line_start = mask_line_start - 1 if mask_line_start is not None else None mask_line_end = mask_line_end - 1 if mask_line_end is not None else None assert line_start <= mask_line_start <= mask_line_end <= line_end output = self.parse(code, lang) tree = output.tree context_tokens = [] mask_tokens = [] for idx, (node, span) in enumerate(zip(tree.node_data, output.positions)): if not tree.is_leaf(idx): continue if line_start <= span.start_point.row <= line_end: if mask_line_start is not None and mask_line_start == span.start_point.row: # keep first space if node.isspace() and not mask_tokens: context_tokens.append(node) context_tokens.append("___MASK___") mask_tokens.append(node) elif mask_line_start is not None and mask_line_start <= span.start_point.row <= mask_line_end: mask_tokens.append(node) else: context_tokens.append(node) if mask_line_start is not None: return self.tokenizer.encode_text(context_tokens), self.tokenizer.encode_text(mask_tokens) return self.tokenizer.encode_text(context_tokens) def decode(self, ids) -> str: if isinstance(ids, TensorTreeWithInts): tree = ids decoded_tree = self.decode_tree(tree, keep_bpe=False) return self.unparse(decoded_tree) return self.tokenizer.decode_text(ids) def decode_tree(self, tree: TensorTreeWithInts, keep_bpe: bool = False) -> TensorTreeWithStrings: """ Returns a tree with strings as node data. keep_bpe keeps artificial BPE nodes. """ return self.tokenizer.decode_tree(tree, keep_bpe) @staticmethod def tree_to_tokens(tree: TensorTree) -> Union[torch.Tensor, list[str]]: return tree.leaves() def add_specials(self, tokens): self.tokenizer.tokenizer.add_special_tokens( {"additional_special_tokens": tokens} ) def save(self, filepath: Union[str, Path], pretty: bool = False): self.tokenizer.tokenizer.save_pretrained(str(filepath), legacy_format=False) # self.tokenizer.tokenizer.save(str(filepath), pretty) def __len__(self): return len(self.hf_tokenizer)
6,672
amocrm_asterisk_ng/infrastructure/tracing/startup.py
iqtek/amocrn_asterisk_ng
0
2170524
from logging import Logger

from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware

from amocrm_asterisk_ng.infrastructure import ioc

from .TraceIdFilter import TraceIdFilter
from .TracingMiddleware import TracingMiddleware


__all__ = [
    "tracing_startup",
]


def tracing_startup() -> None:
    app = ioc.get_instance(FastAPI)
    app.add_middleware(BaseHTTPMiddleware, dispatch=TracingMiddleware())

    logger = ioc.get_instance(Logger)
    logger.addFilter(TraceIdFilter())
512
application.py
robmarano/aws_elastic_beanstalk_flask
1
2170266
from flask import Flask
import logging
from datetime import datetime
from random import randint
import requests
import json
from pprint import pprint

application = Flask(__name__, static_folder='simple-react/build', static_url_path='/')

# set a 'SECRET_KEY' to enable the Flask session cookies
application.secret_key = 'random development key'


# Routes
@application.route('/api/time')
def get_current_time():
    now = datetime.now()  # current date and time
    return {'datetime': '{}'.format(now.strftime("%m/%d/%Y, %H:%M:%S"))}


@application.route('/random/number')
def generate_random_number():
    random_number = randint(0, 100)
    return {'random_number': '{}'.format(random_number)}


@application.route('/random/string')
def generate_random_string():
    str_list = ['first', 'second', 'third']
    position = randint(0, len(str_list) - 1)
    random_string = str_list[position]
    return {'random_string': random_string}


@application.route('/random/quote')
def generate_random_quote():
    quotes = [{'random_quote': 'Yeah we all shine on, like the moon, and the stars, and the sun.', 'quote_author': '<NAME>'},
              {'random_quote': 'I begin with an idea and then it becomes something else.', 'quote_author': '<NAME>'},
              {'random_quote': 'Things do not change. We change.', 'quote_author': '<NAME>'},
              {'random_quote': 'If you take each challenge one step at a time, with faith in every footstep, your strength and understanding will increase.', 'quote_author': '<NAME>'},
              {'random_quote': 'All I can say about life is, Oh God, enjoy it!', 'quote_author': '<NAME>'},
              {'random_quote': 'A day of worry is more exhausting than a day of work.', 'quote_author': '<NAME>'}]
    # print(quotes)
    # quote_api_endpoint = 'https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=jsonp&jsonp=?'
    # quote_api_endpoint = 'https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json'
    # response = requests.get(quote_api_endpoint)
    # pprint(response)
    # if response.ok:
    #     data = json.loads(response.content)
    #     print(data)
    # quote = response.json()['quoteText']
    # author = response.json()['quoteAuthor']
    # return {'random_quote': quote, 'quote_author': author}
    position = randint(0, len(quotes) - 1)
    random_quote = quotes[position]
    print(random_quote)
    # quote = random_quote['random_quote']
    # author = random_quote['quote_author']
    return random_quote


@application.route('/')
def index():
    logging.warning('hello')
    return application.send_static_file('index.html')


#
# Mainline
#
if __name__ == '__main__':
    application.debug = False
    application.run()
2,621
src/wrappers/igraph.py
synwalk/synwalk-analysis
2
2169583
from clusim.clustering import Clustering from igraph import Graph def read_graph(filepath, format='edgelist') -> Graph: """Reads a graph from an edge list file and returns an igraph Graph object. Parameters ---------- filepath : str Path to the edge list file. format : str Format of the input file, e.g. 'edgelist', 'pajek',... Returns ------ Graph An igraph Graph object. """ graph = Graph.Read(filepath, format=format) graph.to_undirected() # igraph returns a directed graph by default graph.simplify(combine_edges='sum') # handle one-based vertex numbering: if edge list numbers vertices starting from 1, # the first vertex (vertex '0') will be an artifact if graph.vs[0].degree() == 0: graph.delete_vertices([0]) return graph def walktrap(filepath): """Wrapper around igraph's walktrap implementation. Parameters ---------- filepath : str Path to a edge list file. Returns ------ Clustering A clusim Clustering object. """ graph = read_graph(filepath) member_list = graph.community_walktrap().as_clustering().membership clu = Clustering().from_membership_list(member_list) return clu.relabel_clusters_by_size() def spinglass(filepath): """Wrapper around igraph's spinglass implementation. Parameters ---------- filepath : str Path to a edge list file. Returns ------ Clustering A clusim Clustering object. """ graph = read_graph(filepath) member_list = graph.community_spinglass().membership clu = Clustering().from_membership_list(member_list) return clu.relabel_clusters_by_size() def label_propagation(filepath): """Wrapper around igraph's label propagation implementation. Parameters ---------- filepath : str Path to a edge list file. Returns ------ Clustering A clusim Clustering object. """ graph = read_graph(filepath) member_list = graph.community_label_propagation().membership clu = Clustering().from_membership_list(member_list) return clu.relabel_clusters_by_size() def louvain(filepath): """Wrapper around igraph's multilevel implementation. Parameters ---------- filepath : str Path to a edge list file. Returns ------ Clustering A clusim Clustering object. """ graph = read_graph(filepath) member_list = graph.community_multilevel().membership clu = Clustering().from_membership_list(member_list) return clu.relabel_clusters_by_size() def edge_betweenness(filepath): """Wrapper around igraph's edge betweenness implementation. Parameters ---------- filepath : str Path to a edge list file. Returns ------ Clustering A clusim Clustering object. """ graph = read_graph(filepath) member_list = graph.community_edge_betweenness().as_clustering().membership clu = Clustering().from_membership_list(member_list) return clu.relabel_clusters_by_size()
3,108
tests/test_processing.py
PriceSpider-NeuIntel/imagewizard
4
2170486
import unittest import sys sys.path.append("..") import imagewizard import cv2 as cv import numpy.testing as npt class TestProcessing(unittest.TestCase): im_pro = imagewizard.Processing() street_org = cv.imread("data/original_images/street.png") lenna_org = cv.imread("data/original_images/lenna.png") # resize tests resize_actual = cv.imread( "data/processed_images/resize/shrink-300px-300px.png") resize_test = im_pro.resize(street_org, resize_width=300, resize_height=300, order='bgr') # grayscale tests img2grayscale_actual = cv.imread("data/processed_images/gray/gray.png", cv.IMREAD_GRAYSCALE) img2grayscale_test = im_pro.img2grayscale(lenna_org, order='bgr') img2grayscale_inv_actual = cv.imread( "data/processed_images/gray/gray_inverted.png", cv.IMREAD_GRAYSCALE) img2grayscale_inv_test = im_pro.img2grayscale(lenna_org, inverted=True, order='bgr') grayscale_actual = [img2grayscale_actual, img2grayscale_inv_actual] grayscale_test = [img2grayscale_test, img2grayscale_inv_test] # rotate tests rotate_actual = cv.imread("data/processed_images/rotate/rotate-90deg.png") rotate_test = im_pro.rotate(lenna_org, rotation_degree = 90, order='bgr') # crop tests crop_actual = cv.imread("data/processed_images/crop/crop1.png") crop_test = im_pro.crop(street_org, start_x = 50, end_x = 100, start_y = 50, end_y = 100, is_percentage = True, order='bgr') # mirror tests mirror_actual = cv.imread("data/processed_images/mirror/flip_x.png") mirror_test = im_pro.mirror(lenna_org, flip_code=1, order='bgr') # blur tests blur_actual = cv.imread("data/processed_images/blur/blur5.png") blur_test = im_pro.blur(street_org, blur_level = 5, order='bgr') # luminosity tests lum_actual = cv.imread("data/processed_images/luminosity/lum_100.png") lum_test = im_pro.luminosity(lenna_org, intensity_shift = 100, order = 'bgr') # segmentation tests rgb_colors_list = [[224, 166, 147], [110, 34, 71], [195, 98, 100]] seg_actual = cv.imread("data/processed_images/segmented_image.png") seg_test = im_pro.segmentation(lenna_org, rgb_colors_list, 'bgr') def test_resize(self): npt.assert_array_equal(self.resize_test, self.resize_actual, 'Resized image does not equal actual result') def test_grayscale(self): npt.assert_array_equal( self.grayscale_actual, self.grayscale_test, 'img2grayscale result does not equal actual result') def test_rotate(self): npt.assert_array_equal(self.rotate_actual, self.rotate_test, 'Rotated image does not equal actual result') def test_crop(self): npt.assert_array_equal(self.crop_actual, self.crop_test, 'Cropped image does not equal actual result') def test_mirror(self): npt.assert_array_equal(self.mirror_actual, self.mirror_test, 'Mirrored flip X image does not equal actual result') def test_blur(self): npt.assert_array_equal(self.blur_actual, self.blur_test, 'Blurred image does not equal actual result') def test_luminosity(self): npt.assert_array_equal(self.lum_actual, self.lum_test, 'luminosity 100 image does not equal actual result') def test_segmentation(self): npt.assert_array_equal(self.seg_actual, self.seg_test, 'segmentation image does not equal actual result')
3,817
gamelogic.py
itchono/dawn-of-dominion
0
2168550
# Dawn of Dominion Game Logic import os import json import base64 import logging from sessionmanager import Session # SET UP LOGGER gamelogger = logging.getLogger(__name__) gamelogger.setLevel(logging.INFO) c_handler = logging.StreamHandler() c_handler.setLevel(logging.INFO) gamelogger.addHandler(c_handler) STANDARD_FIELDS = ["name", "id", "description", "cost", "ATK", "DEF", "CHP", "MHP", "TER", "sprite_location"] # LOAD UNITS _, unitnames, _ = next(os.walk("static/units")) units: list = [] sprites = {} for unit in unitnames: try: with open(f"static/units/{unit}/{unit}.json", "rb") as f: unitdata: dict = json.load(f) # Validate unit for field in STANDARD_FIELDS: try: assert field in unitdata.keys() except AssertionError: gamelogger.exception( f"Unit {unit} is missing field: {field}", exc_info=e) # Read sprites try: with open(f"static/units/{unit}/{unitdata['sprite_location']}", "rb") as f: sprites[unitdata["id"]] = base64.b64encode(f.read()).decode("utf-8") except OSError as e: gamelogger.exception( f"Could not read sprite for: {unit}", exc_info=e) if all(field in unitdata.keys() for field in ["multiX", "multiY"]): # Multi-grid unit for i in range(unitdata["multiX"] * unitdata["multiY"]): try: ind_dot = unitdata['sprite_location'].index(".") with open(f"static/units/{unit}/{unitdata['sprite_location'][:ind_dot]}{i+1}{unitdata['sprite_location'][ind_dot:]}", "rb") as f: sprites[unitdata["id"] + str(i+1)] = base64.b64encode(f.read()).decode("utf-8") except OSError as e: gamelogger.exception( f"Could not read multi-sprite for: {unit}-{i+1}", exc_info=e) units.append(unitdata) except OSError as e: gamelogger.exception(f"Could not read unit: {unit}", exc_info=e) gamelogger.info(f"{len(units)} units loaded.") # LOAD UPGRADES _, upgradenames, _ = next(os.walk("static/upgrades")) upgrades: list = [] for upgrade in upgradenames: try: with open(f"static/upgrades/{upgrade}/{upgrade}.json", "rb") as f: upgradedata: dict = json.load(f) # Read sprites try: with open(f"static/upgrades/{upgrade}/{upgradedata['sprite_location']}", "rb") as f: sprites[upgradedata["id"]] = base64.b64encode(f.read()).decode("utf-8") except OSError as e: gamelogger.exception( f"Could not read sprite for: {upgrade}", exc_info=e) upgrades.append(upgradedata) except OSError as e: gamelogger.exception(f"Could not read upgrade: {upgrade}", exc_info=e) gamelogger.info(f"{len(upgrades)} upgrades loaded.") # LOAD MAPS with open("static/gamedata.json", "rb") as f: mapdata = json.load(f) def process_move(movedata: dict, session: Session) -> str: buttons = movedata["buttons"] for i in range(0, 2): unitcount = 0 for x in range(len(buttons[0])): for y in range(len(buttons[0])): if buttons[i][x][y]["data"]["unit"]: print("I see a unit", buttons[i][x][y]["data"]["unit"]["id"]) if buttons[i][x][y]["data"]["state"] == "occupied": unitcount += 1 if unitcount == 0 and movedata["turn"] > 0: print(f"player {i+1} has lost!") session.winner = 1-i movedata["gameover"] = True return json.dumps(movedata)
3,990
ndim_posegraph.py
joeaortiz/gbp
50
2170213
""" N-dim Pose Graph Estimation. Linear problem where we are estimating the position in N-dim space of a number of nodes. Linear factors connect each node to the M closest nodes in the space. The linear factors measure the distance between the nodes in each of the N dimensions. """ import numpy as np import argparse from gbp import gbp from gbp.factors import linear_displacement np.random.seed(0) parser = argparse.ArgumentParser() parser.add_argument("--n_varnodes", type=int, default=50, help="Number of variable nodes.") parser.add_argument("--dim", type=int, default=6, help="Dimensionality of space nodes exist in (dofs of variables)") parser.add_argument("--M", type=int, default=10, help="Each node is connected to its k closest neighbours by a measurement.") parser.add_argument("--gauss_noise_std", type=float, default=1., help="Standard deviation of Gaussian noise added to measurement model (pixels)") parser.add_argument("--n_iters", type=int, default=50, help="Number of iterations of GBP") args = parser.parse_args() print('Configs: \n', args) # Create priors priors_mu = np.random.rand(args.n_varnodes, args.dim) * 10 # grid goes from 0 to 10 along x and y axis prior_sigma = 3 * np.eye(args.dim) prior_lambda = np.linalg.inv(prior_sigma) priors_lambda = [prior_lambda] * args.n_varnodes priors_eta = [] for mu in priors_mu: priors_eta.append(np.dot(prior_lambda, mu)) # Generate connections between variables gt_measurements, noisy_measurements = [], [] measurements_nodeIDs = [] num_edges_per_node = np.zeros(args.n_varnodes) n_edges = 0 for i, mu in enumerate(priors_mu): dists = [] for j, mu1 in enumerate(priors_mu): dists.append(np.linalg.norm(mu - mu1)) for j in np.array(dists).argsort()[1:args.M + 1]: # As closest node is itself mu1 = priors_mu[j] if [j, i] not in measurements_nodeIDs: # To avoid double counting n_edges += 1 gt_measurements.append(mu - mu1) noisy_measurements.append(mu - mu1 + np.random.normal(0., args.gauss_noise_std, args.dim)) measurements_nodeIDs.append([i, j]) num_edges_per_node[i] += 1 num_edges_per_node[j] += 1 graph = gbp.FactorGraph(nonlinear_factors=False) # Initialize variable nodes for frames with prior for i in range(args.n_varnodes): new_var_node = gbp.VariableNode(i, args.dim) new_var_node.prior.eta = priors_eta[i] new_var_node.prior.lam = priors_lambda[i] graph.var_nodes.append(new_var_node) for f, measurement in enumerate(noisy_measurements): new_factor = gbp.Factor(f, [graph.var_nodes[measurements_nodeIDs[f][0]], graph.var_nodes[measurements_nodeIDs[f][1]]], measurement, args.gauss_noise_std, linear_displacement.meas_fn, linear_displacement.jac_fn, loss=None, mahalanobis_threshold=2) graph.var_nodes[measurements_nodeIDs[f][0]].adj_factors.append(new_factor) graph.var_nodes[measurements_nodeIDs[f][1]].adj_factors.append(new_factor) graph.factors.append(new_factor) graph.update_all_beliefs() graph.compute_all_factors() graph.n_var_nodes = args.n_varnodes graph.n_factor_nodes = len(noisy_measurements) graph.n_edges = 2 * len(noisy_measurements) print(f'Number of variable nodes {graph.n_var_nodes}') print(f'Number of edges per variable node {args.M}') print(f'Number of dofs at each variable node {args.dim}\n') mu, sigma = graph.joint_distribution_cov() # Get batch solution for i in range(args.n_iters): graph.synchronous_iteration() print(f'Iteration {i} // Energy {graph.energy():.4f} // ' f'Av distance of means from MAP {np.linalg.norm(graph.get_means() - mu):4f}')
3,953
__app__/crop/warnings.py
alan-turing-institute/CROP
9
2170306
import time from datetime import datetime, timedelta import pandas as pd from numpy import mean from sqlalchemy import and_ from __app__.crop.structure import ( TypeClass, SensorClass, SensorLocationClass, ReadingsZensieTRHClass, LocationClass, DataWarningsClass, ) from __app__.crop.utils import query_result_to_array from __app__.crop.constants import ( CONST_ADVANTICSYS, SQL_ENGINE, SQL_DBNAME, SQL_HOST, SQL_CONNECTION_STRING_CROP, ) from __app__.crop.db import connect_db, session_open, session_close db_name = "app_db" CONNECTION_STRING = "postgresql://cropdbadmin@cropapptestsqlserver:<EMAIL>:5432" # Try to connect to a database that exists success, log, engine = connect_db(CONNECTION_STRING, SQL_DBNAME) def db_query_tmpr_day_zenzie(session, location_zone, date_range): """ Function to query temperature readings from the Crop dabase's zenzie sensors located in the propagation area of the farm location_zone (str): the zone of the farm to query """ query = session.query( ReadingsZensieTRHClass.temperature, ReadingsZensieTRHClass.humidity, # ReadingsZensieTRHClass.sensor_id, ).filter( and_( LocationClass.zone == location_zone, SensorLocationClass.location_id == LocationClass.id, # propagation location # SensorLocationClass.location_id == location_id, ReadingsZensieTRHClass.sensor_id == SensorLocationClass.sensor_id, ReadingsZensieTRHClass.time_created >= date_range, ) ) readings = session.execute(query).fetchall() # TODO: r = query_result_to_array(readings) return readings def too_cold_in_propagation_room(readings, location_zone): """ Function to calculate if the temperature is too low in an area of the farm readings: list of temperature values queried from the db """ if len(readings) < 5: print("Missing data in %s - check sensor battery" % (location_zone)) else: average_temp_ = [item[0] for _, item in enumerate(readings)] average_temp = mean(average_temp_) min_temp = 23 if average_temp < min_temp: # issue warning print("Temperature is low in %s, add heater" % (location_zone)) return average_temp elif average_temp > 50: print(average_temp) return average_temp def too_humid_in_propagation_room(readings, location_zone): """ Function to calculate if the humitidy is too high in an area of the farm readings: list of humidity values queried from the db """ if len(readings) < 5: print("Missing data in %s - check sensor battery" % (location_zone)) else: average_hum_ = [item[1] for _, item in enumerate(readings)] average_hum = mean(average_hum_) max_hum = 80 if average_hum >= max_hum: # issue warning print("Too humid in %s room - ventilate or dehumidify" % (location_zone)) return average_hum def check_issues_in_farm(session): start_date = datetime.now() - timedelta(hours=24) propagation_zone = "Propagation" readings = db_query_tmpr_day_zenzie(session, propagation_zone, start_date) too_cold_in_propagation_room(readings, propagation_zone) too_humid_in_propagation_room(readings, propagation_zone) def issue_warnings(): None def upload_warnings(session, warning): start_time = time.time() session = session_open(engine) for idx, row in warning.iterrows(): data = DataWarningsClass( type_id=type_id, timestamp=idx, priority=prior, log=warning_log, # temperature=row["Temperature"], # humidity=row["Humidity"], ) session.add(data) if __name__ == "__main__": session = session_open(engine) check_issues_in_farm(session) session_close(session)
3,977
packages_src/turtle_tag_simulator/turtle_tag_simulator/tagger_control_system.py
martin0004/ros2_turtle_tag_simulator
0
2169907
#!/usr/bin/env python3 from geometry_msgs.msg import Twist import numpy as np import rclpy from rclpy.node import Node from turtle_tag_simulator_interfaces.msg import Turtle from turtle_tag_simulator_interfaces.msg import Turtles from turtle_tag_simulator_interfaces.srv import DeleteTurtle from turtlesim.msg import Pose class TaggerControlSystemNode(Node): def __init__(self): super().__init__("tagger_control_system") # Note: the turtlesim Pose message contains both pose information (x, y, theta) # AND velocity information (linear_velocity, angular_velocity) # Turtle tagger actual pose self.actual_pose = Pose() self.subscriber_actual_pose_ = self.create_subscription(Pose, "turtle1/pose", self.update_actual_pose_, 10) # Turtle tagger target pose # - will be pose of another turtle which is "hunted" by the turtle tagger self.target_pose = Pose() self.timer_target_pose = self.create_timer(0.1, self.update_target_pose_) # Names & poses of turtle players currently in the simulation self.players_ = Turtles() self.subscriber_players_ = self.create_subscription(Turtles, "players", self.update_players_, 10) # Check if a player was tagged self.timer_ = self.create_timer(0.1, self.check_if_player_tagged_) # Command velocity to turtle tagger self.publisher_cmd_vel_ = self.create_publisher(Twist, "/turtle1/cmd_vel", 10) self.timer_cmd_vel_ = self.create_timer(0.1, self.publish_cmd_vel_) # Distance below which the tagger is assumed to have tagged a player self.d_tagged = 0.5 # Angle error below which a linear velocity command is sent to the tagger self.e_theta_threshold = 0.25 # rad # Gains for P controllers self.k_p_theta = 2.0 self.k_p_d = 2.0 # Info message self.get_logger().info("Turtle tagger control system started.") def update_actual_pose_(self, msg): """Update internal actual pose of turtle tagger.""" self.actual_pose = msg def update_target_pose_(self): """Identifies the pose of the player closest to the tagger.""" # Initialize target pose and distance to target pose target_pose = Pose() target_pose.x = self.actual_pose.x target_pose.y = self.actual_pose.y target_pose.theta = 0.0 # Placeholder value - target pose theta not used by control system d_target_pose = np.Inf # Update target pose with pose of closest player turtle for player in self.players_.turtles: d = ( (self.actual_pose.x-player.x)**2 + (self.actual_pose.y-player.y)**2 )**0.5 if d < d_target_pose: # Target pose x and y values target_pose.x = player.x target_pose.y = player.y # Target pose theta value # (set equal to the angle would have if it were at its actual x-y # location and rotated to point at the target location) delta_x = target_pose.x - self.actual_pose.x delta_y = target_pose.y - self.actual_pose.y target_pose.theta = np.arctan2(delta_y, delta_x) # Updated distance to target pose d_target_pose = d self.target_pose = target_pose def update_players_(self, msg): """Update internal list of turler players currently in the simulator.""" self.players_ = msg def check_if_player_tagged_(self): """Checks if the tagger is close enough of a player to tag it.""" for player in self.players_.turtles: d = ( (self.actual_pose.x - player.x)**2 + (self.actual_pose.y - player.y)**2 )**0.5 # If tagger close enough of player, player assumed tagged and deleted. 
if d < self.d_tagged: client = self.create_client(DeleteTurtle, "delete_turtle") while not client.wait_for_service(1.0): self.get_logger().warn("Waiting for delete_turtle service...") request = DeleteTurtle.Request() request.name = player.name client.call_async(request) # No need to manipulate response, so no future object. def publish_cmd_vel_(self): """Derive and publish command velocity for turtle tagger.""" cmd_vel = Twist() # Pose angle error e_theta = self.target_pose.theta - self.actual_pose.theta; # Correction to keep only shortest rotation angle if e_theta < -np.pi: e_theta += 2*np.pi elif e_theta > np.pi: e_theta -= 2*np.pi # Angular velocity command (P controller) # - command only on z since this is a 2D simulation cmd_vel.angular.z = self.k_p_theta * e_theta # Linear velocity command (P controller) # - turtlesim linear command velocities are expressed in the turtle body frame # therefore we only derive velocity x d = ( (self.target_pose.x - self.actual_pose.x)**2 + (self.target_pose.y - self.actual_pose.y)**2 )**0.5 cmd_vel.linear.x = self.k_p_d * d # Step function (set linear velocity command to 0 if angle error high) if abs(e_theta) >= self.e_theta_threshold: cmd_vel.linear.x = 0.0 # Publish command velocity self.publisher_cmd_vel_.publish(cmd_vel) def main(args=None): rclpy.init(args=args) node = TaggerControlSystemNode() rclpy.spin(node) rclpy.shutdown() if __name__ == "__main__": main()
5,720
genbib.py
hesusruiz/PublicPermissionedBlockchain
0
2170863
# Generate a bibliography from each cite in an asciidoc source document import json import sys import shutil import re import requests from collections import OrderedDict # The output file to be included in the corresponding section of the source document biblioFile = "bibliography.adoc" # Markers for bibliography section bibBegin = "//JRMBIB-BEGIN" bibEnd = "//JRMBIB-END" # The headers for the API call to Zotero headers = { "Content-Type": "application/json", "Accept": "application/json" } # The payload as a template where "citekey" should be replaced by the actual citekey payloadTemplate = """{ "jsonrpc": "2.0", "method": "item.bibliography", "params": [ ["citekey"], {"id": "http://www.zotero.org/styles/ieee"} ] }""" # Get all unique citekeys from a file def get_all_citekeys(inputFile): # Read the input file in memory (should not be a problem with normal documents) with open(inputFile, encoding='utf-8-sig') as f: sourceDocument = f.read() # Compile the regex string search with the format of the citekey: <<citekey>> p = re.compile('<<(.+?)>>') # Find all citeKeys in the source document # Some of them will be figures or tables, but it does not matter citeKeys = p.findall(sourceDocument) citeKeys = list(OrderedDict.fromkeys(citeKeys)) return citeKeys # Ask Zotero for the formatted bibliography item def find_citekey(citekey): # Replace the citekey in the template, to build the actual request payload = payloadTemplate.replace("citekey", citekey) # Ask Zotero via the local web API. Zotero must be running in the local machine r = requests.post('http://127.0.0.1:23119/better-bibtex/json-rpc', data = payload, headers = headers) # Check if we found anything if not r.ok: return None # Return the result field inside the JSON-RPC reply result = r.json()["result"] return result def generate_bibliography_lines(citeKeys): # Initialize the bibliography list biblio_lines = [] # Process each key for citeKey in citeKeys: # Get the bibliography item biblio_item = find_citekey(citeKey) if biblio_item is None: print(f"{citeKey}: No data available") continue # Build the prefix for Asciidoc bibliography prefix = f"- [[[{citeKey}]]] " # Strip the standard prefix from the IEE format: [1] biblio_item = prefix + biblio_item.replace("[1]", "") # Append the line biblio_lines.append(biblio_item) return biblio_lines # Check if we have received the name of the source file # Otherwise print usage if len(sys.argv) < 2: print("Need name of asciidoc file") exit(1) inputFileName = sys.argv[1] # Check that the filename ends in "-source.asc" if not inputFileName.endswith("-source.asc"): print("Name should end in -source.asc") exit(1) outputFileName = inputFileName.replace("-source", "") citeKeys = get_all_citekeys(inputFileName) bibLines = generate_bibliography_lines(citeKeys) insideBibSection = False # Open input and output files with open(outputFileName, "w", encoding='utf-8-sig') as outFile: with open(inputFileName, encoding='utf-8-sig') as inFile: # Process each line of the input file for line in inFile: # Write the line if not inside the bibliography section if not insideBibSection: outFile.write(line) # Check if we are entering the bibliography section if line.startswith(bibBegin): insideBibSection = True continue # Check if we are exiting the bibliography section if line.startswith(bibEnd): # Write the new bibliography lines for bl in bibLines: outFile.write(bl) # Write the marker outFile.write(line) # Mark that we are exiting insideBibSection = False
4,061
th2_common/schema/filter/strategy/impl/default_filter_strategy.py
ConnectDIY/th2-common-py
0
2170255
# Copyright 2020-2020 Exactpro (Exactpro Systems Limited)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

from google.protobuf.message import Message

from th2_common.schema.filter.strategy.filter_strategy import FilterStrategy
from th2_common.schema.message.configuration.field_filter_configuration import FieldFilterConfiguration
from th2_common.schema.message.configuration.router_filter import RouterFilter
from th2_common.schema.strategy.field_extraction.impl.th2_batch_msg_field_extraction import Th2BatchMsgFieldExtraction


class DefaultFilterStrategy(FilterStrategy):

    def __init__(self, extract_strategy=Th2BatchMsgFieldExtraction()) -> None:
        self.extract_strategy = extract_strategy

    def verify(self, message: Message, router_filter: RouterFilter = None, router_filters: List[RouterFilter] = None):
        if router_filters is None:
            msg_field_filters = dict(router_filter.get_message())
            msg_field_filters.update(router_filter.get_metadata())
            return self.check_values(self.extract_strategy.get_fields(message), msg_field_filters)
        else:
            if len(router_filters) == 0:
                return True
            for fields_filter in router_filters:
                if self.verify(message=message, router_filter=fields_filter):
                    return True
            return False

    def check_values(self, message_fields: {str: str}, field_filters: {str: FieldFilterConfiguration}) -> bool:
        for field_name in field_filters.keys():
            field_filter = field_filters[field_name]
            msg_field_value = message_fields[field_name]
            if not self.check_value(msg_field_value, field_filter):
                return False
        return True

    def check_value(self, value1, filter_configuration: FieldFilterConfiguration):
        if len(value1) == 0:
            return False
        value2 = filter_configuration.value
        if filter_configuration.operation == "EQUAL":
            return value1 == value2
        elif filter_configuration.operation == "NOT_EQUAL":
            return value1 != value2
        elif filter_configuration.operation == "EMPTY":
            return len(value1) == 0
        elif filter_configuration.operation == "NOT_EMPTY":
            return len(value1) != 0
        else:
            return False
2,889
src/python/runner/Filepaths.py
dsyme/ADBench
58
2170036
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import os


def filepath_to_basename(filepath):
    filename = os.path.basename(filepath)
    return os.path.splitext(filename)[0]


def modulepath_to_basename(filepath):
    filename = os.path.basename(filepath)

    # python module name should contain "GMM", "BA", "Hand" or "LSTM" at the end
    pos = max(
        filename.rfind("GMM"),
        filename.rfind("BA"),
        filename.rfind("Hand"),
        filename.rfind("LSTM")
    )

    basename = filename[: pos]
    return basename


def filepath_to_dirname(filepath):
    dirname = os.path.dirname(filepath)
    if not dirname:
        dirname = "."
    return dirname
701
bronto/schemas.py
gjo/bronto
0
2168499
# -*- coding: utf-8 -*-

import colander as c


class InvalidationKey(c.SchemaNode):
    schema_type = c.String


class InvalidationKeyList(c.SequenceSchema):
    invalidation_key = InvalidationKey(name='invalidationKey')


class CacheInvalidateSchema(c.MappingSchema):
    invalidation_keys = InvalidationKeyList(name='invalidationKeys')


cache_invalidate_schema = CacheInvalidateSchema()
invalidation_key_list_schema = InvalidationKeyList()
444
app/src/configuration/loader.py
syamamura0x00/SlackEventAPIBase
0
2170570
import os

ENV_NAME_FLASK_DEBUG = "FLASK_DEBUG"


class _Config(object):

    def __init__(self):
        self.FLASK_DEBUG = self._load(ENV_NAME_FLASK_DEBUG)

    def _load(self, env_name):
        return os.getenv(env_name)
224
ATAC-Seq_Analysis/ATAC_Seq_Sam_Subsampler.py
ascendo/REcount
1
2169894
#START HERE ###############################
import os
import sys
import numpy as np
import random
from shutil import copyfile

folder_name = "<FOLDER_CONTAINING_SAM_FILES>"
os.chdir(folder_name)
os.mkdir("subsampled")
out_dir = os.path.join(folder_name, "subsampled")
data_file_names = os.listdir(folder_name)

#depth = 20000000
depth = 250000

files = []
for i in data_file_names:
    if i[-10:] == "nodupl.sam":
        files.append((folder_name + "/" + i))

read_counts = []
for i in files:
    count = 0
    with open(i) as input:
        for line in input:
            count += 1
    read_counts.append(((count - 23)) / 2)

# subsample
for i, item in enumerate(files):
    fname = item
    save_name1 = (fname[:-4] + "_subsampled.sam")
    if read_counts[i] < depth:
        dst = os.path.join(out_dir, save_name1.split("/")[-1])
        src = item
        copyfile(src, dst)
    else:
        c = list(range(1, int(read_counts[i] - 1)))
        inds = random.sample(c, depth)
        inds.sort()
        inds.reverse()  # Note: pop from right side is much more time efficient
        fname = item
        save_name = (fname[:-4] + "_subsampled.sam")
        save_name1 = os.path.join(out_dir, save_name.split("/")[-1])
        save_file1 = open(save_name1, "w")
        newtab = '\t'
        newline = '\n'
        with open(fname) as input:
            count = 0
            for line in input:
                count += 1
                if line[0] == "@":  # header
                    save_file1.write(line)
                    count -= 1
                elif inds == []:
                    break
                elif count == (inds[-1] * 2 - 1):
                    save_file1.write(line)
                elif count == (inds[-1] * 2):
                    inds.pop(-1)
                    save_file1.write(line)
        save_file1.close()
1,537
quantities/gradient_temperature_position.py
chenmich/Quantities
1
2167781
from .quantity_type import QuantityType
from .quantity import Quantity
from .units import Unit
from .length import LengthType
from .thermodynamic_temperature import ThermodynamicTemperatureType


class kelvin_per_meter(Unit):
    profile = {
        "name": "<NAME>",
        "symbol": "K*m-1",
        "express_by_SI_base": "K*m-1",
        "express_by_SI": ""
    }


class GradientTemperaturePositionType(QuantityType):
    pri_unit = kelvin_per_meter
    SI_conherent_unit = pri_unit

    @classmethod
    def register_type(cls):
        cls.source[(ThermodynamicTemperatureType, '/', LengthType)] = cls
        cls.source[(cls, '*', LengthType)] = ThermodynamicTemperatureType
        cls.source[(LengthType, '*', cls)] = ThermodynamicTemperatureType
        cls.source[(ThermodynamicTemperatureType, '/', cls)] = LengthType


class GradientTemperaturePosition(Quantity):
    def __init__(self, value, unit=None):
        # fixed: the original called super.__init__ without parentheses
        super().__init__(value, GradientTemperaturePositionType, unit)
982
pythonCodes/patankarMethods.py
accdavlo/HighOrderODESolvers
0
2170236
import numpy as np  # added: the functions below rely on NumPy arrays


## Modified Patankar 1st order scheme
def patankar(prod_dest, tspan, u0):
    ''' Input:
    prod_dest is the function that returns the matrices p_{i,j}(c) and d_{i,j}(c)
    tspan is the time vector
    u0 is the initial condition
    '''
    dim = len(u0)              # Dimension of the problem
    Nt = len(tspan)            # Length of time span
    U = np.zeros((dim, Nt))    # Solution vector
    p = np.zeros((dim, dim))   # Temporary production matrix
    d = np.zeros((dim, dim))   # Temporary destruction matrix
    U[:, 0] = u0
    for it in range(1, Nt):    # Loop over timesteps
        dt = tspan[it] - tspan[it-1]
        p, d = prod_dest(U[:, it-1])  # Computing the production and destruction at the previous timestep
        for i in range(dim):          # Adding all the terms
            lhs = 1.                  # Initializing the lhs coefficients
            rhs = U[i, it-1]          # Initializing the rhs
            for j in range(dim):
                lhs = lhs + dt*d[i, j]/U[i, it-1]
                rhs = rhs + dt*p[i, j]
            U[i, it] = rhs/lhs        # Solve the final system
    return tspan, U


## Modified Patankar 1st order scheme
def mPEuler(prod_dest, tspan, u0):
    ''' Input:
    prod_dest is the function that returns the matrices p_{i,j}(c) and d_{i,j}(c)
    tspan is the time vector
    u0 is the initial condition
    '''
    dim = len(u0)              # Dimension of the problem
    Nt = len(tspan)            # Length of time span
    U = np.zeros((dim, Nt))    # Solution vector
    p = np.zeros((dim, dim))   # Temporary production matrix
    d = np.zeros((dim, dim))   # Temporary destruction matrix
    U[:, 0] = u0
    for it in range(1, Nt):    # Loop over timesteps
        dt = tspan[it] - tspan[it-1]
        p, d = prod_dest(U[:, it-1])  # Computing the production and destruction at the previous timestep
        MM = np.eye(dim)              # Initializing the mass matrix
        for i in range(dim):          # Adding all the terms
            for j in range(dim):
                MM[i, j] = MM[i, j] - dt*p[i, j]/U[j, it-1]
                MM[i, i] = MM[i, i] + dt*d[i, j]/U[i, it-1]
        U[:, it] = np.linalg.solve(MM, U[:, it-1])  # Solve the final system
    return tspan, U
2,172
sort.py
iandees/usps-collection-boxes
20
2170832
from itertools import groupby
import argparse
import json
import os
import sys


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=argparse.FileType('r'), default=sys.stdin,
                        help='The input ndjson from the USPS collection box spider')
    parser.add_argument('output', type=argparse.FileType('w'), default=sys.stdout,
                        help='The output ndjson to write the sorted data to')
    parser.add_argument('--prefix-dir',
                        help='Specify this to also write data into files by prefix of the ID for each collection box. The value is the directory to write to.')
    parser.add_argument('--prefix-chars', type=int, default=2,
                        help='The number of characters of the collection box ID to use as prefix')
    args = parser.parse_args()

    features = []
    for line in args.input:
        features.append(json.loads(line))

    for feature in sorted(features, key=lambda f: f['properties']['ref']):
        args.output.write(json.dumps(feature, sort_keys=True) + '\n')

    if args.prefix_chars and args.prefix_dir:
        for k, features in groupby(sorted(features, key=lambda f: f['properties']['ref']), key=lambda i: i['properties']['ref'][:args.prefix_chars]):
            with open(os.path.join(args.prefix_dir, '%s.ndjson' % k), 'w') as f:
                for feature in features:
                    f.write(json.dumps(feature, sort_keys=True) + '\n')
1,396
dfirtrack_main/admin.py
cclauss/dfirtrack
273
2170577
from django.contrib import admin

from dfirtrack_main.models import (
    Analysisstatus,
    Analystmemo,
    Case,
    Casepriority,
    Casestatus,
    Casetype,
    Company,
    Contact,
    Division,
    Dnsname,
    Domain,
    Domainuser,
    Entry,
    Headline,
    Ip,
    Location,
    Note,
    Notestatus,
    Os,
    Osarch,
    Osimportname,
    Reason,
    Recommendation,
    Reportitem,
    Serviceprovider,
    System,
    Systemstatus,
    Systemtype,
    Systemuser,
    Tag,
    Task,
    Taskname,
)

# all registered models will show up in admin app
admin.site.register(Analysisstatus)
admin.site.register(Analystmemo)
admin.site.register(Case)
admin.site.register(Casepriority)
admin.site.register(Casestatus)
admin.site.register(Casetype)
admin.site.register(Company)
admin.site.register(Contact)
admin.site.register(Division)
admin.site.register(Dnsname)
admin.site.register(Domain)
admin.site.register(Domainuser)
admin.site.register(Entry)
admin.site.register(Headline)
admin.site.register(Ip)
admin.site.register(Location)
admin.site.register(Note)
admin.site.register(Notestatus)
admin.site.register(Os)
admin.site.register(Osarch)
admin.site.register(Osimportname)
admin.site.register(Reason)
admin.site.register(Recommendation)
admin.site.register(Reportitem)
admin.site.register(Serviceprovider)
admin.site.register(System)
admin.site.register(Systemstatus)
admin.site.register(Systemtype)
admin.site.register(Systemuser)
admin.site.register(Tag)
admin.site.register(Task)
admin.site.register(Taskname)
1,537
env/Lib/site-packages/OpenGL/raw/GLES2/OES/texture_cube_map_array.py
5gconnectedbike/Navio2
210
2170907
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C

import ctypes
_EXTENSION_NAME = 'GLES2_OES_texture_cube_map_array'
def _f( function ):
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OES_texture_cube_map_array',error_checker=_errors._error_checker)
GL_IMAGE_CUBE_MAP_ARRAY_OES=_C('GL_IMAGE_CUBE_MAP_ARRAY_OES',0x9054)
GL_INT_IMAGE_CUBE_MAP_ARRAY_OES=_C('GL_INT_IMAGE_CUBE_MAP_ARRAY_OES',0x905F)
GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES=_C('GL_INT_SAMPLER_CUBE_MAP_ARRAY_OES',0x900E)
GL_SAMPLER_CUBE_MAP_ARRAY_OES=_C('GL_SAMPLER_CUBE_MAP_ARRAY_OES',0x900C)
GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES=_C('GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_OES',0x900D)
GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES=_C('GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_OES',0x900A)
GL_TEXTURE_CUBE_MAP_ARRAY_OES=_C('GL_TEXTURE_CUBE_MAP_ARRAY_OES',0x9009)
GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES=_C('GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_OES',0x906A)
GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES=_C('GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_OES',0x900F)
1,275
part_a/a_functions.py
cconnerolson/aerospace_assignment_6
1
2169991
from sympy import * def converge_a(T_jj = 3000, d_T = 1, d_gamma = 1): gamma_jj = schomate(T_jj) while d_T > 0.001 or d_gamma > 0.001: T_ii = T_ratio(gamma_jj) gamma_ii = schomate(T_ii) d_T, d_gamma = abs((T_ii - T_jj) / T_jj), abs((gamma_ii - gamma_jj) / gamma_jj) T_jj, gamma_jj = T_ii, gamma_ii return(T_ii, gamma_ii) def schomate(T): """ Shomate equation to calculate the specific heat of water vapor for a given temperature T, converted to the ratio of specific heats. :param T: Temperature [K] :return: Ratio of specific heats c_p/c_v [-] """ t = T / 1000 if 500 <= T < 1700: a, b, c, d, e = [30.092, 6.832514, 6.793425, -2.53448, 0.082139] elif T == 1700: return 2.7175 elif 1700 < T <= 6000: a, b, c, d, e = [41.96426, 8.622053, -1.49978, 0.098119, -11.1576] else: raise ValueError('Input temperature outside valid domain') c_p = (a + (b * t) + (c * t**2) + (d * t**3) + (e / t**2)) / 18 R = 0.4615 c_v = c_p - R g = c_p / c_v return g def T_ratio(gamma_star, T_0 = 3_200): """ :param gamma_star: ratio of specific heats :param T_0: inlet temperature, given. :return: static temperature at M = 1 for the given gamma. """ return T_0 / (1 + ((gamma_star - 1) / 2)) def A_eq_g(gam_e=1.2, gam_s=1.1758101459676122): g_e = Symbol('γₑ') g_s = Symbol('γ*') A_r = Symbol('Aₑ/A*') M_e = Symbol('Mₑ') e1 = sqrt(g_s / g_e) e2 = 1 / M_e e3 = (1 + ((g_e - 1) / 2) * M_e**2)**((g_e + 1) / (2 * (g_e - 1))) e4 = ((g_s + 1) / 2)**((g_s + 1) / (2 * (g_s - 1))) eq = (e1 * e2 * e3) / e4 - A_r eq = eq.subs([(g_e, gam_e), (g_s, gam_s)]) return eq fn = A_eq_g() # print(fn) def A_eq_A(A_ratio, eq=fn): return eq.subs([('Aₑ/A*', A_ratio)]) A_eq_A(1, fn)
1,572
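Assuming the functions above are importable from a_functions, a quick smoke test of the fixed-point iteration and the symbolic area-ratio expression might look like this; the printed values are simply whatever the routines return, nothing here is asserted from the assignment text.

from a_functions import converge_a, A_eq_g  # module path as in part_a/ above (assumed importable)

# Iterate chamber temperature and specific-heat ratio until both change
# by less than 0.1 % between passes.
T_star, gamma_star = converge_a(T_jj=3000)
print(T_star, gamma_star)

# Symbolic area-ratio relation evaluated with the converged gamma*;
# Mₑ and Aₑ/A* remain free symbols in the returned expression.
expr = A_eq_g(gam_e=1.2, gam_s=gamma_star)
print(expr)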
pulumi/aws/kic-image-build/ingress_controller_image_puller_args.py
resurfaceio/kic-reference-architectures
72
2170716
from typing import Optional import pulumi @pulumi.input_type class IngressControllerImagePullerArgs: """Arguments needed for instantiating the IngressControllerImagePullerProvider""" def __init__(self, image_name: Optional[pulumi.Input[str]] = None): self.__dict__ = dict() pulumi.set(self, 'image_name', image_name) @property @pulumi.getter def image_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "image_name")
480
tests/integration/test_master_coordinator.py
dswiecki/karapace
0
2170441
""" karapace - master coordination test Copyright (c) 2019 Aiven Ltd See LICENSE for details """ from contextlib import closing from karapace.config import set_config_defaults from karapace.master_coordinator import MasterCoordinator from tests.utils import get_random_port, KafkaServers, new_random_name, TESTS_PORT_RANGE import asyncio import pytest import requests import time import ujson class Timeout(Exception): pass def init_admin(config): mc = MasterCoordinator(config=config) mc.start() return mc def is_master(mc: MasterCoordinator) -> bool: """True if `mc` is the master. This takes care of a race condition were the flag `master` is set but `master_url` is not yet set. """ return bool(mc.sc and mc.sc.are_we_master and mc.sc.master_url) def has_master(mc: MasterCoordinator) -> bool: """True if `mc` has a master.""" return bool(mc.sc and not mc.sc.are_we_master and mc.sc.master_url) @pytest.mark.timeout(60) # Github workflows need a bit of extra time @pytest.mark.parametrize("strategy", ["lowest", "highest"]) def test_master_selection(kafka_servers: KafkaServers, strategy: str) -> None: # Use random port to allow for parallel runs. port1 = get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[]) port2 = get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[port1]) port_aa, port_bb = sorted((port1, port2)) client_id_aa = new_random_name("master_selection_aa_") client_id_bb = new_random_name("master_selection_bb_") group_id = new_random_name("group_id") config_aa = set_config_defaults( { "advertised_hostname": "127.0.0.1", "bootstrap_uri": kafka_servers.bootstrap_servers, "client_id": client_id_aa, "group_id": group_id, "port": port_aa, "master_election_strategy": strategy, } ) config_bb = set_config_defaults( { "advertised_hostname": "127.0.0.1", "bootstrap_uri": kafka_servers.bootstrap_servers, "client_id": client_id_bb, "group_id": group_id, "port": port_bb, "master_election_strategy": strategy, } ) with closing(init_admin(config_aa)) as mc_aa, closing(init_admin(config_bb)) as mc_bb: if strategy == "lowest": master = mc_aa slave = mc_bb else: master = mc_bb slave = mc_aa # Wait for the election to happen while not is_master(master): time.sleep(0.3) while not has_master(slave): time.sleep(0.3) # Make sure the end configuration is as expected master_url = f'http://{master.config["host"]}:{master.config["port"]}' assert master.sc.election_strategy == strategy assert slave.sc.election_strategy == strategy assert master.sc.master_url == master_url assert slave.sc.master_url == master_url def test_no_eligible_master(kafka_servers: KafkaServers) -> None: client_id = new_random_name("master_selection_") group_id = new_random_name("group_id") config_aa = set_config_defaults( { "advertised_hostname": "127.0.0.1", "bootstrap_uri": kafka_servers.bootstrap_servers, "client_id": client_id, "group_id": group_id, "port": get_random_port(port_range=TESTS_PORT_RANGE, blacklist=[]), "master_eligibility": False, } ) with closing(init_admin(config_aa)) as mc: # Wait for the election to happen, ie. 
flag is not None while not mc.sc or mc.sc.are_we_master is None: time.sleep(0.3) # Make sure the end configuration is as expected assert mc.sc.are_we_master is False assert mc.sc.master_url is None async def test_schema_request_forwarding(registry_async_pair): master_url, slave_url = registry_async_pair max_tries, counter = 5, 0 wait_time = 0.5 subject = new_random_name("subject") schema = {"type": "string"} other_schema = {"type": "int"} # Config updates for subj_path in [None, subject]: if subj_path: path = f"config/{subject}" else: path = "config" for compat in ["FULL", "BACKWARD", "FORWARD", "NONE"]: resp = requests.put(f"{slave_url}/{path}", json={"compatibility": compat}) assert resp.ok while True: if counter >= max_tries: raise Exception("Compat update not propagated") resp = requests.get(f"{master_url}/{path}") if not resp.ok: print(f"Invalid http status code: {resp.status_code}") continue data = resp.json() if "compatibilityLevel" not in data: print(f"Invalid response: {data}") counter += 1 await asyncio.sleep(wait_time) continue if data["compatibilityLevel"] != compat: print(f"Bad compatibility: {data}") counter += 1 await asyncio.sleep(wait_time) continue break # New schema updates, last compatibility is None for s in [schema, other_schema]: resp = requests.post(f"{slave_url}/subjects/{subject}/versions", json={"schema": ujson.dumps(s)}) assert resp.ok data = resp.json() assert "id" in data, data counter = 0 while True: if counter >= max_tries: raise Exception("Subject schema data not propagated yet") resp = requests.get(f"{master_url}/subjects/{subject}/versions") if not resp.ok: print(f"Invalid http status code: {resp.status_code}") counter += 1 continue data = resp.json() if not data: print(f"No versions registered for subject {subject} yet") counter += 1 continue assert len(data) == 2, data assert data[0] == 1, data print("Subject schema data propagated") break # Schema deletions resp = requests.delete(f"{slave_url}/subjects/{subject}/versions/1") assert resp.ok counter = 0 while True: if counter >= max_tries: raise Exception("Subject version deletion not propagated yet") resp = requests.get(f"{master_url}/subjects/{subject}/versions/1") if resp.ok: print(f"Subject {subject} still has version 1 on master") counter += 1 continue assert resp.status_code == 404 print(f"Subject {subject} no longer has version 1") break # Subject deletion resp = requests.get(f"{master_url}/subjects/") assert resp.ok data = resp.json() assert subject in data resp = requests.delete(f"{slave_url}/subjects/{subject}") assert resp.ok counter = 0 while True: if counter >= max_tries: raise Exception("Subject deletion not propagated yet") resp = requests.get(f"{master_url}/subjects/") if not resp.ok: print("Could not retrieve subject list on master") counter += 1 continue data = resp.json() assert subject not in data break
7,406
Orses_Validator_Core/NonNewBlockValidator.py
snwokenk/Orses_Core
0
2170455
""" used to validate blocks that are not new. it does not automatically propagate valid blocks """ from Orses_Validator_Core.BaseBlockValidator import BaseBlockValidator # todo: create validation logic class NonNewBlockValidator(BaseBlockValidator): def __init__(self, block, admin_inst,is_newly_created=False, q_object=None): super().__init__( block=block, admin_inst=admin_inst, is_newly_created=is_newly_created, q_object=q_object ) def validate(self): return True # for now return true
578
playground.py
aditya2kx/GooglePythonExcercises
0
2164861
#!/usr/bin/python -tt import sys def main(): print(repeat('"Yipee ', True)) def repeat(s, exclaim): result = s * 3 if exclaim: result = result + '!!!' return result if __name__ == '__main__': main()
232
growthstreet/settings.py
rollokb/django-channels-celery-websocket-example
7
2170015
""" Django settings for growthstreet project. Generated by 'django-admin startproject' using Django 1.8. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get( 'SECRET_KEY', '<KEY> ) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DEBUG', True) ALLOWED_HOSTS = [ 'localhost', 'lvh.me', os.environ.get('AWS_HOST', ''), ] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'channels', 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.google', 'customers', 'loans', ) MIDDLEWARE = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ] ROOT_URLCONF = 'growthstreet.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ 'templates/' ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.request', 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] redis_host = os.environ.get('REDIS_HOST', 'localhost') # Channel layer definitions # http://channels.readthedocs.org/en/latest/deploying.html#setting-up-a-channel-backend CHANNEL_LAYERS = { "default": { "BACKEND": "asgi_redis.RedisChannelLayer", "CONFIG": { "hosts": [(redis_host, 6379)], }, "ROUTING": "growthstreet.routing.channel_routing", }, } WSGI_APPLICATION = 'growthstreet.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases POSTGRES_HOST = os.environ.get('POSTGRES_HOST', 'localhost') DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'postgres', 'USER': 'postgres', 'PASSWORD': 'postgres', 'HOST': 'db', 'PORT': '5432', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True SITE_ID = 1 # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGGING = { 'disable_existing_loggers': False, 'version': 1, 'handlers': { 'console': { # logging handler that outputs log messages to terminal 'class': 'logging.StreamHandler', 'level': 'DEBUG', # message level to be written to console }, }, 'loggers': { '': { # this 
sets root level logger to log debug and higher level # logs to console. All other loggers inherit settings from # root level logger. 'handlers': ['console'], 'level': 'DEBUG', 'propagate': False, # this tells logger to send logging message # to its parent (will send if set to True) }, 'django.db': { # django also has database level logging }, }, } AUTH_USER_MODEL = 'customers.Customer' INTERNAL_IPS = [ 'lvh.me', 'localhost', ] COMPANIES_HOUSE_API_KEY = '<KEY>' TWILLO_API_KEY = "AC12c8fb8a45caae7c9c4e35a40acc3c98" TWILLO_AUTH_TOKEN = os.environ.get('TWILLO_AUTH_TOKEN') TWILLO_FROM_NUMBER = "+441915801275" CELERY_BROKER_URL = 'redis://{}:6379/0'.format(redis_host) CELERY_REDIS_HOST = redis_host CELERY_REDIS_PORT = 6379 CELERY_REDIS_DB = 0 CELERY_RESULT_BACKEND = 'redis' # use json format for everything CELERY_ACCEPT_CONTENT = ['json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' # django-allauth ACCOUNT_EMAIL_VERIFICATION = 'none'
5,193
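One thing to note in the settings above: os.environ.get('DEBUG', True) returns a string whenever the variable is set, so DEBUG='False' in the environment would still be truthy. A small sketch of a stricter parse follows; the env_bool helper name is made up, and unlike the original it defaults to False so an unset variable cannot leave debug mode switched on by accident.

import os

def env_bool(name, default=False):
    """Interpret common string spellings of a boolean environment variable."""
    value = os.environ.get(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env_bool("DEBUG", default=False)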
python/base/core/dmo/business_exception.py
jiportilla/ontology
0
2169833
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Created: 2019-05-05 Owner: <EMAIL> """ class BusinessException(Exception): def __init__(self, business_unit="unknown", message="unknown"): super().__init__(message) self.message = message self.business_unit = business_unit
271
word_freq.py
PantsuitUp/Whack2017
0
2170530
import string import nltk sample_1 = "At my current position, I am part of the team that coordinates the company lunch-and-learn sessions. Each week, we meet to brainstorm who would be exciting guest speakers. We all work together to ensure a diverse mix of speakers, aiming to appeal to a wide swath of people in the company. Because everyone on the team comes from different areas within the company, we have all learned so much about big ideas, from marketing to tech." sample_2 = "At my current position, I am part of like the team that coordinates like the company lunch-and-learn sessions. Each week, we meet to try to brainstorm who would be exciting guest speakers. We all like work together to try to ensure a diverse mix of speakers, aiming to appeal to try to like a wide swath of people in the company. Because everyone on the team comes from like different areas within the company, we have all like learned so much about big ideas, from marketing to like tech" def word_freq(text): """ Creates and returns a dictionary of words mapped to their frequencies as well as the total number of words """ word_dict = dict() text_wo_punc = text.translate(None, string.punctuation) # download nltk packages first time running program try: word_list = nltk.pos_tag(nltk.word_tokenize(text_wo_punc)) except: nltk.download('punkt') nltk.download('averaged_perceptron_tagger') word_list = nltk.pos_tag(nltk.word_tokenize(text_wo_punc)) for word, pos in word_list: if word in word_dict: word_dict[word] += 1 elif pos != 'CC' and pos != 'IN' and pos != 'TO' and pos != 'DT': # exclude prepositions, conjunctions, etc word_dict[word] = 1 return word_dict, len(word_list) def overused_words(word_dict, total_words): """ Finds words that makes up more than 1% of a person's answers and returns them as a list of overused words """ overused_words_list = [] if total_words > 500: threshold = .01 else: threshold = .05 for word, count in word_dict.iteritems(): if (count / float(total_words)) > threshold: overused_words_list.append(word) return overused_words_list def ownership_measure(word_dict): """ Calculates and returns measure of ownership as judged by frequency of first_person vs second_person pronouns used """ first_person = 0 second_person = 0 if "I" in word_dict: first_person += word_dict["I"] if "me" in word_dict: first_person += word_dict["me"] if "my" in word_dict: first_person += word_dict["my"] if "we" in word_dict: second_person += word_dict["we"] if "us" in word_dict: second_person += word_dict["us"] if "our" in word_dict: second_person += word_dict["our"] return (float(first_person) / (second_person + first_person)) if __name__ == "__main__": word_dict, total_words = word_freq(sample_2) # print overused_words(word_dict, total_words) # print ownership_measure(word_dict)
2,891
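The snippet above is Python 2 code: str.translate(None, ...) and dict.iteritems() no longer exist in Python 3. Below is a hedged Python 3 sketch of the same frequency-plus-threshold idea, written without NLTK so it stays self-contained; the 1 %/5 % thresholds mirror the original, but the part-of-speech filtering is deliberately left out.

import string
from collections import Counter

def overused(text, cutoff=500):
    """Return words exceeding 1 % (long answers) or 5 % (short answers) of the text."""
    # Strip punctuation the Python 3 way and split on whitespace.
    words = text.translate(str.maketrans("", "", string.punctuation)).split()
    counts = Counter(words)
    threshold = 0.01 if len(words) > cutoff else 0.05
    return [word for word, count in counts.items() if count / len(words) > threshold]

# On a sample this short most words clear the 5 % bar; the thresholds are
# meant for full interview answers like sample_1/sample_2 above.
print(overused("we like to say like a bit too much, like, all the time"))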
code/Solutions TP1/tva.py
christophesaintjean/IntroProgS1_2020
0
2169955
prix_ht_str = input("Prix HT ?") prix_ht = float(prix_ht_str) prix_ttc = prix_ht * 1.206 print("Prix TTC : ", prix_ttc)
119
ToDo/migrations/0002_auto_20200606_0929.py
chumbajr/todoapp
1
2169856
# Generated by Django 3.0.5 on 2020-06-06 09:29 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('ToDo', '0001_initial'), ] operations = [ migrations.AlterField( model_name='todo', name='start_time', field=models.DateTimeField(verbose_name=django.utils.timezone.now), ), ]
432
tape/models/ModelBuilder.py
nickbhat/tape-1
42
2170587
from typing import Dict, Type, List, Optional from sacred import Ingredient from tensorflow.keras import Model from tape.data_utils import PFAM_VOCAB from .AbstractTapeModel import AbstractTapeModel from .Transformer import Transformer, transformer_hparams from .Resnet import Resnet, resnet_hparams from .BidirectionalLSTM import BidirectionalLSTM, lstm_hparams from .BeplerModel import BeplerModel, bepler_hparams from .UniRepModel import UniRepModel, unirep_hparams from .OneHotModel import OneHotModel from .OneHotEvolutionaryModel import OneHotEvolutionaryModel class ModelBuilder: models: Dict[str, Type[Model]] = { 'transformer': Transformer, 'resnet': Resnet, 'lstm': BidirectionalLSTM, 'bepler': BeplerModel, 'unirep': UniRepModel, 'one_hot': OneHotModel, 'one_hot_evolutionary': OneHotEvolutionaryModel} hparams: List[Ingredient] = [ transformer_hparams, resnet_hparams, lstm_hparams, bepler_hparams, unirep_hparams] @staticmethod def build_model(model_name: str) -> Model: if model_name.lower() == 'bidirectional_lstm': model_name = 'lstm' n_symbols = len(PFAM_VOCAB) model_type = ModelBuilder.models[model_name.lower()] return model_type(n_symbols) @staticmethod def add_model(model_name: str, model: Type[AbstractTapeModel], hparams: Optional[Ingredient] = None) -> None: if not issubclass(model, AbstractTapeModel): raise TypeError("Model is not a subclass of AbstractTapeModel") if hparams is not None and not isinstance(hparams, Ingredient): raise TypeError("hparams object is not a sacred Ingredient") ModelBuilder.models[model_name] = model if hparams is not None: ModelBuilder.hparams.append(hparams)
1,900
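A hedged sketch of registering a new architecture with the builder above. The import paths, the MyTinyModel class, its hyperparameter Ingredient, and the assumption that AbstractTapeModel.__init__ accepts n_symbols are all illustrative guesses; only add_model() and build_model() come from the code itself.

from sacred import Ingredient

from tape.models.ModelBuilder import ModelBuilder            # assumed import path
from tape.models.AbstractTapeModel import AbstractTapeModel  # assumed import path

my_tiny_hparams = Ingredient('my_tiny')  # hypothetical hyperparameter ingredient

class MyTinyModel(AbstractTapeModel):
    """Placeholder architecture; a real model would define its layers and call()."""
    def __init__(self, n_symbols):
        super().__init__(n_symbols)  # assumes the base class takes the vocabulary size

# Register under a new key, then build it the same way as the built-ins.
ModelBuilder.add_model('my_tiny', MyTinyModel, my_tiny_hparams)
model = ModelBuilder.build_model('my_tiny')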
chainermn/datasets/empty_dataset.py
zaltoprofen/chainer
3,705
2169726
def create_empty_dataset(dataset): """Creates an empty dataset for models with no inputs and outputs. This function generates an empty dataset, i.e., ``__getitem__()`` only returns an empty tuple. The resulting dataset has the same length as the original one. Such datasets are used for models which take no inputs and return no outputs. We expect models, e.g., whose ``forward()`` starts with ``chainermn.functions.recv()`` and ends with ``chainermn.functions.send()``. Args: dataset: Dataset to convert. Returns: list: Dataset of the same length as the original one, consisting only of empty patterns. """ return [()] * len(dataset)
709
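Assuming create_empty_dataset is importable as in upstream Chainer (chainermn re-exports it from chainermn.datasets), its behaviour is easy to check with a plain list standing in for a real dataset:

from chainermn.datasets import create_empty_dataset  # assumed public import path

# Only the length of the original dataset matters.
original = list(range(5))
empty = create_empty_dataset(original)

assert len(empty) == len(original)
assert empty[0] == ()  # every item is an empty tuple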
packages/watchmen-dqc/src/watchmen_dqc/topic_profile/topic_profile_service.py
Indexical-Metrics-Measure-Advisory/watchmen
0
2169163
from datetime import datetime from json import loads from logging import getLogger from typing import Any, Dict, List, Optional from pandas_profiling import ProfileReport from watchmen_auth import PrincipalService from watchmen_data_kernel.common import DataKernelException from watchmen_data_kernel.meta import TopicService from watchmen_data_kernel.service import ask_topic_data_service, ask_topic_storage from watchmen_data_kernel.topic_schema import TopicSchema from watchmen_dqc.common import DqcException from watchmen_dqc.util import build_data_frame, convert_data_frame_type_by_topic from watchmen_model.admin import is_raw_topic from watchmen_model.common import TopicId from watchmen_model.dqc import TopicProfile from watchmen_model.pipeline_kernel import TopicDataColumnNames from watchmen_storage import ColumnNameLiteral, EntityCriteriaExpression, EntityCriteriaOperator from watchmen_utilities import ArrayHelper logger = getLogger(__name__) def get_topic_service(principal_service: PrincipalService) -> TopicService: return TopicService(principal_service) def get_topic_schema( topic_id: TopicId, principal_service: PrincipalService) -> TopicSchema: topic_service = get_topic_service(principal_service) topic = get_topic_service(principal_service).find_by_id(topic_id) if topic is None: raise DataKernelException(f'Topic[id={topic_id}] not found.') schema = topic_service.find_schema_by_name(topic.name, principal_service.get_tenant_id()) if schema is None: raise DataKernelException(f'Topic[name={topic.name}] not found.') return schema class TopicProfileService: def __init__(self, principal_service: PrincipalService): self.principalService = principal_service def find(self, topic_id: TopicId, start_time: datetime, end_time: datetime) -> Optional[TopicProfile]: schema = get_topic_schema(topic_id, self.principalService) if is_raw_topic(schema.get_topic()): raise DqcException(f'Raw topic[name={schema.get_topic().name}] is not supported for profiling.') storage = ask_topic_storage(schema, self.principalService) service = ask_topic_data_service(schema, storage, self.principalService) criteria = [ EntityCriteriaExpression( left=ColumnNameLiteral(columnName=TopicDataColumnNames.TENANT_ID.value), right=self.principalService.get_tenant_id()), EntityCriteriaExpression( left=ColumnNameLiteral(columnName=TopicDataColumnNames.UPDATE_TIME.value), operator=EntityCriteriaOperator.GREATER_THAN_OR_EQUALS, right=start_time), EntityCriteriaExpression( left=ColumnNameLiteral(columnName=TopicDataColumnNames.UPDATE_TIME.value), operator=EntityCriteriaOperator.LESS_THAN_OR_EQUALS, right=end_time) ] data = service.find(criteria) columns = [ TopicDataColumnNames.ID.value, *ArrayHelper(schema.get_topic().factors).map(lambda x: x.name).to_list(), TopicDataColumnNames.TENANT_ID.value, TopicDataColumnNames.INSERT_TIME.value, TopicDataColumnNames.UPDATE_TIME.value ] def row_to_list(row: Dict[str, Any]) -> List[Any]: return ArrayHelper(columns).map(lambda x: row.get(x)).to_list() data_frame = build_data_frame(ArrayHelper(data).map(row_to_list).to_list(), columns) data_frame = convert_data_frame_type_by_topic(data_frame, schema.get_topic()) if data_frame.empty or len(data_frame.index) == 1: return None else: logger.info(f'memory_usage {data_frame.memory_usage(deep=True).sum()} bytes') profile = ProfileReport(data_frame, title=f'{schema.get_topic().name} data profile report', minimal=True) json_data = profile.to_json() json_constants_map = { '-Infinity': float('-Infinity'), 'Infinity': float('Infinity'), 'NaN': None, } return 
loads(json_data, parse_constant=lambda x: json_constants_map[x])
3,769
dotastats/middleware/error.py
romanalexander/opendota
14
2170412
from django.shortcuts import render from dotastats.exceptions import SteamAPIError class SteamErrorMiddleware(object): def process_exception(self, request, exception): try: if isinstance(exception, SteamAPIError): return render(request, '500.html', {'error': 'SteamAPI: ' + exception.errormessage}) except: # Need to ignore all errors here, or we'll get stuck in loop. pass return None
454
jaxns/examples/frozen_flow/build_prior.py
fehiepsi/jaxns
0
2170643
from jax import numpy as jnp from jaxns.gaussian_process import TomographicKernel from jaxns.prior_transforms import DeterministicTransformPrior, MVNDiagPrior, UniformPrior, GaussianProcessKernelPrior, \ MVNPrior, PriorChain def build_frozen_flow_prior(X, kernel, tec_to_dtec, x0): v_dir = DeterministicTransformPrior('v_dir', lambda n: n / jnp.linalg.norm(n), (3,), MVNDiagPrior('n', jnp.zeros(3), jnp.ones(3), tracked=False), tracked=False) v_mag = UniformPrior('v_mag', 0., 0.5, tracked=False) v = DeterministicTransformPrior('v', lambda v_dir, v_mag: v_mag * v_dir, (3,), v_dir, v_mag, tracked=True) X_frozen_flow = DeterministicTransformPrior('X', lambda v: X[:, 0:6] - jnp.concatenate([v, jnp.zeros(3)]) * X[:, 6:7], X[:, 0:6].shape, v, tracked=False) K = GaussianProcessKernelPrior('K', TomographicKernel(x0, kernel, S_marg=20, S_gamma=10), X_frozen_flow, UniformPrior('height', 100., 300.), UniformPrior('width', 50., 150.), UniformPrior('l', 0., 20.), UniformPrior('sigma', 0., 2.), tracked=False) tec = MVNPrior('tec', jnp.zeros((X.shape[0],)), K, ill_cond=True, tracked=False) dtec = DeterministicTransformPrior('dtec', tec_to_dtec, tec.to_shape, tec, tracked=False) prior_chain = PriorChain() \ .push(dtec) \ .push(UniformPrior('uncert', 0., 5.)) return prior_chain
1,771
apps/oozie/src/oozie/migrations/0003_initial.py
kokosing/hue
5,079
2169949
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-06-06 18:55 from __future__ import unicode_literals from django.conf import settings import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('oozie', '0002_initial'), ] operations = [ migrations.AddField( model_name='subworkflow', name='sub_workflow', field=models.ForeignKey(blank=True, default=None, help_text='The sub-workflow application to include. You must own all the sub-workflows.', null=True, on_delete=django.db.models.deletion.CASCADE, to='oozie.Workflow', verbose_name='Sub-workflow'), ), migrations.AddField( model_name='history', name='submitter', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='dataoutput', name='dataset', field=models.OneToOneField(help_text='The pattern of the output data we want to generate.', on_delete=django.db.models.deletion.CASCADE, to='oozie.Dataset', verbose_name='The dataset representing the format of the data output.'), ), migrations.AddField( model_name='datainput', name='dataset', field=models.OneToOneField(help_text='The pattern of the input data we want to process.', on_delete=django.db.models.deletion.CASCADE, to='oozie.Dataset', verbose_name='The dataset representing format of the data input.'), ), migrations.AddField( model_name='node', name='workflow', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Workflow'), ), migrations.AddField( model_name='dataset', name='coordinator', field=models.ForeignKey(help_text='The coordinator associated with this data.', on_delete=django.db.models.deletion.CASCADE, to='oozie.Coordinator', verbose_name='Coordinator'), ), migrations.AddField( model_name='dataoutput', name='coordinator', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Coordinator'), ), migrations.AddField( model_name='datainput', name='coordinator', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Coordinator'), ), migrations.AddField( model_name='bundledcoordinator', name='bundle', field=models.ForeignKey(help_text='The bundle regrouping all the coordinators.', on_delete=django.db.models.deletion.CASCADE, to='oozie.Bundle', verbose_name='Bundle'), ), migrations.AddField( model_name='bundledcoordinator', name='coordinator', field=models.ForeignKey(help_text='The coordinator to batch with other coordinators.', on_delete=django.db.models.deletion.CASCADE, to='oozie.Coordinator', verbose_name='Coordinator'), ), ]
3,272
deepqmc/sampler/walkers.py
NLESC-JCER/DeepQMC
6
2170319
import torch import numpy as np from torch.distributions import MultivariateNormal class Walkers(object): def __init__(self, nwalkers=100, nelec=1, ndim=1, init=None): """Walkers of the MC sampling Keyword Arguments: nwalkers {int} -- number of walkers (default: {100}) nelec {int} -- number of electrons (default: {1}) ndim {int} -- number of dimension for each electron (default: {1}) init {dict} -- method and data to initialize the walkers (default: {None}) see Molecule.domain() """ self.nwalkers = nwalkers self.ndim = ndim self.nelec = nelec self.init_domain = init self.pos = None self.status = None self.cuda = False self.device = torch.device('cpu') def initialize(self, pos=None): """Initalize the position of the walkers Args: method (str, optional): how to initialize the positions. Defaults to 'uniform'. pos ([type], optional): existing position of the walkers. Defaults to None. Raises: ValueError: if the method is not recognized """ if self.cuda: self.device = torch.device('cuda') if pos is not None: if len(pos) > self.nwalkers: pos = pos[-self.nwalkers:, :] self.pos = pos else: if 'center' in self.init_domain.keys(): self.pos = self._init_center() elif 'min' in self.init_domain.keys(): self.pos = self._init_uniform() elif 'mean' in self.init_domain.keys(): self.pos = self._init_multivar() elif 'atom_coords' in self.init_domain.keys(): self.pos = self._init_atomic() else: raise ValueError('Init walkers not recognized') def _init_center(self): """Initialize the walkers at the center of the molecule Returns: torch.tensor -- positions of the walkers """ eps = 1E-6 pos = -eps + 2 * eps * \ torch.rand(self.nwalkers, self.nelec * self.ndim) return pos.type( torch.get_default_dtype()).to( device=self.device) def _init_uniform(self): """Initialize the walkers in a box covering the molecule Returns: torch.tensor -- positions of the walkers """ pos = torch.rand(self.nwalkers, self.nelec * self.ndim) pos *= (self.init_domain['max'] - self.init_domain['min']) pos += self.init_domain['min'] return pos.type( torch.get_default_dtype()).to( device=self.device) def _init_multivar(self): """Initialize the walkers in a sphere covering the molecule Returns: torch.tensor -- positions of the walkers """ multi = MultivariateNormal( torch.tensor(self.init_domain['mean']), torch.tensor(self.init_domain['sigma'])) pos = multi.sample((self.nwalkers, self.nelec)).type( torch.get_default_dtype()) pos = pos.view(self.nwalkers, self.nelec * self.ndim) return pos.to(device=self.device) def _init_atomic(self): """Initialize the walkers around the atoms Returns: torch.tensor -- positions of the walkers """ pos = torch.zeros(self.nwalkers, self.nelec * self.ndim) idx_ref, nelec_tot = [], 0 nelec_placed, natom = [], 0 for iat, nelec in enumerate(self.init_domain['atom_nelec']): idx_ref += [iat] * nelec nelec_tot += nelec natom += 1 for iw in range(self.nwalkers): nelec_placed = [0] * natom idx = torch.tensor(idx_ref) idx = idx[torch.randperm(nelec_tot)] xyz = torch.tensor( self.init_domain['atom_coords'])[ idx, :] for ielec in range(nelec_tot): _idx = idx[ielec] if nelec_placed[_idx] == 0: s = 1. / self.init_domain['atom_num'][_idx] elif nelec_placed[_idx] < 5: s = 2. / (self.init_domain['atom_num'][_idx] - 2) else: s = 3. / (self.init_domain['atom_num'][_idx] - 10) xyz[ielec, :] += np.random.normal(scale=s, size=(1, 3)) nelec_placed[_idx] += 1 pos[iw, :] = xyz.view(-1) return pos
4,677
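A short sketch of driving the Walkers class above: two electrons in three dimensions, initialised uniformly in a box around the origin. The import path and the min/max values are illustrative choices, not DeepQMC defaults.

from deepqmc.sampler.walkers import Walkers  # assumed import path

# 100 walkers, 2 electrons, 3 dimensions, drawn uniformly in [-2, 2] per coordinate.
walkers = Walkers(nwalkers=100, nelec=2, ndim=3,
                  init={'min': -2.0, 'max': 2.0})
walkers.initialize()

print(walkers.pos.shape)  # torch.Size([100, 6]) -> nelec * ndim columns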
py/share.py
StefanLazea/servy
0
2170853
import sys from utils import validate_email, get_argument, print_permissions, display_loading_message, hide_loading_message_with_error, write_error from drive import get_spreadsheet, share_spreadsheet def share_command(): if "-l" in sys.argv: ss = get_spreadsheet() print_permissions(ss) elif "-a" in sys.argv: email = get_argument(sys.argv, "-a") if validate_email(email): try: display_loading_message("Adding permission", "Permission added") ss = get_spreadsheet() share_spreadsheet(ss, email) hide_loading_message_with_error(False) except Exception: hide_loading_message_with_error(True) else: write_error("Invalid email: " + email) elif "-d" in sys.argv: email = get_argument(sys.argv, "-d") if validate_email(email): try: display_loading_message("Removing permission", "Permission removed") ss = get_spreadsheet() ss.remove_permissions(email) hide_loading_message_with_error(False) except Exception: hide_loading_message_with_error(True) else: write_error("Invalid email: " + email)
1,298
container_most_water.py
ahmedrebei/LeetCodeRepository
0
2169485
class Solution: def maxArea(self, height) -> int: n = len(height) output = 0 i=0 j=n-1 while(i<j): if height[i] < height[j]: output = max(output, (j-i)*height[i]) i+=1 else: output = max(output, (j-i)*height[j]) j-=1 return output if __name__ == "__main__": s = Solution() print(s.maxArea([1,8,6,2,5,4,8,3,7]))
458
esdlvalidator/api/service/validation.py
ESDLMapEditorESSIM/ESDLValidator
0
2170537
import json from werkzeug.datastructures import FileStorage from esdlvalidator.core.esdl import utils from esdlvalidator.core.exceptions import UnknownESDLFileType from esdlvalidator.validation.abstract_repository import SchemaRepository from esdlvalidator.validation.validator import Validator class ValidationService: """Service for handling all requests to the validation endpoint""" def __init__(self, schemaRepository: SchemaRepository): self.__repo = schemaRepository self.__validator = Validator() self.esdl = None def validate(self, file: FileStorage, schemaIds: list, validateXsd: bool): """Validate an uploaded file against the given schemas Args: file (FileStorage): Uploaded file schemaIds: List of schema id's to validate against. example [1,2] Returns: result: JSON result of the validation Raises: SchemaNotFound: One of the validation schemas was not found UnknownESDLFileType: Type of uploaded file is not supported InvalidESDL: ESDL could not be loaded by the system """ if not self.__allowed_file(file.filename): raise UnknownESDLFileType schemas = self.__repo.get_by_ids(schemaIds) esdlString = self.__get_esdl_string(file) result = self.__validator.validate(esdlString, schemas, validateXsd) # ToDo: fix need for toJSON and then back jsonString = result.toJSON() return json.loads(jsonString) def validateContents(self, esdlContents: str, schemaIds: list, validateXsd: bool): """Validate an uploaded file contents against the given schemas Args: esdlContents (String): Uploaded file contents schemaIds: List of schema id's to validate against. example [1,2] Returns: result: JSON result of the validation Raises: SchemaNotFound: One of the validation schemas was not found UnknownESDLFileType: Type of uploaded file is not supported """ schemas = self.__repo.get_by_ids(schemaIds) result = self.__validator.validate(esdlContents, schemas, validateXsd) # ToDo: fix need for toJSON and then back jsonString = result.toJSON() return json.loads(jsonString) def __allowed_file(self, filename): """Allowed esdl file extensions""" return "." in filename and \ filename.rsplit(".", 1)[1].lower() in ["esdl", "xml"] def __get_esdl_string(self, file): """Get a string from the uploaded file""" fileBytes = file.read() esdlString = fileBytes.decode("utf-8") return esdlString
2,733
controk_webservice/employees/views.py
controk-sys/http-server
0
2167450
from django.shortcuts import get_object_or_404 from rest_framework import viewsets from rest_framework.decorators import detail_route from rest_framework.response import Response from controk_webservice.employees.serializers import Employee, EmployeeInfoSerializer, EmployeeSerializer class EmployeesViewSet(viewsets.ReadOnlyModelViewSet): queryset = Employee.objects.all() serializer_class = EmployeeSerializer @detail_route(methods=['GET']) def info(self, request, pk): employee = get_object_or_404(self.queryset.select_related('address'), pk=pk) return Response(EmployeeInfoSerializer(employee).data)
641
data_preparation/2_extracting_annotated_data.py
sfvnDTU/deep_detektor
3
2169658
""" Takes annotated data from DR and creates dataset. """ import re import sqlite3 from collections import Counter from pathlib import Path from editdistance import eval as editdistance from data_preparation.classes.annotated_data_cleaner import DebattenAnnotatedDataCleaner from data_preparation.data_preparation_utility import clean_str from project_paths import ProjectPaths # Set paths annotated_data_dir = ProjectPaths.dr_annotated_subtitles_dir # Path where DR stores annotated data. storage_dir = ProjectPaths.tensor_provider # Open cleaner in directory annotatedData = DebattenAnnotatedDataCleaner(annotated_data_dir) file_paths = annotatedData.getFilePaths() # Get data and labels of annotated programs data, labels = annotatedData.getAllCleanedProgramSentences(disp=True) # Number of observations N = len(data) # Conversion from file-name to program ID (manually inspected in databases) program_name2id = { "1": 7308025, "2": 2294023, "3": 2315222, "4": 2337314, "5": 2359717, "6": 2304494, "7": 2348260, "8": 3411204, "9": 3570949, "10": 3662558, "8567181": 8567181, "8567636": 8567636, "8568658": 8568658, "8568906": 8568906, "8610238": 8610238, "8635201": 8635201, "8665813": 8665813, "8689224": 8689224, "8720741": 8720741, "9284846": 9284846, } # Program 2337314 sentence 1 is incorrect in annotated dataset ! ################################################################# # annotated_programs.db sentence_id_skips = { (2315222, 259), (2315222, 260) } # Prepare data for database (strings and removing single-word claims) pattern = re.compile("^[\S]+$") sentence_id = 0 database_data = [] c_program = None for row_nr, row in enumerate(data): sentence_id += 1 program_id = program_name2id[row[0].strip()] sentence = clean_str(row[2]) claim = str(row[4]) claim_idx = str(row[3]) claim_flag = row[4] is not None if c_program is None: c_program = program_id elif c_program != program_id: sentence_id = 1 c_program = program_id if (program_id, sentence_id) in sentence_id_skips: sentence_id += 1 # if not pattern.match(str(row[4])) or row[4] is None: database_data.append([ program_id, sentence_id, sentence, claim, claim_idx, claim_flag ]) print("\nCreating database for all programs") print("\tRemoving pre-existing database.") database_path = Path(storage_dir, "annotated_programs.db") if database_path.is_file(): database_path.unlink() print("\tConnection") connection = sqlite3.connect(str(database_path)) cursor = connection.cursor() print("\tCreating table") cursor.execute( "CREATE TABLE programs (" "program_id INTEGER NOT NULL," "sentence_id INTEGER NOT NULL," "sentence TEXT NOT NULL," "claim TEXT," "claim_idx TEXT," "claim_flag INTEGER NOT NULL," "PRIMARY KEY (program_id, sentence_id)" ")" ) print("\tInserting rows") insert_command = "INSERT INTO programs (program_id, sentence_id, sentence, claim, claim_idx, claim_flag)" \ " VALUES (?, ?, ?, ?, ?, ?)" cursor.executemany(insert_command, database_data) print("\tCommitting and closing.") connection.commit() cursor.close() connection.close() # TODO: Detection of leading and trailing spaces may have been removed - were they necessary at this point? 
################################################################# # Inspection database # Data from annotated dataset annotated_data_sentences = {(row[0], row[1]): row[2] for row in database_data} n_sentences_in_annotated_programs = Counter() for row in database_data: n_sentences_in_annotated_programs[row[0]] += 1 # Data from web-crawl connection = sqlite3.connect(str(Path(ProjectPaths.tensor_provider, "all_programs.db"))) cursor = connection.cursor() cursor.execute("SELECT program_id, sentence_id, sentence FROM programs") crawl_data = cursor.fetchall() cursor.close() connection.close() crawl_data_sentences = {(row[0], row[1]): row[2] for row in crawl_data if row[0] in n_sentences_in_annotated_programs} n_sentences_in_crawl_programs = Counter() for row in crawl_data_sentences: if row[0] in n_sentences_in_annotated_programs: n_sentences_in_crawl_programs[row[0]] += 1 # Create inspection database inspection_path = Path(storage_dir, "inspection_programs.db") if inspection_path.is_file(): inspection_path.unlink() connection = sqlite3.connect(str(inspection_path)) cursor = connection.cursor() cursor.execute( "CREATE TABLE programs (" "program_id INTEGER NOT NULL," "sentence_id INTEGER NOT NULL," "crawl_sentence TEXT NOT NULL," "annotated_sentence TEXT NOT NULL," "overlap REAL," "edit_distance INTEGER," "PRIMARY KEY (program_id, sentence_id)" ")" ) # Make inspection-values rows = [] break_count = 0 for program_id in n_sentences_in_annotated_programs.keys(): sentence_id = 0 while True: sentence_id += 1 key = (program_id, sentence_id) if key not in annotated_data_sentences and key not in crawl_data_sentences: break_count += 1 if break_count > 4: break continue break_count = 0 annotated_sentence = annotated_data_sentences.get(key, "") crawl_sentence = crawl_data_sentences.get(key, "") distance = editdistance(annotated_sentence, crawl_sentence) if crawl_sentence: relative_distance = (len(crawl_sentence) - distance) / len(crawl_sentence) else: relative_distance = None rows.append([program_id, sentence_id, crawl_sentence, annotated_sentence, relative_distance, distance]) insert_command = "INSERT INTO programs (program_id, sentence_id, crawl_sentence, " \ "annotated_sentence, overlap, edit_distance)" \ " VALUES (?, ?, ?, ?, ?, ?)" cursor.executemany(insert_command, rows) connection.commit() cursor.close() connection.close()
6,044
test/pytch/py/project/talking_banana.py
krishanu-dey/skulpt
2
2170427
import pytch class Banana(pytch.Sprite): Costumes = ["yellow-banana.png"] @pytch.when_I_receive("talk") def talk(self): self.say("Hello world") @pytch.when_I_receive("silence") def fall_silent(self): self.say("") @pytch.when_I_receive("talk-briefly") def talk_briefly(self): self.say_for_seconds("Mumble", 0.5) print("/mumble") @pytch.when_I_receive("say-goodbye") def say_goodbye(self): self.say_for_seconds("Bye!", 1.0) @pytch.when_I_receive("hide") def disappear(self): self.hide() @pytch.when_I_receive("show") def appear(self): self.show()
660
main.py
Hudlle/password_saver
0
2167822
import mysql.connector import sys import pyperclip import login import crypto import random try: db = login.getConnection() cursor = db.cursor() except: sys.exit() QNewPassword = "INSERT INTO passwords (websiteName, password, username, eMail, telefonNumber, websiteAddress) VALUES (%s,%s,%s,%s,%s,%s)" QChangeEntry = "INSERT INTO passwords (passwordId, websiteName, password, username, eMail, telefonNumber, websiteAddress) VALUES (%s,%s,%s,%s,%s,%s,%s)" QDeleteEntryWebsiteName = "DELETE FROM passwords WHERE websiteName = %s" QDeleteEntryPasswordId = "DELETE FROM passwords WHERE passwordId = %s" QSelectWhere = "SELECT * FROM passwords WHERE websiteName = %s" QSelectAll = "SELECT * FROM passwords" def newEntry(): print("--> New Password") websiteName = input("Website Name : ") cursor.execute(QSelectAll) for i in cursor: if i[1] == websiteName: print("Cannot create Password with the website name '" + websiteName + "'.") return password = input("Password : ") password = <PASSWORD>) username = input("Username : ") eMail = input("E-Mail : ") telefonNumber = input("Telefon Number : ") websiteAddress = input("Website Address : ") val = (websiteName, password, username, eMail, telefonNumber, websiteAddress) cursor.execute(QNewPassword, val) db.commit() print("S: Command executed successfully.") def changeEntry(): print("--> Change Entry") websiteName = input("Website Name : ") cursor.execute(QSelectWhere, (websiteName, )) counter = 0 for i in cursor: print("") printRow(i) print("") print("[1] Website Name\n[2] Password\n[3] Username\n[4] E-Mail\n[5] Telefon Number\n[6] Website Address\n[B] Back") answer = input("> ") if answer != "B": changeCommands[answer](i) print("S: Command executed successfully.") counter += 1 if counter < 1: print("E: There is no entry with the website name : '" + websiteName + "'.") def changeWebsiteName(row): print("[B] Back") websiteName = input("New Website Name : ") if websiteName == "B": return val = (row[0], websiteName, row[2], row[3], row[4], row[5], row[6]) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, val) db.commit() print("S: Command executed successfully.") def changePassword(row): print("[B] Back") password = input("New Password : ") if password == "B": return password = <PASSWORD>(password) val = (row[0], row[1], password, row[3], row[4], row[5], row[6]) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, val) db.commit() def changeUsername(row): print("[B] Back") username = input("New Username : ") if username == "B": return val = (row[0], row[1], row[2], username, row[4], row[5], row[6]) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, val) db.commit() def changeEMail(row): print("[B] Back") eMail = input("New E-Mail : ") if eMail == "B": return val = (row[0], row[1], row[2], row[3], eMail, row[5], row[6]) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, val) db.commit() def changeTelefonNumber(row): print("[B] Back") telefonNumber = input("New Telefon Number : ") if telefonNumber == "B": return val = (row[0], row[1], row[2], row[3], row[4], telefonNumber, row[6]) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, val) db.commit() def changeWebsiteAddress(row): print("[B] Back") websiteAddress = input("New Website Address : ") if websiteAddress == "B": return val = (row[0], row[1], row[2], row[3], row[4], row[5], websiteAddress) cursor.execute(QDeleteEntryPasswordId, (row[0], )) cursor.execute(QChangeEntry, 
val) db.commit() changeCommands = { "1" : changeWebsiteName, "2" : changePassword, "3" : changeUsername, "4" : changeEMail, "5" : changeTelefonNumber, "6" : changeWebsiteAddress } def deleteEntry(): print("--> Delete Entry") websiteName = input("Website Name : ") cursor.execute(QSelectWhere, (websiteName, )) counter = 0 for i in cursor: if str(i[1]) == websiteName: cursor.execute(QDeleteEntryWebsiteName, (websiteName, )) db.commit() counter += 1 print("S: " + websiteName + " deleted successfully.") if counter < 1: print("E: There is no password according to the website name : '" + websiteName + "'.") def printEntry(): print("--> Print Entry") websiteName = input("Website Name : ") print("") cursor.execute(QSelectWhere, (websiteName, )) counter = 0 for i in cursor: printRow(i) print("") answer = input("Copy Password To Clipboard? [y/n] : ") if answer == "y": pyperclip.copy(crypto.decryptMessage(i[2])) print("S: Command executed successfully.") counter += 1 if counter < 1: print("E: There is no entry with the website name : '" + websiteName + "'.") def printTable(): print("--> Print Table") cursor.execute(QSelectAll) entrys = cursor.fetchall() print("Total Password Count : ", cursor.rowcount, "\n") for i in entrys: printRow(i) print("") print("S: Command executed successfully.") def printRow(row): print("Website Name : " + row[1]) print("----------------------------") print(f"Password : {len(crypto.decryptMessage(row[2])) * '*'}") print("Username : " + row[3]) print("E-Mail : " + row[4]) print("Telefon Number : " + row[5]) print("Website Address : " + row[6]) def genPassword(): try: n = int(input("How long sould the password be? : ")) except: print("E: Not a number. Try again.") genPassword() return alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "s", "t", "u", "v", "w", "x", "y", "z"] extra = ["!", "§", "$", "%", "&", "/", "(", ")", "=", "?", "<", ">"] while True: pw = [] for i in range(n): pw.append(alphabet[random.randint(0, len(alphabet) - 1)]) pw.append(extra[random.randint(0, len(extra) - 1)]) pw.append(str(random.randint(0, 9))) while len(pw) > n: pw.pop() random.shuffle(pw) password = ''.join(pw) print(f"Generated Password: {password}") answer = input("Copy Password To Clipboard? [y/n] : ") if answer == "y": pyperclip.copy(password) print("S: Command executed successfully.") return else: answer = input("Generate again or leave? [g/l] : ") if answer == "g": continue else: return def quitApp(): db.commit() print("Bye.") sys.exit() commands = { "1" : newEntry, "2" : changeEntry, "3" : deleteEntry, "4" : printEntry, "5" : printTable, "6" : genPassword, } def menu(): print("\n--> Main Menu") print("[1] Create New Entry\n[2] Change An Existing Entry\n[3] Delete An Entry\n[4] Print An Entry\n[5] Print All Entrys\n[6] Generate A Password\n[Q] Quit\n") answer = input("> ") if answer == "Q" or answer == "q": quitApp() return keys = list(commands.keys()) for i in keys: if answer == i: commands[answer]() menu() return print(f"E: {answer} Is Not A Valid Input. Try Again.") menu() menu()
7,836
Main Project/atom_creator.py
Jasc01/Lewis-Structure_Oxacids-and-Hidracids
0
2170603
import atom as Atom # Array used to rank electronegativity arrayElectroNeg = ["Mn", "Cr", "B", "Se", "S", "I", "Br", "N", "Cl", "O", "F", "H"] # H is the max because it is never a central atom ''' Looks for a pattern in the incoming string. Basically, it splits the string into its elements (for example H, Cl, Cr), and if numbers follow, the Atom attribute "quantity" is set according to that number. ''' def createAtom(strAtom): atomArray = [] tempAtom = None for i in range(len(strAtom)): if strAtom[i].isupper(): try: if strAtom[i+1].islower(): name = strAtom[i] + strAtom[i+1] else: name = strAtom[i] except IndexError: name = strAtom[i] tempAtom = Atom.Atom(name, getElectroNegativity(name), getDoubleDots(name), getSingleDots(name)) try: if not strAtom[i+1].isdigit(): atomArray.append(tempAtom) except IndexError: atomArray.append(tempAtom) else: if strAtom[i].isdigit(): tempAtom.quantity = int(strAtom[i]) atomArray.append(tempAtom) return atomArray def getElectroNegativity(name): return arrayElectroNeg.index(name) ''' Gets the number of valence electron pairs ''' def getDoubleDots(name): #Switch - case for double dots in Elements if name == "H" or name == "B": return 0 elif name == "N": return 1 elif name == "O" or name == "S" or name == "Se" or name == "Cr": return 2 elif name == "Cl" or name == "F" or name == "I" or name == "Br" or name == "Mn": return 3 ''' Gets the number of <NAME> electrons ''' def getSingleDots(name): #Switch - case for single dots in Elements if name == "H": return 1 elif name == "N" or name == "B": return 3 elif name == "O" or name == "S" or name == "Se" or name == "Cr": return 2 elif name == "Cl" or name == "F" or name == "I" or name == "Br" or name == "Mn": return 1
2,082
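A usage sketch for the parser above, assuming the project's atom module defines Atom(name, electronegativity, double_dots, single_dots) and stores the symbol on a name attribute; the formula string is just an example, and quantity is read defensively because only digit-suffixed symbols get it assigned explicitly.

from atom_creator import createAtom  # assumed import path

# H2SO4 -> [H (quantity 2), S, O (quantity 4)]; quantity is only assigned
# when a digit follows the element symbol, hence the getattr fallback.
for a in createAtom("H2SO4"):
    print(a.name, getattr(a, "quantity", 1))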
tcfcli/common/statistics.py
tencentyun/scfcli
103
2169041
# -*- coding: utf-8 -*- import os import platform if platform.python_version() >= '3': from configparser import ConfigParser else: from ConfigParser import ConfigParser class CliConfigParser(ConfigParser): def optionxform(self, optionstr): return optionstr class StatisticsConfigure(object): def __init__(self): self.data_attr = ConfigParser() self.data_file = os.path.join(os.path.expanduser('~'), '.scf_statistics.ini') def read_data(self): if not os.path.isfile(self.data_file): self.write_data() self.data_attr.read(self.data_file) def write_data(self): self.data_attr.write(open(self.data_file, "w")) def delete_data(self): os.remove(self.data_file) def get_data(self, section=None, options=None): try: data = {} if section == None: for eve_section in self.data_attr.sections(): data[eve_section] = self.data_attr.items(eve_section) elif section != None and options == None: data[section] = self.data_attr.items(section) elif section != None and options != None: data[section] = self.data_attr.getint(section, options) return data except Exception as e: # self.delete_data() return False def set_data(self, section, options, value): try: if not self.data_attr.has_section(section) and section: # 检查是否存在section self.data_attr.add_section(section) self.data_attr.set(section, options, value) return True except Exception as e: return False def get_args(self, input_args): try: command = [] args = [] is_command = True for eve_input in input_args[1:]: if not str(eve_input).startswith("-") and is_command: command.append(eve_input) else: is_command = False if is_command == False and str(eve_input).startswith("-"): args.append(eve_input) section = " ".join(command) self.read_data() if not args: args.append("no_args") args.append("command_count") #print(section, args) for eve_args in args: #print(eve_args) try: value = self.get_data(section, eve_args) #print(value) if value == False: value = 0 else: value = int(value[section]) value = value + 1 # print(value) self.set_data(section, eve_args, str(value)) except Exception as e: pass self.write_data() except Exception as e: # print(e) pass
3,025
events/AntiSnipe.py
asy-init/deuterium
3
2169090
from discord.ext import commands class AntiSnipe(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_message_delete(self, message): if message.author.id == <PASSWORD>: await message.channel.send(f"AntiSnipe ➞ Triggered\n", delete_after=0.01) @commands.Cog.listener() async def on_message_edit(self, before, after): if before.author.id == <PASSWORD>: edit_msg = await before.channel.send(f"AntiSnipe ➞ Trigger") await edit_msg.edit(content="AntiSnipe ➞ Triggered", delete_after=0.01) def setup(bot): bot.add_cog(AntiSnipe(bot))
658
amfe/io/postprocessing/amfe_postprocess_mesh_converter.py
ma-kast/AMfe
0
2170408
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
AMfe mesh converter for I/O module.
"""

import numpy as np
import pandas as pd

from amfe.io.mesh.base import MeshConverter
from amfe.io.mesh.constants import VOLUME_ELEMENTS_3D

__all__ = [
    'AmfePostprocessMeshConverter'
]


class AmfePostprocessMeshConverter(MeshConverter):
    """
    Converter for postprocessing meshes in postprocessors.

    This converter is usually used by postprocessor converters to read the mesh.
    It is usually instantiated automatically within the postprocessor converters.

    Attributes
    ----------
    _currentnodeid : int
        Integer describing the current local node id. This is needed to build the node array.
    _dimension : int
        describes the dimension of the mesh
    _el_df_indices : list
        list of elementids
    _el_df_eleshapes : list
        list of strings describing the shapes of the elements
    _el_df_connectivity : list
        list of ndarrays describing the connectivity of the elements (regarding real elementids)
    _groups : dict
        dict with groups
    _nodes : ndarray
        ndarray that contains the node coordinates (rows: nodes, columns: x,y,z-coordinates)
    _no_of_nodes : int
        number of nodes that will be returned
    _no_of_elements : int
        number of elements that will be returned
    """

    def __init__(self, verbose=False):
        super().__init__()
        self._verbose = verbose
        self._dimension = None
        self._no_of_nodes = None
        self._no_of_elements = None
        self._nodes = np.empty((0, 4), dtype=float)
        self._currentnodeid = 0
        self._groups = dict()
        self._tags = dict()
        # df information
        self._el_df_indices = list()
        self._el_df_eleshapes = list()
        self._el_df_connectivity = list()
        return

    def build_no_of_nodes(self, no):
        """
        Build number of nodes (optional)

        This function usually is optional. It can be used to enhance performance of the building process.
        This function can be used to preallocate arrays that contain the node coordinates.

        Parameters
        ----------
        no : int
            number of nodes in the mesh

        Returns
        -------
        None
        """
        # This function is only used for preallocation
        # It is not necessary to call, but useful if information about no_of_nodes exists
        self._no_of_nodes = no
        if self._nodes.shape[0] == 0:
            self._nodes = np.zeros((no, 4), dtype=float)
        return

    def build_no_of_elements(self, no):
        """
        Build number of elements (optional)

        This function usually is optional. It can be used to enhance performance of the building process.
        This function can be used to preallocate arrays that contain the element information.

        Parameters
        ----------
        no : int
            number of elements in the mesh

        Returns
        -------
        None
        """
        # This function is not used
        # If someone wants to improve performance he/she can add preallocation functionality for elements
        self._no_of_elements = no
        # preallocation...
        return

    def build_mesh_dimension(self, dim):
        """
        Builds the dimension of the mesh (optional)

        If this method has not been called during the build process, a mesh dimension of 3 is assumed.

        Parameters
        ----------
        dim : int {2, 3}
            dimension of the mesh

        Returns
        -------
        None
        """
        self._dimension = dim
        return

    def build_node(self, idx, x, y, z):
        """
        Builds a node.

        Parameters
        ----------
        idx : int
            ID of the node
        x : float
            X coordinate of the node
        y : float
            Y coordinate of the node
        z : float
            Z coordinate of the node

        Returns
        -------
        None
        """
        # amfeid is the row-index in nodes array
        amfeid = self._currentnodeid
        # Check if preallocation has been done so far
        if self._no_of_nodes is not None:
            # write node in preallocated array
            self._nodes[amfeid, :] = [idx, x, y, z]
        else:
            # append node if array is not preallocated with full node dimension
            self._nodes = np.append(self._nodes, np.array([idx, x, y, z], dtype=float, ndmin=2), axis=0)
        self._currentnodeid += 1
        return

    def build_element(self, idx, etype, nodes):
        """
        Builds an element.

        Parameters
        ----------
        idx : int
            ID of an element
        etype : str
            valid amfe elementtype (shape) string
        nodes : iterable
            iterable of ints describing the connectivity of the element

        Returns
        -------
        None
        """
        # update df information
        self._el_df_connectivity.append(np.array(nodes, dtype=int))
        self._el_df_indices.append(idx)
        self._el_df_eleshapes.append(etype)
        return

    def build_group(self, name, nodeids=None, elementids=None):
        """
        Builds a group, i.e. a collection of nodes and elements.

        Parameters
        ----------
        name: str
            Name identifying the node group.
        nodeids: list
            List with node ids.
        elementids: list
            List with element ids.

        Returns
        -------
        None
        """
        # append group information
        group = {name: {'nodes': nodeids, 'elements': elementids}}
        self._groups.update(group)
        return

    def build_tag(self, tag_dict):
        """
        Builds a tag with the dict given in tag_dict.

        Parameters
        ----------
        tag_dict : dict
            dict with the following format:
            { tagname1 : { tagvalue1 : [elementids],
                           tagvalue2 : [elementids],
                           ... },
              tagname2 : { tagvalue1 : [elementids],
                           tagvalue2 : [elementids],
                           ... },
              ... }

        Returns
        -------
        None
        """
        # append tag information
        self._tags.update(tag_dict)
        return None

    def return_mesh(self):
        """
        Returns the mesh as dict container.

        This function must be called after the building process is done.

        Returns
        -------
        meshcontainer : dict
            Meshcontainer described with a dict with the following keys:
            'nodes': nodes_df (nodes dataframe)
            'elements': el_df (elements dataframe)
            'groups': self._groups (groups)
            'dimension': self._dimension (dimension)
            The tags are included in the elements dataframe.
        """
        # Check dimension of model
        if self._dimension is None:
            if not VOLUME_ELEMENTS_3D.intersection(set(self._el_df_eleshapes)):
                # No 3D element in eleshapes, thus:
                self._dimension = 2
            else:
                self._dimension = 3
        # If dimension = 2 cut the z coordinate
        x = self._nodes[:, 1]
        y = self._nodes[:, 2]
        no_of_nodes = self._nodes.shape[0]
        if self._dimension == 2:
            z = np.zeros(no_of_nodes)
        else:
            z = self._nodes[:, 3]
        iloc = np.arange(no_of_nodes)
        nodes_df = pd.DataFrame({'x': x, 'y': y, 'z': z, 'iloc': iloc},
                                index=np.array(self._nodes[:, 0], dtype=int))
        # write properties
        # The iconnectivity is the row based connectivity (pointing to row indices in a nodes array instead of nodeids)
        iconnectivity = np.arange(len(self._el_df_indices))
        data = {'shape': self._el_df_eleshapes,
                'connectivity': self._el_df_connectivity,
                'iconnectivity': iconnectivity}
        el_df = pd.DataFrame(data, index=self._el_df_indices)
        # Write tags into the dataframe
        for tag_name, tag_value_dict in self._tags.items():
            el_df[tag_name] = None
            if tag_value_dict is not None:
                for tag_value, elem_list in tag_value_dict.items():
                    try:
                        el_df.loc[elem_list, (tag_name)] = tag_value
                    except Exception:
                        temp_list = el_df[tag_name].tolist()
                        for elem in elem_list:
                            temp_list[elem] = tag_value
                        el_df[tag_name] = temp_list
        # Building the meshcontainer for return
        meshcontainer = {'nodes': nodes_df,
                         'elements': el_df,
                         'groups': self._groups,
                         'dimension': self._dimension,
                         }
        return meshcontainer
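A hedged usage sketch of the converter above; the node/element ids, the calling order and the 'Tri3' shape string are invented for illustration and not taken from the file:

# Hypothetical driver code; normally a reader/postprocessor converter calls these hooks.
converter = AmfePostprocessMeshConverter()
converter.build_mesh_dimension(2)
converter.build_node(1, 0.0, 0.0, 0.0)   # node id, x, y, z
converter.build_node(2, 1.0, 0.0, 0.0)
converter.build_node(3, 0.0, 1.0, 0.0)
converter.build_element(10, 'Tri3', [1, 2, 3])   # 'Tri3' is an assumed shape string
converter.build_group('boundary', nodeids=[1, 2], elementids=[10])
mesh = converter.return_mesh()
print(mesh['nodes'])      # pandas DataFrame with x, y, z, iloc
print(mesh['elements'])   # pandas DataFrame with shape, connectivity, iconnectivity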
9,286
show_applicant.py
DivyaGSun/BRITE_REU_database
1
2169869
#!/usr/bin/python

import sys
import cgi
import cgitb
import sqlite3

reload(sys)
#import sys
sys.setdefaultencoding('utf-8')

cgitb.enable()

# html
print("Content-type: text/html\n")
print('<meta charset="utf-8">')
print("<html><head>")
print("<title>BRITE REU Applicants</title>")
print('''
<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/nav.css">
<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/reviewer.css">
</head>''')
print("<body>")
print('''<div id="bg-image">''')
print('''<div id ="topnav">
<a class="active" href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/show_applicant.py">Applicant List</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/applicant_stats.py">Applicant Statistics</a>
<a href="#about">My Past Reviews</a>
<a href="#contact">About/Contact</a>
</div>''')
print("<h1>Applicant Information</h1>")
print("<h3>Select applicant ID to write review | Click on ? for types of Filtering</h3>")

#did not include form action right now
#print('''<form action="https://bioed.bu.edu/cgi-bin/students_21/jpatel2/show_applicant.py" method="post" >
#      </form>''')

print('<table id= Applicant class="dataframe">')
print("<tr><th>Applicant ID</th><th>Full Application</th><th>First Name</th><th>Last Name</th><th>Country</th><th>First Gen</th><th>School</th><th>Standing</th><th>Major</th><th>GPA</th><th>Date Submitted</th><th>Review Status</th></tr>")

#query to print applicant data
query1 = "SELECT aid, documents, firstname, lastname, country, firstgen, institution, standing, major, gpa, submitdate, reviewstatus FROM Applicant;"

connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()

try:
    #execute query
    c.execute(query1)
    #get results to above standard query
    results = c.fetchall()
except Exception:
    print("<p><font color=red><b>Error</b></font></p>")

#added proper URL for reference to reviewer page
for row in results:
    print('''<tr><td><a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/reviewer.py?AID=%s">%s</a></td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>''' % (row[0], row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11]))

c.close()
connection.close()

print("</table>")
#print("</body>")

print('''<script src="https://bioed.bu.edu/students_21/group_proj/group_K/tablefilter/tablefilter.js"></script>''')
print('''<script data-config="">
var filtersConfig = {
    base_path: 'https://bioed.bu.edu/students_21/divyas3/tablefilter/',
    auto_filter: {
        delay: 110 //milliseconds
    },
    filters_row_index: 1,
    state: true,
    alternate_rows: true,
    rows_counter: true,
    btn_reset: true,
    status_bar: true,
    msg_filter: 'Filtering...'
};
var tf = new TableFilter(Applicant, filtersConfig);
tf.init();
</script>''')
print("</body> </html>")
#print("</html>")
3,174
api/test_songs.py
olefrank/ngsongslist
0
2170839
import unittest, json
from songs import Songs


class SongsTests(unittest.TestCase):

    def test_get_songs(self):
        songs_library = Songs(Songs.TEST_FILE)
        songs = json.loads(songs_library.get_songs())
        #check the count
        self.assertEqual(6, len(songs))
        #check ordering
        self.assertEqual("Lycanthropic Metamorphosis", songs[0]['title'])
        self.assertEqual("The Yousicians", songs[0]['artist'])
        self.assertEqual("Wishing In The Night", songs[1]['title'])
        self.assertEqual("You've Got The Power", songs[2]['title'])
        self.assertEqual("Opa Opa Ta Bouzoukia", songs[3]['title'])
        self.assertEqual(5, songs[3]['rating'])
        self.assertEqual("Awaki-Waki", songs[4]['title'])
        self.assertEqual(4.8, songs[4]['rating'])
        self.assertEqual("Mr Fastfinger", songs[4]['artist'])
        self.assertEqual("A New Kennel", songs[5]['title'])

    def test_get_songs_paging(self):
        songs_library = Songs(Songs.TEST_FILE)
        songs = json.loads(songs_library.get_songs(skip=1, count=2))
        self.assertEqual(2, len(songs))
        self.assertEqual("Wishing In The Night", songs[0]['title'])
        self.assertEqual("You've Got The Power", songs[1]['title'])
        songs = json.loads(songs_library.get_songs(skip=5, count=2))
        self.assertEqual(1, len(songs))
        self.assertEqual("A New Kennel", songs[0]['title'])
        songs = json.loads(songs_library.get_songs(skip=6, count=20))
        self.assertEqual(0, len(songs))
        songs = json.loads(songs_library.get_songs(skip=0, count=0))
        self.assertEqual(0, len(songs))

    def test_get_average_difficulty(self):
        songs_library = Songs(Songs.TEST_FILE)
        avg_difficulty = json.loads(songs_library.get_average_difficulty())['avg_difficulty']
        self.assertEqual(12.93, avg_difficulty)

    def test_search(self):
        songs_library = Songs(Songs.TEST_FILE)
        songs = json.loads(songs_library.search_songs('Lycanthropic'))
        self.assertEqual("Lycanthropic Metamorphosis", songs[0]['title'])
        self.assertEqual(1, len(songs))
        # search should be case-insensitive
        songs = json.loads(songs_library.search_songs('metamorphosis'))
        self.assertEqual(1, len(songs))
        self.assertEqual("Lycanthropic Metamorphosis", songs[0]['title'])
        songs = json.loads(songs_library.search_songs('The YOUsicians'))
        self.assertEqual(5, len(songs))
        self.assertEqual("Lycanthropic Metamorphosis", songs[0]['title'])
        self.assertEqual("Wishing In The Night", songs[1]['title'])
        self.assertEqual("You've Got The Power", songs[2]['title'])
        self.assertEqual("Opa Opa Ta Bouzoukia", songs[3]['title'])
        self.assertEqual("A New Kennel", songs[4]['title'])
        songs = json.loads(songs_library.search_songs('xx'))
        self.assertEqual(0, len(songs))
        songs = json.loads(songs_library.search_songs(''))
        self.assertEqual(6, len(songs))


#execute the tests
if __name__ == '__main__':
    unittest.main()
3,048
PyHive/VcfIntegration/SNPTools_poprob.py
elowy01/igsr_analysis
3
2170326
import eHive
import os
from VCFIntegration.SNPTools import SNPTools


class SNPTools_poprob(eHive.BaseRunnable):
    """Run SNPTools poprob on a VCF containing biallelic SNPs"""

    def run(self):
        vcf_g = SNPTools(vcf=self.param_required('vcf_file'),
                         snptools_folder=self.param_required('snptools_folder'))
        outprefix = os.path.split(self.param_required('outprefix'))[1]

        if self.param_is_defined('work_dir'):
            if not os.path.isdir(self.param('work_dir')):
                os.makedirs(self.param('work_dir'))

        prob_f = ""
        if self.param_is_defined('verbose'):
            prob_f = vcf_g.run_poprob(outprefix=outprefix,
                                      rawlist=self.param_required('rawlist'),
                                      outdir=self.param_required('work_dir'),
                                      verbose=True)
        else:
            prob_f = vcf_g.run_poprob(outprefix=outprefix,
                                      rawlist=self.param_required('rawlist'),
                                      outdir=self.param_required('work_dir'),
                                      verbose=False)

        self.param('prob_f', prob_f)

    def write_output(self):
        self.warning('Work is done!')
        self.dataflow({'prob_f': self.param('prob_f')}, 1)
1,349
conversion_tools/speclib_to_mgf.py
xiaoping-yang/ms2pip_c
14
2170879
#!/usr/bin/env python3
"""
Convert MSP and SPTXT spectral library files.

Writes three files: mgf with the spectra; PEPREC with the peptide sequences;
meta with additional metainformation.

Arguments:
    arg1 path to spectral library file
    arg2 prefix for spec_id
"""

import re
import sys
import logging

AMINO_MASSES = {
    "A": 71.037114,
    "C": 103.009185,
    "D": 115.026943,
    "E": 129.042593,
    "F": 147.068414,
    "G": 57.021464,
    "H": 137.058912,
    "I": 113.084064,
    "K": 128.094963,
    "L": 113.084064,
    "M": 131.040485,
    "N": 114.042927,
    "P": 97.052764,
    "Q": 128.058578,
    "R": 156.101111,
    "S": 87.032028,
    "T": 101.047679,
    "V": 99.068414,
    "W": 186.079313,
    "Y": 163.063329,
}
PROTON_MASS = 1.007825035
WATER_MASS = 18.010601


def setup_logging():
    """Initiate logging."""
    root_logger = logging.getLogger()
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(module)s %(message)s")
    )
    root_logger.addHandler(handler)
    root_logger.setLevel(logging.INFO)


def parse_peprec_mods(mods, ptm_list):
    """Parse PEPREC modification string out of MSP Mod string."""
    if mods.split("/")[0] != "0":
        num_mods = mods[0]
        mod_list = [mod.split(",") for mod in mods.split("/")[1:]]
        peprec_mods = []
        for location, aa, name in mod_list:
            if not (location == "0" and name == "iTRAQ"):
                location = str(int(location) + 1)
            peprec_mods.append(location)
            peprec_mods.append(name)
            if name not in ptm_list:
                ptm_list[name] = 1
            else:
                ptm_list[name] += 1
        peprec_mods = "|".join(peprec_mods)
    else:
        peprec_mods = "-"
    return peprec_mods


def validate(spec_id, peptide, charge, mods, reported_mw):
    """Validate amino acids and reported peptide mass."""
    invalid_aas = ["B", "J", "O", "U", "X", "Z"]
    if any(aa in invalid_aas for aa in peptide):
        logging.warning("Peptide with non-canonical amino acid found: %s", peptide)
    elif (
        mods.split("/")[0] == "0"
    ):  # Cannot validate mass of peptide with unknown modification
        calculated = WATER_MASS + sum([AMINO_MASSES[x] for x in peptide])
        reported = float(reported_mw) * float(charge) - float(charge) * PROTON_MASS
        if abs(calculated - reported) > 0.5:
            logging.warning(
                "Reported MW does not match calculated mass for spectrum %s", spec_id
            )


def parse_speclib(speclib_filename, title_prefix, speclib_format="msp"):
    """Parse MSP file."""
    filename = ".".join(speclib_filename.split(".")[:-1])
    fpip = open(filename + ".peprec", "w")
    fpip.write("spec_id modifications peptide charge\n")
    fmgf = open(filename + ".mgf", "w")
    fmeta = open(filename + ".meta", "w")

    with open(speclib_filename) as f:
        mod_dict = {}
        spec_id = 1
        peak_sep = None
        peptide = None
        charge = None
        parentmz = None
        mods = None
        purity = None
        HCDenergy = None
        read_spec = False
        mgf = ""

        for row in f:
            if read_spec:
                # Infer peak int/mz separator
                if not peak_sep:
                    if "\t" in row:
                        peak_sep = "\t"
                    elif " " in row:
                        peak_sep = " "
                    else:
                        raise ValueError("Invalid peak separator")
                line = row.rstrip().split(peak_sep)

                # Read all peaks, so save to output files and set read_spec to False
                if row[0].isdigit():
                    # Continue reading spectrum
                    mgf += " ".join([line[0], line[1]]) + "\n"
                    continue
                # Last peak reached, finish up spectrum
                else:
                    validate(spec_id, peptide, charge, mods, parentmz)
                    peprec_mods = parse_peprec_mods(mods, mod_dict)
                    fpip.write(
                        "{}{} {} {} {}\n".format(
                            title_prefix, spec_id, peprec_mods, peptide, charge
                        )
                    )
                    fmeta.write(
                        "{}{} {} {} {} {} {}\n".format(
                            title_prefix,
                            spec_id,
                            charge,
                            peptide,
                            parentmz,
                            purity,
                            HCDenergy,
                        )
                    )
                    buf = "BEGIN IONS\n"
                    buf += "TITLE=" + title_prefix + str(spec_id) + "\n"
                    buf += "CHARGE=" + str(charge) + "\n"
                    buf += "PEPMASS=" + parentmz + "\n"
                    fmgf.write("{}{}END IONS\n\n".format(buf, mgf))
                    spec_id += 1
                    read_spec = False
                    mgf = ""

            if row.startswith("Name:"):
                line = row.rstrip().split(" ")
                tmp = line[1].split("/")
                peptide = tmp[0].replace("(O)", "")
                if speclib_format == "sptxt":
                    peptide = re.sub(r"\[\d*\]|[a-z]", "", peptide)
                charge = tmp[1].split("_")[0]
                continue

            elif row.startswith("Comment:"):
                line = row.rstrip().split(" ")
                for i in range(1, len(line)):
                    if line[i].startswith("Mods="):
                        tmp = line[i].split("=")
                        mods = tmp[1]
                    if line[i].startswith("Parent="):
                        tmp = line[i].split("=")
                        parentmz = tmp[1]
                    if line[i].startswith("Purity="):
                        tmp = line[i].split("=")
                        purity = tmp[1]
                    if line[i].startswith("HCD="):
                        tmp = line[i].split("=")
                        HCDenergy = tmp[1].replace("eV", "")
                continue

            elif row.startswith("Num peaks:") or row.startswith("NumPeaks:"):
                read_spec = True
                continue

    fmgf.close()
    fpip.close()
    fmeta.close()

    return spec_id, mod_dict


def main():
    """Run CLI."""
    # Get arguments
    speclib_filename = sys.argv[1]
    title_prefix = sys.argv[2]
    speclib_ext = speclib_filename.split(".")[-1]
    if speclib_ext.lower() == "sptxt":
        speclib_format = "sptxt"
    elif speclib_ext.lower() == "msp":
        speclib_format = "msp"
    else:
        raise ValueError("Unknown spectral library format: `%s`" % speclib_ext)

    logging.info("Converting %s to MGF, PEPREC and meta file", speclib_filename)
    num_peptides, mod_dict = parse_speclib(
        speclib_filename, title_prefix, speclib_format=speclib_format
    )
    logging.info(
        "Finished!\nSpectral library contains %i peptides and the following modifications: %s",
        num_peptides,
        mod_dict,
    )


if __name__ == "__main__":
    setup_logging()
    main()
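Based on the module docstring, a typical invocation might look like the following (the library filename and prefix are made-up examples):

# python speclib_to_mgf.py library.msp mylib_
# writes library.mgf, library.peprec and library.meta next to the input file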
7,284
AVAPy/data_wizard/utils/__init__.py
antvis/AVAPy
2
2170254
# pylint: disable=C0103
# flake8: noqa
"""
Util functions for data_wizard
"""
from AVAPy.data_wizard.utils.json import *
from AVAPy.data_wizard.utils.typeinfer import *
170
source/emp_evaluation_system/migrations/0022_algorithm.py
LukasLandwich/energy_management_panel
0
2169212
# Generated by Django 3.1.3 on 2021-02-03 07:16

import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('emp_evaluation_system', '0021_auto_20210202_1539'),
    ]

    operations = [
        migrations.CreateModel(
            name='Algorithm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='The name of the algorithm. Shown in admin page overview and at the frontend algorithm coparison page.', max_length=128)),
                ('backend_identifier', models.SlugField(help_text='The identifier is used to call the simulation API. Therefore, it has to be the exact same as the algorithm identifier at the backend!', max_length=64)),
                ('start_time', models.DateTimeField(default=datetime.datetime(2021, 2, 4, 7, 16, 30, 112370, tzinfo=utc), help_text='A starting time for the algorithm simulation.')),
                ('has_end_time', models.BooleanField(help_text='If the algorithm simulation has no specified end time, the simulation will use the acutual time when running as end time.')),
                ('end_time', models.DateTimeField(default=django.utils.timezone.now, help_text="A end time for the algorithm simulation. Only used when 'has end time' is checked.")),
                ('description', models.TextField(blank=True, default=None, help_text='Give a description for other users. Will only be shown in admin context.', null=True)),
            ],
        ),
    ]
1,679
modules/controls.py
mattmaniak/Termyy
0
2170177
import sys
import termios
import tty

import modules.menu as menu
import modules.render as render


class Chars:
    w = ('w', 119)
    s = ('s', 115)
    a = ('a', 97)
    d = ('d', 100)
    capital_p = ('P', 80)
    enter = 13


def menu_event(pressed_key):
    if pressed_key in Chars.w:  # Y axis is inverted in comparison to math.
        if menu.selected_button <= 1:
            menu.selected_button = 1
        else:
            menu.selected_button -= 1

    elif pressed_key in Chars.s:
        if menu.selected_button >= 2:
            menu.selected_button = 2
        else:
            menu.selected_button += 1

    elif menu.selected_button == 1 and ord(pressed_key) == Chars.enter:
        render.flushFrame()  # New game button.
        termios.tcflush(sys.stdin, termios.TCIOFLUSH)  # Flush input buffer.
        return 1

    elif menu.selected_button == 2 and ord(pressed_key) == Chars.enter:
        render.flushFrame()  # Exit button.
        exit(0)


def game_event(pressed_key):
    if pressed_key in Chars.w:
        if render.Player.y <= 0:
            render.Player.y = 0
        else:
            render.Player.y -= 1

    elif pressed_key in Chars.s:
        if render.Player.y >= render.Map.height - render.Player.height:
            render.Player.y = render.Map.height - render.Player.height
        else:
            render.Player.y += 1

    elif pressed_key in Chars.a:
        if render.Player.x <= 0:
            render.Player.x = 0
        else:
            render.Player.x -= 1

    elif pressed_key in Chars.d:
        if render.Player.x >= render.Map.width - render.Player.width:
            render.Player.x = render.Map.width - render.Player.width
        else:
            render.Player.x += 1

    elif pressed_key in Chars.capital_p:  # Exit key.
        render.flushFrame()
        return True


def key_event(type):
    # https://code.activestate.com/recipes/134892/
    file_descriptor = sys.stdin.fileno()
    old_settings = termios.tcgetattr(file_descriptor)
    try:
        tty.setraw(sys.stdin.fileno())
        pressed_key = sys.stdin.read(1)
    finally:
        termios.tcsetattr(file_descriptor, termios.TCSADRAIN, old_settings)

    if type == "menu":
        return menu_event(pressed_key)
    elif type == "game":
        return game_event(pressed_key)
2,310
docs/tutorial/pytorch/alexnet_fashion_mnist/fashion_mnist.py
intel/neural-compressor
172
2170199
import torch
from torchvision import datasets, transforms


def download_dataset():
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5), (0.5))
    ])
    train_dataset = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
    test_dataset = datasets.FashionMNIST('./data', train=False, transform=transform)
    return train_dataset, test_dataset


def data_loader(batch_size=200):
    train_dataset, test_dataset = download_dataset()
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
    return train_loader, test_loader


def main():
    train_loader, test_loader = data_loader(batch_size=100)
    print(train_loader.batch_size * len(train_loader))
    print(test_loader.batch_size * len(test_loader))


if __name__ == "__main__":
    main()
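A short sketch of pulling one batch from the loaders above; the printed shapes are what Fashion-MNIST (28x28 grayscale images, 10 classes) should produce:

train_loader, test_loader = data_loader(batch_size=100)
images, labels = next(iter(train_loader))   # one shuffled batch
print(images.shape)   # expected: torch.Size([100, 1, 28, 28])
print(labels.shape)   # expected: torch.Size([100])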
1,036
grokking-the-coding-interview/bfs/Reverse-Level-Order-Traversal-(easy).py
huandrew99/LeetCode
36
2170307
""" LC 107 Given a binary tree, populate an array to represent its level-by-level traversal in reverse order, i.e., the lowest level comes first. You should populate the values of all nodes in each level from left to right in separate sub-arrays. """ from collections import deque class TreeNode: def __init__(self, val): self.val = val self.left, self.right = None, None def traverse(root): if not root: return [] res = [] levels = [[root]] # traverse all while levels[-1]: levels.append([]) for node in levels[-2]: # now it's -2 if node.left: levels[-1].append(node.left) if node.right: levels[-1].append(node.right) # backward levels.pop() while levels: nodes = levels.pop() res.append([node.val for node in nodes]) return res def main(): # [[9, 10, 5], [7, 1], [12]] root = TreeNode(12) root.left = TreeNode(7) root.right = TreeNode(1) root.left.left = TreeNode(9) root.right.left = TreeNode(10) root.right.right = TreeNode(5) print("Reverse level order traversal: " + str(traverse(root))) main() """ Time O(N) Space O(N) """
1,141
astronex/directions.py
jaratma/astro-nex
1
2170026
# -*- coding: utf-8 -*-
import math
from datetime import datetime, timedelta, date, time
from pytz import timezone
import pysw
from utils import parsestrtime


def solar_rev(boss):
    date, time = parsestrtime(boss.state.curr_chart.date)
    d, m, y = [int(i) for i in date.split("/")]
    nowyear = boss.state.date.dt.year
    julday = pysw.julday(nowyear, m, d, 0.0)
    sun = boss.state.curr_chart.planets[0]
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow > sun:
        julday -= 0.1
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow < sun:
        julday += 0.01
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    julday -= 0.01
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow < sun:
        julday += 0.001
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    julday -= 0.001
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow < sun:
        julday += 0.0001
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    julday -= 0.0001
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow < sun:
        julday += 0.00001
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    julday -= 0.00001
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    while sunnow < sun:
        julday += 0.000001
        s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    julday -= 0.000001
    s, sunnow, e = pysw.calc(julday, 0, boss.state.epheflag)
    sol = pysw.revjul(julday)
    zone = boss.state.curr_chart.zone
    dt = boss.state.date.getnewdt(sol)
    boss.da.panel.set_date_only(dt)


def sec_prog(boss):
    chart = boss.state.curr_chart
    if not chart.date:
        chart = boss.state.now
    date = strdate_to_date(chart.date)
    nowyear = boss.state.date.dt.year
    birthyear = date.year
    yearsfrombirth = nowyear - birthyear
    progdate = date + timedelta(yearsfrombirth)
    if not boss.da.sec_alltimes:
        dt = combine_date(progdate)
        boss.state.calcdt.setdt(dt)
        boss.state.setprogchart(chart)
        birthday = synthbirthday(date, nowyear)
        boss.da.panel.set_date_only(birthday)
    else:
        nowdate = boss.state.date.dt
        prev_birthday = synthbirthday(date, nowyear)
        next_birthday = synthbirthday(date, nowyear + 1)
        delta = nowdate - prev_birthday
        if delta.days < 0:
            next_birthday = prev_birthday
            prev_birthday = synthbirthday(date, nowyear - 1)
            delta = nowdate - prev_birthday
            yearsfrombirth -= 1
        yeardelta = next_birthday - prev_birthday
        wholedelta = delta.days * 86400 + delta.seconds
        wholeyeardelta = yeardelta.days * 86400 + yeardelta.seconds
        frac = wholedelta / float(wholeyeardelta)
        oneday_ahead = date + timedelta(yearsfrombirth + 1)
        daydelta = (oneday_ahead - progdate)
        daydelta = timedelta(daydelta.days * frac, daydelta.seconds * frac)
        inbetween_progdate = progdate + daydelta
        dt = combine_date(inbetween_progdate)
        boss.state.calcdt.setdt(dt)
        boss.state.setprogchart(chart)
        #curr.setloc(city,code)
        #curr.calcdt.setdt(datetime.datetime.combine(self.date,self.time))
        #curr.setchart()


def strdate_to_date(strdate):
    date, _, time = strdate.partition('T')
    try:
        y, mo, d = [int(x) for x in date.split('-')]
    except ValueError:
        print date
    zone, time = time[8:], time[:5]
    try:
        zone.index(':')
        delta, zone = zone[:6], zone[6:]
        d1, d2 = delta[1:3], delta[4:6]
        tot = int(d1) + int(d2) / 60.0
    except ValueError:
        delta, zone = zone[:5], zone[5:]
        d1, d2 = delta[1:3], delta[3:5]
        tot = int(d1) + int(d2)
    sign = {'+': 1, '-': -1}[delta[0]]
    delta = tot * sign
    h, m = [int(x) for x in time.split(':')]
    #h = (h + m/60.0) - delta
    #m = int((h - int(h))*60)
    return datetime(y, mo, d, int(h), m, 0, tzinfo=timezone('UTC'))


def combine_date(dt):
    newdate = date(dt.year, dt.month, dt.day)
    newtime = time(dt.hour, dt.minute, dt.second)
    return datetime.combine(newdate, newtime)


def synthbirthday(date, nowyear):
    h = date.hour
    m = date.minute
    s = date.second
    y = nowyear
    mo = date.month
    d = date.day
    return datetime(y, mo, d, h, m, s, tzinfo=timezone('UTC'))
4,347
src/when_ml_pipeline_meets_hydra/api/deployment.py
omry/When-ML-pipeline-meets-Hydra
0
2170228
import json


def foo(cluster_info):
    print("========== Run deployment's 'foo' subcommand ==========")
    print(f"cluster_info:\n{json.dumps(dict(cluster_info), indent=2)}")
    print("Do something here!")


def bar(cluster_info):
    print("========== Run deployment's 'bar' subcommand ==========")
    print(f"cluster_info:\n{json.dumps(dict(cluster_info), indent=2)}")
    print("Do something here!")
408
bot/tests/conftest.py
sh4rpy/volodya
0
2170219
import os

import pytest
from django.conf import settings
from dotenv import load_dotenv

from users.models import TelegramUser

load_dotenv()


@pytest.fixture(scope='session')
def django_db_setup():
    settings.DATABASES['default'] = {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.getenv('DB_NAME', 'postgres'),
        'USER': os.getenv('DB_USER', 'postgres'),
        'HOST': os.getenv('DB_HOST', 'db'),
        'PORT': os.getenv('DB_PORT', 5432),
        'PASSWORD': os.getenv('DB_PASSWORD', '<PASSWORD>'),
    }


@pytest.fixture
def get_telegram_admin_user_id():
    return TelegramUser.objects.filter(is_admin=True).first().telegram_id


@pytest.fixture
def get_telegram_user_id():
    return TelegramUser.objects.filter(is_admin=False).first().telegram_id
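A sketch of a test that could consume these fixtures (assumes pytest-django is installed; the assertion itself is only illustrative):

import pytest

@pytest.mark.django_db
def test_admin_and_regular_ids_differ(get_telegram_admin_user_id, get_telegram_user_id):
    # Hypothetical check: the two fixtures should resolve to different users.
    assert get_telegram_admin_user_id != get_telegram_user_id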
792
app/views.py
patrickbeeson/has-it-ever-been
1
2170789
import os

import requests
from geopy.geocoders import Nominatim
from flask import Flask, render_template, flash

from . import app
from .forms import LocationForm

app.config.from_object(os.environ['APP_SETTINGS'])

WUNDERGROUND_BASE_URL = app.config['WUNDERGROUND_BASE_URL']
WUNDERGROUND_API_KEY = app.config['WUNDERGROUND_API_KEY']

# base urls
CONDITIONS_BASE_URL = '{}{}/conditions/q/'.format(
    WUNDERGROUND_BASE_URL,
    WUNDERGROUND_API_KEY
)
ALMANAC_BASE_URL = '{}{}/almanac/q/'.format(
    WUNDERGROUND_BASE_URL,
    WUNDERGROUND_API_KEY
)


def geocode_location(location):
    "Get lat and lon coordinates for a zip code"
    try:
        geolocator = Nominatim()
        location = geolocator.geocode(location)
    except Exception as e:
        print('There was a problem geocoding this address: {}'.format(e))
    return location


def get_current_temp(lat, lon):
    "Get the current temp for a given location"
    r = requests.get('{base}{lat},{lon}.json'.format(
        base=CONDITIONS_BASE_URL, lat=lat, lon=lon)
    )
    json_string = r.json()
    current_temp = json_string['current_observation']['temp_f']
    return int(current_temp)


def get_almanac_data(lat, lon):
    "Get the almanac data for a given location"
    r = requests.get('{base}{lat},{lon}.json'.format(
        base=ALMANAC_BASE_URL, lat=lat, lon=lon)
    )
    json_string = r.json()
    almanac_data = {}
    almanac_data['record_high'] = json_string['almanac']['temp_high']['record']['F']
    almanac_data['record_low'] = json_string['almanac']['temp_low']['record']['F']
    almanac_data['record_high_year'] = json_string['almanac']['temp_high']['recordyear']
    almanac_data['record_low_year'] = json_string['almanac']['temp_low']['recordyear']
    return almanac_data


@app.route('/', methods=['GET', 'POST'])
def home():
    """
    Homepage view
    """
    form = LocationForm()
    if form.validate_on_submit():
        temp_choice = form.temp_choice.data
        location = geocode_location(form.location.data)
        lat = location.latitude
        lon = location.longitude
        print(lat, lon)
        current_temp = get_current_temp(lat, lon)
        almanac_data = get_almanac_data(lat, lon)
        record_high = int(almanac_data['record_high'])
        record_low = int(almanac_data['record_low'])
        record_high_year = int(almanac_data['record_high_year'])
        record_low_year = int(almanac_data['record_low_year'])
        temp_diff_high_above = current_temp - record_high
        temp_diff_high_below = record_high - current_temp
        temp_diff_low_above = current_temp - record_low
        temp_diff_low_below = record_low - current_temp
        if temp_choice == 'hot':
            if current_temp >= record_high:
                flash("""It's never been this hot! Currently, it's {} degrees, which is {} degrees above the record of {}, set in {}.""".format(current_temp, temp_diff_high_above, record_high, record_high_year))
            else:
                flash("""It's been this hot before. Currently, it's {} degrees, which is {} degrees below the record of {}, set in {}.""".format(current_temp, temp_diff_high_below, record_high, record_high_year))
        else:
            if current_temp <= record_low:
                flash("""It's never been this cold before. Currently, it's {} degrees, which is {} degrees below the record of {}, set in {}.""".format(current_temp, temp_diff_low_below, record_low, record_low_year))
            else:
                flash("""It's been this cold before. Currently, it's {} degrees, which is {} degrees above the record of {}, set in {}.""".format(current_temp, temp_diff_low_above, record_low, record_low_year))
        return render_template(
            'index.html',
            form=form,
            current_temp=current_temp,
            record_high=record_high,
            record_low=record_low
        )
    return render_template('index.html', form=form)


@app.errorhandler(404)
def page_not_found(error):
    return render_template('404.html'), 404
4,730
chart/chart/python/docs/extract_tutorial_cmds.py
JoeyBF/sseq
7
2170584
import sys
import pathlib
import re
import json

r = re.compile("\n\n>>> (.|\n)*?\n\n", flags=re.MULTILINE)
text = pathlib.Path(sys.argv[1]).read_text()
groups = [m.group(0)[2:-2].split("\n") for m in r.finditer(text)]


def join_continue_lines(lines):
    lines = [line for line in lines if line[:4] in (">>> ", "... ")]
    result = []
    cur_line = ""
    for line in lines:
        if line.startswith(">>> ") and cur_line:
            result.append(cur_line[1:])
            cur_line = ""
        cur_line += f"\n{line[4:]}"
    result.append(cur_line[1:])
    return result


result = [join_continue_lines(group) for group in groups]
print(json.dumps(result))
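Roughly, the script pulls doctest-style blocks out of a tutorial file and prints them as JSON; an invented example of the mapping (not taken from the repository's docs):

# Illustrative only: for an input file containing the block
#
#     >>> x = 1
#     ... y = 2
#     >>> x + y
#
# the script would print something like [["x = 1\ny = 2", "x + y"]],
# i.e. one list per matched block, with "... " continuation lines joined
# onto the preceding ">>> " command.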
664
galaxyZooNet/utils.py
hungjinh/galaxyZooNet
2
2170945
import yaml
from easydict import EasyDict


def get_config_from_yaml(file_yaml):
    '''Get the config from a yaml file

    Args:
        file_yaml: path to the config yaml file
    Return:
        config (EasyDict)
    '''
    with open(file_yaml, 'r') as file_config:
        try:
            config = EasyDict(yaml.safe_load(file_config))
            return config
        except ValueError:
            print("INVALID yaml file format.")
            exit(-1)


if __name__ == '__main__':
    config = get_config_from_yaml('../configs/resnet50_test.yaml')
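A sketch of the attribute-style access the returned EasyDict enables (the keys and path below are invented, not read from configs/resnet50_test.yaml):

# Hypothetical config file contents:
#   batch_size: 64
#   lr: 0.001
config = get_config_from_yaml('config.yaml')   # made-up path
print(config.batch_size, config.lr)            # EasyDict allows dot access to keys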
574
tests/test_randommock.py
GodspeedYouBlackEmperor/pyalcs
11
2170960
from tests.randommock import RandomMock, SampleMock


class TestRandomTest:

    def test_randommock_returns_values_in_a_given_sequence(self):
        f = RandomMock([0.1, 0.2, 0.3])
        assert 0.1 == f()
        assert 0.2 == f()
        assert 0.3 == f()

    def test_samplemock_returns_list_elements_in_a_given_sequence_1(self):
        sample_func = SampleMock([2, 0, 1])
        assert [15, 3, 14] == sample_func([3, 14, 15], 3)

    def test_samplemock_returns_list_elements_in_a_given_sequence_2(self):
        sample_func = SampleMock([2, 0, 1])
        assert [15, 3, 14] == sample_func([3, 14, 15, 92, 6], 3)

    def test_samplemock_returns_list_elements_in_a_given_sequence_3(self):
        sample_func = SampleMock([1, 15, 2, 15])
        assert [14, 3, 15] == sample_func([3, 14, 15], 3)

    def test_testsample4(self):
        sample_func = SampleMock([10, 2, 1, 15])
        assert [3, 15, 14] == sample_func([3, 14, 15, 92, 6], 3)
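For context, a minimal sketch of mock classes that would satisfy the tests above (the real tests/randommock.py may be implemented differently):

class RandomMock:
    # Returns the preset values one by one on each call.
    def __init__(self, values):
        self._it = iter(values)

    def __call__(self):
        return next(self._it)


class SampleMock:
    # Picks elements by the preset indices, wrapped modulo the population length.
    def __init__(self, indices):
        self._indices = indices

    def __call__(self, population, k):
        return [population[i % len(population)] for i in self._indices[:k]]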
954
preprocess.py
swetha-sundar/sentiment-analysis
0
2169815
import pandas as pd
import numpy as np

train_file = 'data/train.csv'
test_file = 'data/test.csv'

#Explore the training dataset
#Note: For some reason, if you dont specify the encoding as latin-1, the interpreter will throw an UTF-8 encoding error
data = pd.read_csv(train_file, error_bad_lines=False, encoding='latin-1')
data.columns = ['id', 'sentiment', 'text']
print(data.head(2), "\n\n")

#Id information is not useful. So let's remove it
#axis=1 indicates columns
data = data.drop(labels=['id'], axis=1)
print(data.head(10), "\n\n")

'''
Observations:
1. Data has a mix of alphabets, numbers and symbols
2. Mix of words with uppercase and lowercase letters
3. We need to normalize the words to their base word. Leaving capitalized words in the middle of the tweet
   can be experimented with as they may hold different feature space like name of the person, country, etc.
4. No particular order of sentiment and tweets. If data is not randomly distributed then it can introduce bias to a learning model
5. Need to split and shuffle the data to reduce variance (makes sure the model can generalize better on the data) and does not lead to overfitting
6. Need to get an idea of the distribution of data
'''

#calculate the number of positive and negative tweets
positives = data['sentiment'][data.sentiment == 1]
negatives = data['sentiment'][data.sentiment == 0]

print('Number of positive tweets {}'.format(len(positives)))
print('Number of negative tweets {}'.format(len(negatives)))
print('Total Length of the data is: {}'.format(data.shape[0]))

#Are there any duplicates in the data? Get the unique counts to identify this
print(data.groupby('sentiment').describe())
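Observation 5 above calls for shuffling and splitting the data; one common way to do that (a sketch, not part of the original script) is scikit-learn's train_test_split:

from sklearn.model_selection import train_test_split

# Shuffled, stratified split so both sentiment classes keep their proportions;
# the 80/20 ratio and random_state are assumed values for illustration.
train_df, val_df = train_test_split(
    data, test_size=0.2, shuffle=True, stratify=data['sentiment'], random_state=42
)
print(len(train_df), len(val_df))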
1,688
talking_heads/hyperparams.py
BUVANEASH/Talking_Heads
10
2170905
# -*- coding: utf-8 -*-
#/usr/bin/python3

import os
import re
from ast import literal_eval


class hyperparameters():

    def __init__(self):

        self.use_dlib = True

        # Dataset
        self.dataset = "/media/new_hdd1/VoxCeleb-2/Video/dev/mp4"
        self.data = "/media/new_hdd1/Face_Morp_2.0/Talking_Heads/data"
        self.preprocessed = os.path.join(self.data, "preprocessed")

        # logdir
        self.model = "AK"
        self.modeldir = "/media/new_hdd1/Face_Morp_2.0/Talking_Heads/results/model/"
        self.logdir = os.path.join(self.modeldir, "meta")
        self.fine_logdir = os.path.join(self.modeldir, self.model)

        # No of training videos
        self.train_videos = len(os.listdir(self.preprocessed))

        # Network Architecture parameters
        # Encoder channels and self-attention channel
        self.enc_down_ch = [64, 128, 256, 512]
        self.enc_self_att_ch = 256
        # Decoder channels and self-attention channel
        self.dec_down_ch = [256, 128, 64, 3]
        self.dec_self_att_ch = 256
        # Residual Block channel
        self.res_blk_ch = 512
        # Embedding Vector
        self.N_Vec = 512

        # Considering input and output channel in a residual block, multiple of 2 because beta and gamma affine parameter.
        self.split_lens = [self.res_blk_ch]*11*2 + \
                          [self.res_blk_ch]*2*2 + \
                          [self.res_blk_ch]*2*2 + \
                          [self.dec_down_ch[0]]*2*2 + \
                          [self.dec_down_ch[1]]*2*2 + \
                          [self.dec_down_ch[2]]*2*2 + \
                          [self.dec_down_ch[3]]*2

        # Activation outputs from VGGFace and VGG19
        self.vggface_feat_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
        self.vgg19_feat_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']

        # Training hyperparameters
        # Image Size
        self.img_size = (256, 256, 3)
        # K-shot learning,
        self.K = 8  #8
        # batch size
        self.batch = 1
        # Loss weights
        self.loss_vgg19_wt = 1e-2
        self.loss_vggface_wt = 2e-3
        self.loss_fm_wt = 1e1
        self.loss_mch_wt = 8e1

        self.learning_rate_EG = 5e-5
        self.learning_rate_D = 2e-4

        self.num_iterations = 10000000

        # Logging
        self.log_step = 10
        self.save_step = 1000
        self.summary_step = 100

        # hyperparams json and resourceconfig json
        self.hp_json = "/opt/ml/input/config/hyperparameters.json"
        self.resource_json = "/opt/ml/input/config/resourceConfig.json"

    def update(self, newdata):
        for key, value in newdata.items():
            setattr(self, key, value)


Hyperparams = hyperparameters()


def hp_json(hp_json):
    '''Overrides hyperparams from hyperparameters.json'''
    print("READING ", Hyperparams.hp_json)
    with open(hp_json) as f:
        text = f.read()
        str_dict = re.sub(r"\"(-?\d+(?:[\.,]\d+)?)\"", r'\1', text)
        str_dict = str_dict.replace("\"True\"", "True").replace("\"False\"", "False")
        return literal_eval(str_dict)


def resource_json(resource_json):
    '''Overrides hyperparams from resourceConfig.json'''
    print("READING ", Hyperparams.resource_json)
    with open(resource_json) as f:
        text = f.read()
        str_dict = re.sub(r"\"(-?\d+(?:[\.,]\d+)?)\"", r'\1', text)
        str_dict = str_dict.replace("\"True\"", "True").replace("\"False\"", "False")
        return literal_eval(str_dict)


if os.path.exists(Hyperparams.hp_json):
    Hyperparams.update(hp_json(Hyperparams.hp_json))
else:
    Hyperparams.hp_json = 'hyperparameters.json'
    if os.path.exists(Hyperparams.hp_json):
        Hyperparams.update(hp_json(Hyperparams.hp_json))

if os.path.exists(Hyperparams.resource_json):
    Hyperparams.update(resource_json(Hyperparams.hp_json))
else:
    Hyperparams.resource_json = 'resourceConfig.json'
    if os.path.exists(Hyperparams.resource_json):
        Hyperparams.update(resource_json(Hyperparams.hp_json))

Hyperparams.logdir = os.path.join(Hyperparams.modeldir, "meta")
Hyperparams.fine_logdir = os.path.join(Hyperparams.modeldir, Hyperparams.model)
4,411
official/projects/qat/vision/quantization/helper.py
wnorris/models
1
2168541
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quantization helpers."""
from typing import Any, Dict

import tensorflow as tf
import tensorflow_model_optimization as tfmot

from official.projects.qat.vision.quantization import configs

_QUANTIZATION_WEIGHT_NAMES = [
    'output_max', 'output_min', 'optimizer_step',
    'kernel_min', 'kernel_max',
    'add_three_min', 'add_three_max',
    'divide_six_min', 'divide_six_max',
    'depthwise_kernel_min', 'depthwise_kernel_max',
    'reduce_mean_quantizer_vars_min', 'reduce_mean_quantizer_vars_max',
    'quantize_layer_min', 'quantize_layer_max',
    'quantize_layer_1_min', 'quantize_layer_1_max',
    'quantize_layer_2_min', 'quantize_layer_2_max',
    'quantize_layer_3_min', 'quantize_layer_3_max',
    'post_activation_min', 'post_activation_max',
]

_ORIGINAL_WEIGHT_NAME = [
    'kernel', 'depthwise_kernel',
    'gamma', 'beta', 'moving_mean', 'moving_variance', 'bias'
]


def is_quantization_weight_name(name: str) -> bool:
  simple_name = name.split('/')[-1].split(':')[0]
  if simple_name in _QUANTIZATION_WEIGHT_NAMES:
    return True
  if simple_name in _ORIGINAL_WEIGHT_NAME:
    return False
  raise ValueError('Variable name {} is not supported.'.format(simple_name))


def copy_original_weights(original_model: tf.keras.Model,
                          quantized_model: tf.keras.Model):
  """Helper function that copies the original model weights to the quantized model."""
  original_weight_value = original_model.get_weights()
  weight_values = quantized_model.get_weights()

  original_idx = 0
  for idx, weight in enumerate(quantized_model.weights):
    if not is_quantization_weight_name(weight.name):
      if original_idx >= len(original_weight_value):
        raise ValueError('Not enough original model weights.')
      weight_values[idx] = original_weight_value[original_idx]
      original_idx = original_idx + 1

  if original_idx < len(original_weight_value):
    raise ValueError('Not enough quantized model weights.')

  quantized_model.set_weights(weight_values)


class LayerQuantizerHelper(object):
  """Helper class that handles quantizers."""

  def __init__(self, *args, **kwargs):
    self._quantizers = {}
    self._quantizer_vars = {}
    super().__init__(*args, **kwargs)

  def _all_value_quantizer(self):
    return tfmot.quantization.keras.quantizers.AllValuesQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

  def _moving_average_quantizer(self):
    return tfmot.quantization.keras.quantizers.MovingAverageQuantizer(
        num_bits=8, per_axis=False, symmetric=False, narrow_range=False)

  def _add_quantizer(self, name, all_value_quantizer=False):
    if all_value_quantizer:
      self._quantizers[name] = self._all_value_quantizer()
    else:
      self._quantizers[name] = self._moving_average_quantizer()

  def _apply_quantizer(self, name, inputs, training, **kwargs):
    return self._quantizers[name](
        inputs, training, self._quantizer_vars[name], **kwargs)

  def _build_quantizer_vars(self):
    for name in self._quantizers:
      self._quantizer_vars[name] = self._quantizers[name].build(
          tensor_shape=None, name=name, layer=self)


class NoOpActivation:
  """No-op activation which simply returns the incoming tensor.

  This activation is required to distinguish between `keras.activations.linear`
  which does the same thing. The main difference is that NoOpActivation should
  not have any quantize operation applied to it.
  """

  def __call__(self, x: tf.Tensor) -> tf.Tensor:
    return x

  def get_config(self) -> Dict[str, Any]:
    """Get a config of this object."""
    return {}

  def __eq__(self, other: Any) -> bool:
    if not other or not isinstance(other, NoOpActivation):
      return False
    return True

  def __ne__(self, other: Any) -> bool:
    return not self.__eq__(other)


def quantize_wrapped_layer(cls, quantize_config):

  def constructor(*arg, **kwargs):
    return tfmot.quantization.keras.QuantizeWrapperV2(
        cls(*arg, **kwargs), quantize_config)

  return constructor


def norm_by_activation(activation, norm_quantized, norm_no_quantized):
  if activation not in ['relu', 'relu6']:
    return norm_quantized
  else:
    return norm_no_quantized


Conv2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.Conv2D,
    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], False))
Conv2DOutputQuantized = quantize_wrapped_layer(
    tf.keras.layers.Conv2D,
    configs.Default8BitConvQuantizeConfig(['kernel'], ['activation'], True))
DepthwiseConv2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], False))
DepthwiseConv2DOutputQuantized = quantize_wrapped_layer(
    tf.keras.layers.DepthwiseConv2D,
    configs.Default8BitConvQuantizeConfig(['depthwise_kernel'], ['activation'], True))
GlobalAveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.GlobalAveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
AveragePooling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.AveragePooling2D,
    configs.Default8BitQuantizeConfig([], [], True))
ResizingQuantized = quantize_wrapped_layer(
    tf.keras.layers.Resizing, configs.Default8BitQuantizeConfig([], [], True))
ConcatenateQuantized = quantize_wrapped_layer(
    tf.keras.layers.Concatenate, configs.Default8BitQuantizeConfig([], [], True))
UpSampling2DQuantized = quantize_wrapped_layer(
    tf.keras.layers.UpSampling2D, configs.Default8BitQuantizeConfig([], [], True))
ReshapeQuantized = quantize_wrapped_layer(
    tf.keras.layers.Reshape, configs.Default8BitQuantizeConfig([], [], True))

# pylint:disable=g-long-lambda
BatchNormalizationQuantized = lambda norm_layer: quantize_wrapped_layer(
    norm_layer, configs.Default8BitOutputQuantizeConfig())
BatchNormalizationNoQuantized = lambda norm_layer: quantize_wrapped_layer(
    norm_layer, configs.NoOpQuantizeConfig())
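A hedged sketch of how these wrapped constructors might be used as drop-in replacements for the plain Keras layers; the architecture, filter counts and input shape below are invented for illustration:

# Hypothetical quantization-aware model definition using the helpers above.
inputs = tf.keras.Input(shape=(224, 224, 3))
x = Conv2DQuantized(32, 3, padding='same', activation=NoOpActivation())(inputs)
x = GlobalAveragePooling2DQuantized()(x)
outputs = tf.keras.layers.Dense(10)(x)
model = tf.keras.Model(inputs, outputs)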
6,782
astrosql/deprecated/update.py
ketozhang/astroSQL
0
2170624
""" Update Procedure: 1. Check if the entry exist by basename 2. Check if updated is needed by WCS 3. Write to database """ import os import peeweedb from config import config from pyzaphot import PhotFitsImage from sqlconnector import connect from writer import dict2sql config = config() storepath = config['store'] TABLE = config['mysql']['images_table'] def updater(data, table): db = connect() table = peeweedb.tables[table] try: wcsed = table.get(table.basename == data['basename']).WCSED if not wcsed == 'T': dict2sql(db, table, data) return None except table.DoesNotExist as e: print(e) dict2sql(db, table, data) return None def zaphot_add_one_image_to_db(image, skip=False, delete=False, update=False, table=TABLE): # check image processed or not, if yes, return print("dealing with", image) print("processing image : " + image) imagetmp = PhotFitsImage(image) processed = os.path.isfile(storepath + imagetmp.savepath + imagetmp.uniformname) or os.path.isfile( storepath + imagetmp.savepath + imagetmp.uniformname + '.gz') if processed and not update: print("this image has already been processed") if delete: print("Deleting this image!!!") command = "rm -f {0}".format(image) print(command) # os.system(command) # only do WCS if it is not WCSED if not skip and imagetmp.WCSED != 'T': print('doing wcs here ...') # currently only works for KAIT images command = "Ssolve-field-kait ".format(image) print(command) # os.system(command) imagetmp.extract_zeromagphotinfo() dbinf = imagetmp.get_databaseinfo() updater(dbinf, table) if delete: command = "mv {0} {1}".format( image, storepath + imagetmp.savepath) print(command) # os.system(command) else: command = "cp {0} {1}".format( image, storepath + imagetmp.savepath) print(command) # os.system(command) command = "gzip {0}".format( storepath + imagetmp.savepath + imagetmp.uniformname) print(command) # os.system(command) def main(args): zaphot_add_one_image_to_db(args.image, skip=args.skip, delete=args.delete, update=args.update)
2,347
tests/unit/multi_dimensional_RNN/_test_mdgru_on_2d_grid.py
X-rayLaser/multi-directional-mdrnn
12
2168866
from .test_mdrnn_on_2d_grid import Degenerate2DInputToMDRNNTests, \
    OutputShapeGiven2DTests, OutputShapeGiven6DInputTests
import tensorflow as tf
from mdrnn import MDGRU


class Degenerate2DInputToMDGRUTests(Degenerate2DInputToMDRNNTests):
    def create_mdrnn(self, **kwargs):
        return MDGRU(**kwargs)

    def create_keras_rnn(self, **kwargs):
        return tf.keras.layers.GRU(implementation=1, reset_after=False, **kwargs)


class MDGRUOutputShapeGiven2DTests(OutputShapeGiven2DTests):
    def get_rnn_class(self):
        return MDGRU


class MDGRUOutputShapeGiven6DInputTests(OutputShapeGiven6DInputTests):
    def get_rnn_class(self):
        return MDGRU
674