\n \n \"\"\" % datas\n tmp_html.write(html.encode(encoding))\n tmp_html.close()\n webbrowser.open_new_tab(tmp_html.name)\n"},"size":{"kind":"number","value":4083,"string":"4,083"}}},{"rowIdx":127465,"cells":{"max_stars_repo_path":{"kind":"string","value":"manage.py"},"max_stars_repo_name":{"kind":"string","value":"ProfesseurGibaud/TestSite"},"max_stars_count":{"kind":"number","value":304,"string":"304"},"id":{"kind":"string","value":"2170971"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport os\nimport sys\n\n# This is not part of the regular manage.py files, but ensure students\n# don't get blocked because they use the wrong Python version\nif sys.version_info < (3, 6):\n sys.exit(\"'Django, an app at a time' requires Python 3.6 or greater\")\n\n# This is a hack to allow \"ignore_this_directory\" to be added to the PYTHON PATH\n# and is not part of the usual manage.py file\nfrom project import settings\nsys.path.append(str(settings.BASE_DIR / 'ignore_this_directory'))\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"project.settings\")\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n"},"size":{"kind":"number","value":711,"string":"711"}}},{"rowIdx":127466,"cells":{"max_stars_repo_path":{"kind":"string","value":"skpar/dftbutils/taskdict.py"},"max_stars_repo_name":{"kind":"string","value":"by-student-2017/skpar-0.2.4_Ubuntu18.04LTS"},"max_stars_count":{"kind":"number","value":9,"string":"9"},"id":{"kind":"string","value":"2170288"},"content":{"kind":"string","value":"\"\"\"\nProvide mapping between task names available to user and actual functions.\n\"\"\"\nfrom skpar.core.utils import get_logger\nfrom skpar.dftbutils.queryDFTB import get_dftbp_data, get_bandstructure\nfrom skpar.dftbutils.queryDFTB import get_dftbp_evol\nfrom skpar.dftbutils.queryDFTB import get_effmasses, get_special_Ek\nfrom skpar.dftbutils.plot import magic_plot_bs\n\nLOGGER = get_logger(__name__)\n\nTASKDICT = {\n # obtain data from model evaluations\n 'get_data': get_dftbp_data,\n 'get_evol': get_dftbp_evol,\n 'get_bs' : get_bandstructure,\n 'get_meff': get_effmasses,\n 'get_Ek' : get_special_Ek,\n # plot data\n# this one is currently used via the wrapper of PlotTask in ../core/taskdict.py\n 'plot_bs' : magic_plot_bs,\n }\n"},"size":{"kind":"number","value":744,"string":"744"}}},{"rowIdx":127467,"cells":{"max_stars_repo_path":{"kind":"string","value":"HLTrigger/Configuration/python/HLT_75e33/sequences/HLTFastJetForEgamma_cfi.py"},"max_stars_repo_name":{"kind":"string","value":"PKUfudawei/cmssw"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170865"},"content":{"kind":"string","value":"import FWCore.ParameterSet.Config as cms\n\nfrom ..tasks.HLTFastJetForEgammaTask_cfi import *\n\nHLTFastJetForEgamma = cms.Sequence(\n 
HLTFastJetForEgammaTask\n)\n"},"size":{"kind":"number","value":159,"string":"159"}}},{"rowIdx":127468,"cells":{"max_stars_repo_path":{"kind":"string","value":"scripts/practice/FB/LongestArithmeticSubSequqenceWithDifference.py"},"max_stars_repo_name":{"kind":"string","value":"bhimeshchauhan/competitive_programming"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2168974"},"content":{"kind":"string","value":"\"\"\"\n\nLongest Arithmetic Subsequence\n\nGiven an array nums of integers, return the length of the longest arithmetic subsequence in nums.\n\nRecall that a subsequence of an array nums is a list nums[i1], nums[i2], ..., nums[ik] with \n0 <= i1 < i2 < ... < ik <= nums.length - 1, and that a sequence seq is arithmetic \nif seq[i+1] - seq[i] are all the same value (for 0 <= i < seq.length - 1). \n\nExample 1:\n\nInput: nums = [3,6,9,12]\nOutput: 4\nExplanation: \nThe whole array is an arithmetic sequence with steps of length = 3.\n\nExample 2:\n\nInput: nums = [9,4,7,2,10]\nOutput: 3\nExplanation: \nThe longest arithmetic subsequence is [4,7,10].\n\nExample 3:\n\nInput: nums = [20,1,15,3,10,5,8]\nOutput: 4\nExplanation: \nThe longest arithmetic subsequence is [20,15,10,5].\n\nConstraints:\n\n2 <= nums.length <= 1000\n0 <= nums[i] <= 500\n\n\"\"\"\n\nfrom typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n \"\"\"\n 6 June 2020.\n DP - Bottom up.\n Look at the solution as to how it was done. \n\n T: O(N^2). The use of the double for loops.\n S: O(N^2). The lengths of the dictionary in dp follows this order: 0, 1, 2, 3,...n. That's N^2.\n\n **The literal running time varies with LC. This same solution ran in 2.1ms and 1.1ms.**\n **The literal space time is consistent at like 150MB which is insanely high but beats 80%. 
**\n \"\"\"\n\n def longestArithSeqLength(self, nums: List[int]) -> int:\n # Minimum answer is always 2.\n if len(nums) < 2:\n return len(A)\n\n # The DP is a list of dictionaries.\n # dp[i] is the dictionary for item i in nums\n # Each kv pair in dp[i] is delta:lengthOfSubsequence.\n n = len(nums)\n dp = [{} for i in range(n)]\n result = 2\n\n for i in range(1, n):\n for j in range(i):\n delta = nums[i] - nums[j]\n\n # If we've seen this delta with dp[j], then increase the length of the subseq by 1.\n # This is equivalent of dp[i] 'adding on' to the subsequence.\n if delta in dp[j]:\n currentLength = dp[j].get(delta)\n dp[i][delta] = currentLength + 1\n\n # Else, start a new subsequence with just dp[i] and dp[j].\n # Length is always two.\n else:\n dp[i][delta] = 2\n\n # Update max.\n result = max(result, dp[i][delta])\n return result\n\n\nclass Solution:\n def longestArithSeqLength(self, nums: List[int]):\n \"\"\" \n - have a `sequence_cache` hashmap for each element in the array\n with the keys and values: `{sequence_difference: count/length}`\n - iterate in reverse order\n - for each `element_1`:\n - iterate through all the elements to its right, and for each `element_2`:\n - get the `sequence difference`: (`element_1-element_2`)\n - check if staring a sequence with that sequence difference will be greater than what we have seen b4 for the same sequence difference\n - update the longest var to reflect the longest we have seen so far\n \"\"\"\n longest = 0\n seq_cache = [defaultdict(lambda: 1) for num in nums]\n\n for idx_1 in reversed(range(len(nums))):\n for idx_2 in range(idx_1+1, len(nums)):\n seq_diff = nums[idx_2] - nums[idx_1]\n\n # current_seq_len = max(current_seq_len, seq_starting_at_idx_2_len+1)\n seq_cache[idx_1][seq_diff] = max(\n seq_cache[idx_1][seq_diff], seq_cache[idx_2][seq_diff]+1)\n\n longest = max(longest, seq_cache[idx_1][seq_diff])\n\n return longest\n"},"size":{"kind":"number","value":3683,"string":"3,683"}}},{"rowIdx":127469,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/training/sampling/negative_sampling.py"},"max_stars_repo_name":{"kind":"string","value":"CorentinBrtx/car-detection-opencv"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171409"},"content":{"kind":"string","value":"from typing import List, Tuple\n\nimport numpy as np\n\n\ndef generate_negative_samples(\n img: np.ndarray,\n size: Tuple[int, int] = (64, 64),\n n_samples: int = 10,\n bounding_boxes: List[List[int]] = None,\n max_tries: int = 500,\n) -> List[np.ndarray]:\n \"\"\"\n Generate negative samples from a given image by sampling random patches\n while avoiding the specified bounding boxes.\n\n Parameters\n ----------\n img : np.ndarray\n The image to generate negative samples from.\n size : Tuple[int, int], optional\n Size of the samples, by default (64, 64)\n n_samples : int, optional\n Number of samples to generate, by default 10\n bounding_boxes : List[List[int]], optional\n Bounding boxes to avoid, by default None\n max_tries : int, optional\n Maximum tries to generate samples\n (avoid infinite loop if bounding_boxes take too much space), by default 500\n\n Returns\n -------\n negative_samples : List[np.ndarray]\n List of generated negative samples.\n \"\"\"\n if bounding_boxes is None:\n bounding_boxes = []\n\n mask = np.zeros(img.shape[:2], np.uint8)\n mask_with_margin = np.zeros(img.shape[:2], np.uint8)\n\n for box in bounding_boxes:\n x, y, w, h = box\n mask[y : y + h, x : x + w] = 1\n mask_with_margin[max(0, y - size[0]) : 
y + h, max(0, x - size[1]) : x + w] = 1\n\n mask_with_margin[:, -size[1] :] = 1\n mask_with_margin[-size[0] :, :] = 1\n\n negative_samples = []\n tries = 0\n\n indices = np.transpose(np.nonzero(mask_with_margin == 0))\n\n while len(negative_samples) < n_samples and tries < max_tries:\n tries += 1\n\n top_left = indices[np.random.randint(0, len(indices))]\n\n if (\n mask[top_left[0] : top_left[0] + size[0], top_left[1] : top_left[1] + size[1]].sum()\n == 0\n ):\n negative_samples.append(\n img[top_left[0] : top_left[0] + size[0], top_left[1] : top_left[1] + size[1]]\n )\n\n return negative_samples\n"},"size":{"kind":"number","value":2029,"string":"2,029"}}},{"rowIdx":127470,"cells":{"max_stars_repo_path":{"kind":"string","value":"mc2pbrt/pyanvil/world.py"},"max_stars_repo_name":{"kind":"string","value":"PbrtCraft/mc2pbrt"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2171020"},"content":{"kind":"string","value":"import os\nimport math\nimport zlib\n\nimport pyanvil.nbt as nbt\nimport pyanvil.stream as stream\n\n\nclass BlockState:\n def __init__(self, name, props):\n self.name = name\n self.props = props\n\n def __str__(self):\n return 'BlockState(' + self.name + ',' + str(self.props) + ')'\n\n\nclass Block:\n AIR = None\n\n def __init__(self, state):\n self.state = state\n\n def __str__(self):\n return 'Block(' + str(self.state) + ')'\n\n def get_state(self):\n return self.state\n\n\nBlock.AIR = Block(BlockState('minecraft:air', {}))\n\n\nclass ChunkSection:\n def __init__(self, blocks, raw_section, y_index):\n self.blocks = blocks\n self.raw_section = raw_section\n self.y_index = y_index\n\n def get_block(self, block_pos):\n x = block_pos[0]\n y = block_pos[1]\n z = block_pos[2]\n\n return self.blocks[x + z * 16 + y * 16 ** 2]\n\n\nclass Chunk:\n def __init__(self, xpos, zpos, raw_nbt):\n self.xpos = xpos\n self.zpos = zpos\n self._build(raw_nbt)\n\n def _build(self, raw_nbt):\n sections = {}\n level_node = raw_nbt.get('Level')\n for section in level_node.get('Sections').children:\n if section.has('BlockStates'):\n flatstates = [c.get()\n for c in section.get('BlockStates').children]\n pack_size = int((len(flatstates) * 64) / (16**3))\n states = [\n self._read_width_from_loc(flatstates, pack_size, i) for i in range(16**3)\n ]\n palette = [\n BlockState(\n state.get('Name').get(),\n state.get('Properties').to_dict() if state.has(\n 'Properties') else {}\n ) for state in section.get('Palette').children\n ]\n blocks = [\n Block(palette[state]) for state in states\n ]\n else:\n blocks = [Block.AIR]*(16**3)\n sections[section.get('Y').get()] = ChunkSection(\n blocks, section, section.get('Y').get())\n\n self.sections = sections\n self.biome_table = [b.get() for b in level_node.get('Biomes').children]\n\n def _read_width_from_loc(self, long_list, width, possition):\n offset = possition * width\n # if this is split across two nums\n if (offset % 64) + width > 64:\n # Find the lengths on each side of the split\n side1len = 64 - ((offset) % 64)\n side2len = ((offset + width) % 64)\n # Select the sections we want from each\n side1 = self._read_bits(\n long_list[int(offset/64)], side1len, offset % 64)\n side2 = self._read_bits(\n long_list[int((offset + width)/64)], side2len, 0)\n # Join them\n comp = (side2 << side1len) + side1\n return comp\n else:\n comp = self._read_bits(\n long_list[int(offset/64)], width, offset % 64)\n return comp\n\n def _read_bits(self, num, width, start):\n # create a mask of size 'width' of 1 bits\n mask = (2 ** width) - 1\n # 
shift it out to where we need for the mask\n mask = mask << start\n # select the bits we need\n comp = num & mask\n # move them back to where they should be\n comp = comp >> start\n\n return comp\n\n def get_block(self, block_pos):\n return self.get_section(block_pos[1]).get_block([n % 16 for n in block_pos])\n\n def get_biome(self, block_pos):\n z = block_pos[2] % 16\n x = block_pos[0] % 16\n return self.biome_table[z*16 + x]\n\n def get_section(self, y):\n key = int(y/16)\n if key not in self.sections:\n self.sections[key] = ChunkSection(\n [Block.AIR]*4096,\n nbt.CompoundTag('None'),\n key\n )\n return self.sections[key]\n\n def __str__(self):\n return \"Chunk(\" + str(self.xpos) + \",\" + str(self.zpos) + \")\"\n\n\nclass World:\n def __init__(self, file_name, save_location=''):\n self.file_name = file_name\n self.save_location = save_location\n self.chunks = {}\n\n def get_block(self, block_pos):\n chunk_pos = self._get_chunk(block_pos)\n chunk = self.get_chunk(chunk_pos)\n return chunk.get_block(block_pos)\n\n def get_biome(self, block_pos):\n chunk_pos = self._get_chunk(block_pos)\n chunk = self.get_chunk(chunk_pos)\n return chunk.get_biome(block_pos)\n\n def get_chunk(self, chunk_pos):\n if chunk_pos not in self.chunks:\n self._load_chunk(chunk_pos)\n\n return self.chunks[chunk_pos]\n\n def _load_chunk(self, chunk_pos):\n chunk_location = os.path.join(\n self.save_location, self.file_name, \"region\", self._get_region_file(chunk_pos))\n with open(chunk_location, mode='rb') as region:\n locations = [[\n int.from_bytes(region.read(3), byteorder='big',\n signed=False) * 4096,\n int.from_bytes(region.read(1), byteorder='big',\n signed=False) * 4096\n ] for i in range(1024)]\n\n timestamps = region.read(4096)\n\n chunk = self._load_binary_chunk_at(\n region, locations[((chunk_pos[0] % 32) + (chunk_pos[1] % 32) * 32)][0])\n self.chunks[chunk_pos] = chunk\n\n def _load_binary_chunk_at(self, region_file, offset):\n region_file.seek(offset)\n datalen = int.from_bytes(region_file.read(\n 4), byteorder='big', signed=False)\n compr = region_file.read(1)\n decompressed = zlib.decompress(region_file.read(datalen))\n data = nbt.parse_nbt(stream.InputStream(decompressed))\n chunk_pos = (data.get('Level').get('xPos').get(),\n data.get('Level').get('zPos').get())\n chunk = Chunk(\n chunk_pos[0],\n chunk_pos[1],\n data\n )\n return chunk\n\n def _get_region_file(self, chunk_pos):\n return 'r.' 
+ '.'.join([str(x) for x in self._get_region(chunk_pos)]) + '.mca'\n\n def _get_chunk(self, block_pos):\n return (math.floor(block_pos[0] / 16), math.floor(block_pos[2] / 16))\n\n def _get_region(self, chunk_pos):\n return (math.floor(chunk_pos[0] / 32), math.floor(chunk_pos[1] / 32))\n"},"size":{"kind":"number","value":6474,"string":"6,474"}}},{"rowIdx":127471,"cells":{"max_stars_repo_path":{"kind":"string","value":"students/k3342/laboratory_works/Salnikova_Nadezhda/laboratory_work_1/flights/forms.py"},"max_stars_repo_name":{"kind":"string","value":"TonikX/ITMO_ICT_-WebProgramming_2020"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"id":{"kind":"string","value":"2171105"},"content":{"kind":"string","value":"from django import forms\nfrom django.forms import ModelForm, Textarea\nfrom django.contrib.auth.models import User\nfrom flights.models import Client, Comment\n\n\nclass RegisterUserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('username', 'password')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs['class'] = 'form-control'\n\n def save(self, commit=True):\n user = super().save(commit=False)\n user.set_password(self.cleaned_data[\"password\"])\n if commit:\n user.save()\n return user\n\n\nclass ClientRegister(forms.ModelForm):\n class Meta:\n model = Client\n fields = ['first_name', 'last_name', 'date_of_birth', 'bonus_card']\n\n\nclass AddComment(forms.ModelForm):\n class Meta:\n model = Comment\n fields = ['flight', 'comment_type', 'text']\n\n labels = {\n 'flight': ('Chose a flight to leave a comment'),\n 'type_of_comment': ('Choose the comment type'),\n 'text': ('Type your comment'),\n }\n\n widgets = {\n \"text\": Textarea(attrs={'cols': 70, 'rows': 10}),\n }\n"},"size":{"kind":"number","value":1203,"string":"1,203"}}},{"rowIdx":127472,"cells":{"max_stars_repo_path":{"kind":"string","value":"MyDetector/generatefakesubmission.py"},"max_stars_repo_name":{"kind":"string","value":"lkk688/WaymoObjectDetection"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2170969"},"content":{"kind":"string","value":"import tensorflow.compat.v1 as tf\nfrom pathlib import Path\nimport json\nimport argparse\nimport tqdm\nimport uuid\n\nfrom waymo_open_dataset import dataset_pb2\nfrom waymo_open_dataset import label_pb2\nfrom waymo_open_dataset.protos import metrics_pb2\nfrom waymo_open_dataset.protos import submission_pb2\n\ndef _fancy_deep_learning(frame):\n \"\"\"Creates a prediction objects file.\"\"\"\n o_list = []\n\n for camera_labels in frame.camera_labels:\n if camera_labels.name != 1: #Only use front camera\n continue\n for gt_label in camera_labels.labels:\n o = metrics_pb2.Object()\n # The following 3 fields are used to uniquely identify a frame a prediction\n # is predicted at.\n o.context_name = frame.context.name\n # The frame timestamp for the prediction. See Frame::timestamp_micros in\n # dataset.proto.\n o.frame_timestamp_micros = frame.timestamp_micros\n # This is only needed for 2D detection or tracking tasks.\n # Set it to the camera name the prediction is for.\n o.camera_name = camera_labels.name\n\n # Populating box and score.\n box = label_pb2.Label.Box()\n box.center_x = gt_label.box.center_x\n box.center_y = gt_label.box.center_y\n box.length = gt_label.box.length\n box.width = gt_label.box.width\n o.object.box.CopyFrom(box)\n # This must be within [0.0, 1.0]. 
It is better to filter those boxes with\n # small scores to speed up metrics computation.\n o.score = 0.9\n # Use correct type.\n o.object.type = gt_label.type\n o_list.append(o)\n\n return o_list\n\nfrom glob import glob\nimport os\nif __name__ == \"__main__\":\n PATH='/data/cmpe295-liu/Waymo'\n #validation_folders = [\"validation_0000\"]\n validation_folders = [\"validation_0000\",\"validation_0001\",\"validation_0002\",\"validation_0003\",\"validation_0004\",\"validation_0005\",\"validation_0006\",\"validation_0007\"] #[\"validation_0007\",\"validation_0006\",\"validation_0005\",\"validation_0004\",\"validation_0003\",\"validation_0002\",\"validation_0001\",\"validation_0000\"]\n data_files = [path for x in validation_folders for path in glob(os.path.join(PATH, x, \"*.tfrecord\"))]\n print(data_files)#all TFRecord file list\n print(len(data_files))\n #dataset = tf.data.TFRecordDataset([str(x.absolute()) for x in Path(data_files)])\n dataset = [tf.data.TFRecordDataset(FILENAME, compression_type='') for FILENAME in data_files]#create a list of dataset for each TFRecord file\n print(\"Dataset type:\",type(dataset))\n frames = [] #store all frames = total number of TFrecord files * 40 frame(each TFrecord)\n objects = metrics_pb2.Objects()\n for i, data_file in enumerate(dataset):\n print(\"Datafile: \",i)#Each TFrecord file\n for idx, data in enumerate(data_file): #Create frame based on Waymo API, 199 frames per TFrecord (20s, 10Hz)\n# if idx % 5 != 0: #Downsample every 5 images, reduce to 2Hz, total around 40 frames\n# continue\n frame = dataset_pb2.Frame()\n frame.ParseFromString(bytearray(data.numpy()))\n o_list = _fancy_deep_learning(frame)\n frames.append(frame)\n for o in o_list:\n objects.objects.append(o)\n\n #https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/protos/submission.proto\n submission = submission_pb2.Submission()\n submission.task = submission_pb2.Submission.DETECTION_2D \n submission.account_name = ''\n submission.authors.append('')\n submission.affiliation = 'None'\n submission.unique_method_name = 'fake'\n submission.description = 'none'\n submission.method_link = \"empty method\"\n submission.sensor_type = submission_pb2.Submission.CAMERA_ALL\n submission.number_past_frames_exclude_current = 0\n submission.number_future_frames_exclude_current = 0\n submission.inference_results.CopyFrom(objects)\n submission.docker_image_source = '' #// Link to the latency submission Docker image stored in Google Storage bucket\n #object_types // Object types this submission contains. 
By default, we assume all types.\n #latency_second Self-reported end to end inference latency in seconds\n \n outputfilepath='/home/010796032/MyRepo/submissionoutput/fake_valfrontcameraall.bin'\n f = open(outputfilepath, 'wb')\n f.write(submission.SerializeToString())\n f.close()"},"size":{"kind":"number","value":4516,"string":"4,516"}}},{"rowIdx":127473,"cells":{"max_stars_repo_path":{"kind":"string","value":"local_test/bus_schedule/app/post/post.py"},"max_stars_repo_name":{"kind":"string","value":"NYUSHer/Widgets"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170725"},"content":{"kind":"string","value":"from app.post import post\nfrom flask import jsonify\nfrom util.util import query_fetch, query_mod, PostList, query_dict_fetch, ErrorResponse\nfrom instance.config import VERBOSE, DB\nfrom util.util import token_required, replace\nfrom flask import request\n\n\n###########################################\n# #\n# Authorized Code #\n# #\n###########################################\n\n@post.route('/list', methods=['POST'])\n@token_required\ndef get_list():\n offset = int(request.form.get('offset'))\n size = int(request.form.get('size'))\n # offset = (int(temp)+1)*int(size)\n sql = \"SELECT pid, title, content, authorid, user_avatar, user_name FROM \" \\\n \"posts INNER JOIN users ON users.user_id = posts.authorid ORDER BY priority DESC, pid DESC LIMIT {} OFFSET {}\".format(size, offset)\n if VERBOSE:\n print('get list query:' + sql)\n indicator = query_dict_fetch(sql, DB)\n if indicator:\n response = PostList()\n response.data['offset'] = offset\n response.data['size'] = size\n response.data['count'] = str(len(indicator))\n response.data['postlist'] = indicator\n else:\n response = ErrorResponse()\n response.error['errorCode'] = '105'\n response.error['errorMsg'] = 'No post found.'\n return jsonify(response.__dict__)\n\n\n@post.route('/submit', methods=['POST'])\n@token_required\ndef post_submit():\n post_title = replace(request.form.get('title'))\n post_category = replace(request.form.get('category'))\n post_tags = replace(request.form.get('tags'))\n post_content = replace(request.form.get('content'))\n post_by = request.headers.get('userid')\n if VERBOSE:\n print(post_title, post_category, post_tags, post_content, post_by)\n\n # No empty title\n if post_title == \"\":\n response = ErrorResponse()\n response.error['errorCode'] = '108'\n response.error['errorMsg'] = 'title cannot be empty'\n return jsonify(response.__dict__)\n\n # No empty content\n elif post_content == \"\":\n response = ErrorResponse()\n response.error['errorCode'] = '108'\n response.error['errorMsg'] = 'content cannot be empty'\n return jsonify(response.__dict__)\n\n # Modify Existing Post\n elif request.form.get('pid') is not None and request.form.get('pid').isdigit():\n post_id = request.form.get('pid')\n # Check if user_id and post_by matches\n sql = \"SELECT authorid FROM posts WHERE pid = '{}'\".format(post_id)\n if VERBOSE:\n print(sql)\n indicator = query_fetch(sql, DB)\n user_id = request.headers.get('userid')\n response = PostList()\n if indicator['authorid'] == int(user_id):\n sql = \"UPDATE posts SET title='{}', category='{}', tags='{}', content='{}', timestamp = (CURRENT_TIMESTAMP) WHERE pid='{}'\"\\\n .format(post_title, post_category, post_tags, post_content, post_id)\n if VERBOSE:\n print(sql)\n query_mod(sql, DB)\n response.data['pid'] = post_id\n # New Post\n elif request.form.get('pid') is None:\n sql = \"INSERT INTO posts(title, content, tags, category, 
authorid) VALUES ('{}', '{}', '{}', '{}', '{}')\" \\\n .format(post_title, post_content, post_tags, post_category, post_by)\n\n if VERBOSE:\n print(\"insert query:\" + sql)\n query_mod(sql, DB)\n\n # Get the generated post_id\n sql = \"SELECT pid FROM posts WHERE category = '{}' AND content = '{}' AND authorid = '{}'\" \\\n .format(post_category, post_content, post_by)\n if VERBOSE:\n print(\"get post_id query:\" + sql)\n indicator = query_fetch(sql, DB)\n response = PostList()\n if indicator:\n response.data['pid'] = indicator['pid']\n else:\n response = ErrorResponse()\n response.error['errorCode'] = '106'\n response.error['errorMsg'] = 'How did you wind up here??'\n return jsonify(response.__dict__)\n\n\n@post.route('/get', methods=['POST'])\n@token_required\ndef post_get():\n post_id = request.form.get('pid')\n sql = \"SELECT title, category, tags, content FROM posts WHERE pid = '{}'\".format(post_id)\n if VERBOSE:\n print(\"post get query:\" + sql)\n indicator = query_fetch(sql, DB)\n response = PostList()\n if indicator:\n response.data['pid'] = post_id\n response.data['title'] = indicator['title']\n response.data['category'] = indicator['category']\n \"\"\"\n NOTE: Tags must be deserialized first.\n Split with comma\n e.g. post_tags = 'dog, 2017, happy, weekend'\n \"\"\"\n response.data['tags'] = indicator['tags']\n response.data['content'] = indicator['content']\n else:\n response = ErrorResponse()\n response.error['errorCode'] = '105'\n response.error['errorMsg'] = 'Post does not exist'\n return jsonify(response.__dict__)\n\n\n@post.route('/delete', methods=['POST'])\n@token_required\ndef post_delete():\n post_by = request.headers.get('userid')\n post_id = request.form.get('pid')\n # Check if requested post exists\n sql = \"SELECT * FROM posts WHERE pid='{}'\".format(post_id)\n if VERBOSE:\n print(\"delete post pid check\" + sql)\n check = query_fetch(sql, DB)\n if check is None:\n response = ErrorResponse()\n response.error['errorCode'] = '105'\n response.error['errorMsg'] = 'post does not exist'\n return jsonify(response.__dict__)\n # Check if user have authorization to delete\n sql = \"SELECT authorid FROM posts WHERE pid='{}'\".format(post_id)\n if VERBOSE:\n print(\"delete post authorization check\" + sql)\n indicator = query_fetch(sql, DB)\n # Authorid and userid matchs and have authority to delete post\n if indicator['authorid'] == int(post_by):\n # Delete the post\n sql = \"DELETE FROM posts WHERE authorid = '{}' AND pid = '{}'\"\\\n .format(post_by, post_id)\n if VERBOSE:\n print(\"delete post\" + sql)\n query_mod(sql, DB)\n response = PostList()\n response.data['pid'] = post_id\n # No authority to delete post\n else:\n response = ErrorResponse()\n response.error['errorCode'] = '104'\n response.error['errorMsg'] = 'No authority.'\n return jsonify(response.__dict__)\n"},"size":{"kind":"number","value":6408,"string":"6,408"}}},{"rowIdx":127474,"cells":{"max_stars_repo_path":{"kind":"string","value":"package/utils/file.py"},"max_stars_repo_name":{"kind":"string","value":"MikeCun/PersonReID"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170065"},"content":{"kind":"string","value":"import os\nimport os.path as osp\nimport shutil\nimport json\nimport numpy as np\nimport glob\nimport sys\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelse:\n import pickle\n\n\ndef may_make_dir(path):\n \"\"\"\n Args:\n path: a dir, e.g. 
result of `osp.dirname()`\n Note:\n `osp.exists('')` returns `False`, while `osp.exists('.')` returns `True`!\n \"\"\"\n # This clause has mistakes:\n # if path is None or '':\n\n if path in [None, '']:\n return\n if not osp.exists(path):\n os.makedirs(path)\n\n\ndef load_pickle(path, verbose=True):\n \"\"\"Check and load pickle object.\n According to this post: https://stackoverflow.com/a/41733927, cPickle and\n disabling garbage collector helps with loading speed.\"\"\"\n assert osp.exists(path), \"File not exists: {}\".format(path)\n # gc.disable()\n with open(path, 'rb') as f:\n ret = pickle.load(f)\n # gc.enable()\n if verbose:\n print('Loaded pickle file {}'.format(path))\n return ret\n\n\ndef save_pickle(obj, path, verbose=True):\n \"\"\"Create dir and save file.\"\"\"\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'wb') as f:\n pickle.dump(obj, f, protocol=2)\n if verbose:\n print('Pickle file saved to {}'.format(path))\n\n\ndef load_json(path):\n \"\"\"Check and load json file.\"\"\"\n assert osp.exists(path), \"Json file not exists: {}\".format(path)\n with open(path, 'r') as f:\n ret = json.load(f)\n print('Loaded json file {}'.format(path))\n return ret\n\n\ndef save_json(obj, path):\n \"\"\"Create dir and save file.\"\"\"\n may_make_dir(osp.dirname(osp.abspath(path)))\n with open(path, 'w') as f:\n json.dump(obj, f)\n print('Json file saved to {}'.format(path))\n\n\ndef read_lines(file):\n with open(file) as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines if l.strip()]\n return lines\n\n\ndef copy_to(p1, p2):\n # Only when the copy can go on without error do we create destination dir.\n if osp.exists(p1):\n may_make_dir(osp.dirname(p2))\n shutil.copy(p1, p2)\n\n\ndef get_files_by_pattern(root, pattern='a/b/*.ext', strip_root=False):\n \"\"\"Optionally to only return matched sub paths.\"\"\"\n ret = glob.glob(osp.join(root, pattern))\n if strip_root:\n ret = [r[len(root) + 1:] for r in ret]\n return ret\n\n\ndef walkdir(folder, exts=None, sub_path=False, abs_path=False):\n \"\"\"Walk through each files in a directory.\n Reference: https://github.com/tqdm/tqdm/wiki/How-to-make-a-great-Progress-Bar\n Args:\n exts: file extensions, e.g. 
'.jpg', or ['.jpg'] or ['.jpg', '.png']\n sub_path: whether to exclude `folder` in the resulting paths, remaining sub paths\n abs_path: whether to return absolute paths\n \"\"\"\n if isinstance(exts, str):\n exts = [exts]\n for dirpath, dirs, files in os.walk(folder):\n for filename in files:\n if (exts is None) or (os.path.splitext(filename)[1] in exts):\n path = os.path.join(dirpath, filename)\n if sub_path:\n path = path[len(folder) + 1:]\n elif abs_path:\n path = os.path.abspath(path)\n yield path\n\n\ndef strip_root(path):\n \"\"\"a/b/c -> b/c\"\"\"\n sep = os.sep\n path = sep.join(path.split(sep)[1:])\n return path\n"},"size":{"kind":"number","value":3325,"string":"3,325"}}},{"rowIdx":127475,"cells":{"max_stars_repo_path":{"kind":"string","value":"habanero/request_class.py"},"max_stars_repo_name":{"kind":"string","value":"Maocx/habanero"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170401"},"content":{"kind":"string","value":"import time\n\nimport requests\nimport json\nimport re\n\nfrom .filterhandler import filter_handler\nfrom .habanero_utils import switch_classes,check_json,is_json,parse_json_err,make_ua,filter_dict,rename_query_filters\nfrom .exceptions import *\n\nclass Request(object):\n '''\n Habanero: request class\n\n This is the request class for all requests\n '''\n def __init__(self, mailto, url, path, query = None, filter = None,\n offset = None, limit = None, sample = None, sort = None,\n order = None, facet = None, select = None, cursor = None,\n cursor_max = None, agency = False, **kwargs):\n self.mailto = mailto\n self.url = url\n self.path = path\n self.query = query\n self.filter = filter\n self.offset = offset\n self.limit = limit\n self.sample = sample\n self.sort = sort\n self.order = order\n self.facet = facet\n self.select = select\n self.cursor = cursor\n self.cursor_max = cursor_max\n self.agency = agency\n self.kwargs = kwargs\n\n def _url(self):\n tmpurl = self.url + self.path\n return tmpurl.strip(\"/\")\n\n def do_request(self):\n filt = filter_handler(self.filter)\n if self.select.__class__ is list:\n self.select = ','.join(self.select)\n\n if not isinstance(self.cursor_max, (type(None), int)):\n raise ValueError(\"cursor_max must be of class int\")\n\n payload = {'query':self.query, 'filter':filt, 'offset':self.offset,\n 'rows':self.limit, 'sample':self.sample, 'sort':self.sort,\n 'order':self.order, 'facet':self.facet, 'select':self.select,\n 'cursor':self.cursor}\n payload = dict((k, v) for k, v in payload.items() if v)\n # add query filters\n payload.update(filter_dict(self.kwargs))\n # rename query filters\n payload = rename_query_filters(payload)\n start_time = time.time()\n js = self._req(payload = payload)\n print(\"First request in \", time.time() - start_time)\n cu = js['message'].get('next-cursor')\n max_avail = js['message']['total-results']\n res = self._redo_req(js, payload, cu, max_avail)\n return res\n\n def _redo_req(self, js, payload, cu, max_avail):\n print(\"cu\", cu)\n print(\"payload\",payload)\n print(\"max_avail\", max_avail)\n print(self.cursor_max)\n if(cu.__class__.__name__ != 'NoneType' and self.cursor_max > len(js['message']['items'])):\n res = [js]\n total = len(js['message']['items'])\n while(cu.__class__.__name__ != 'NoneType' and self.cursor_max > total and total < max_avail):\n payload['cursor'] = cu\n start_time = time.time()\n out = self._req(payload = payload)\n print(\"Internal request in \", time.time() - start_time)\n cu = out['message'].get('next-cursor')\n 
res.append(out)\n total = sum([ len(z['message']['items']) for z in res ])\n # This code is not built for resuming with a cursor!\n if(len(out[\"message\"]['items'])<1):\n break\n return res\n else:\n return js\n\n def _req(self, payload):\n try:\n r = requests.get(self._url(), params = payload, headers = make_ua(self.mailto))\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n try:\n f = r.json()\n raise RequestError(r.status_code, f['message'][0]['message'])\n except:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(e)\n check_json(r)\n return r.json()\n"},"size":{"kind":"number","value":3403,"string":"3,403"}}},{"rowIdx":127476,"cells":{"max_stars_repo_path":{"kind":"string","value":"share/pegasus/init/mpi-hw/daxgen.py"},"max_stars_repo_name":{"kind":"string","value":"fengggli/pegasus"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171271"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom Pegasus.DAX3 import *\nimport sys\nimport pwd\nimport os\nimport time\nfrom Pegasus.DAX3 import *\n\n# The name of the DAX file is the first argument\nif len(sys.argv) != 2:\n sys.stderr.write(\"Usage: %s DAXFILE\\n\" % (sys.argv[0]))\n sys.exit(1)\ndaxfile = sys.argv[1]\n\nUSER = pwd.getpwuid(os.getuid())[0]\n\n# Create a abstract dag\ndax = ADAG(\"mpi-hello-world\")\n\n# Add some workflow-level metadata\ndax.metadata(\"creator\", \"%s@%s\" % (USER, os.uname()[1]))\ndax.metadata(\"created\", time.ctime())\n\n# Add input file to the DAX-level replica catalog\nfin = File(\"f.in\")\n\n# optional if you want to put the file locations in the DAX\n# for tutorial we are picking up from --input-dir option to\n# pegasus-plan\n# fin.addPFN(PFN(\"file://\" + os.getcwd() + \"/input/f.in\", \"bluewaters\"))\n# dax.addFile(fin)\n\n\n# Add the mpi hello world job\nmpi_hw_job = Job(namespace=\"pegasus\", name=\"mpihw\" )\nfout = File(\"f.out\")\nmpi_hw_job.addArguments(\"-i \", fin )\nmpi_hw_job.addArguments(\"-o \", fout )\nmpi_hw_job.uses(fin, link=Link.INPUT)\nmpi_hw_job.uses(fout, link=Link.OUTPUT)\n\n# tell pegasus it is an MPI job\nmpi_hw_job.addProfile( Profile( \"globus\", \"jobtype\", \"mpi\"))\n\n# add profiles indicating PBS specific parameters for BLUEWATERS\n\n# pegasus.cores\nmpi_hw_job.addProfile( Profile(\"pegasus\", \"cores\", \"32\" ))\n# pegasus.nodes\nmpi_hw_job.addProfile( Profile(\"pegasus\", \"nodes\", \"2\" ))\n# pegasus.ppn\nmpi_hw_job.addProfile( Profile(\"pegasus\", \"ppn\", \"16\" ))\n\n# pegasus.runtime is walltime in seconds.\nmpi_hw_job.addProfile( Profile(\"pegasus\", \"runtime\", \"300\"))\ndax.addJob(mpi_hw_job)\n\n# Write the DAX to stdout\n#dax.writeXML(sys.stdout)\n\nf = open(daxfile, \"w\")\ndax.writeXML(f)\nf.close()\n"},"size":{"kind":"number","value":1691,"string":"1,691"}}},{"rowIdx":127477,"cells":{"max_stars_repo_path":{"kind":"string","value":"CodingBat/Python/Logic-2/lone_sum.py"},"max_stars_repo_name":{"kind":"string","value":"unobatbayar/codingbat"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2171079"},"content":{"kind":"string","value":"\"\"\"\n\n\nGiven 3 int values, a b c, return their sum. 
However, if one of the values is the same as another of the values, it does not count towards the sum.\n\n\nlone_sum(1, 2, 3) → 6\nlone_sum(3, 2, 3) → 2\nlone_sum(3, 3, 3) → 0\n\n@author unobatbayar\n\n\"\"\"\ndef lone_sum(a, b, c):\n sum = 0\n if a != b and a != c: sum += a\n if b != a and b != c: sum += b\n if c != a and c != b: sum += c\n\n return sum"},"size":{"kind":"number","value":393,"string":"393"}}},{"rowIdx":127478,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/DeepSeaVectorDraw/VectorCommandUnion.py"},"max_stars_repo_name":{"kind":"string","value":"akb825/DeepSea"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2170650"},"content":{"kind":"string","value":"# automatically generated by the FlatBuffers compiler, do not modify\n\n# namespace: DeepSeaVectorDraw\n\nclass VectorCommandUnion(object):\n NONE = 0\n StartPathCommand = 1\n MoveCommand = 2\n LineCommand = 3\n BezierCommand = 4\n QuadraticCommand = 5\n ArcCommand = 6\n ClosePathCommand = 7\n EllipseCommand = 8\n RectangleCommand = 9\n StrokePathCommand = 10\n FillPathCommand = 11\n TextCommand = 12\n TextRangeCommand = 13\n ImageCommand = 14\n\n"},"size":{"kind":"number","value":475,"string":"475"}}},{"rowIdx":127479,"cells":{"max_stars_repo_path":{"kind":"string","value":"clustering/clustering_funcs.py"},"max_stars_repo_name":{"kind":"string","value":"EmreHakguder/SL_VSMs"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171224"},"content":{"kind":"string","value":"import pandas as pd, os, json\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.metrics import silhouette_score\nfrom scipy import cluster\n\ndef cluster_glove(someSemPath, height):\n X = pd.read_csv(someSemPath).set_index(\"video\")\n Z = cluster.hierarchy.ward(X)\n cutree = cluster.hierarchy.cut_tree(Z, height=height)\n clusterLabels = [int(x[0]) for x in cutree]\n clusters_len = len(list(set(clusterLabels)))\n \n if 1 < clusters_len < len(X.index):\n return X.index, silhouette_score(X, cutree.ravel(), metric='euclidean'), clusterLabels, clusters_len\n else:\n return False, False, False, False\n \ndef merge_cluster_data(outPath):\n languages = [\"ASL\", \"BSL\"]\n dims = [\"50d\", \"100d\", \"200d\", \"300d\"]\n masterPath = \"results/clustering/signPairs_byCluster/\"\n \n if not os.path.exists(outPath):\n os.makedirs(outPath)\n\n for language in languages:\n for dim in dims:\n list_height_path = masterPath+language+\"/\"+dim+\"/\"\n heights = [x.split(\"_\")[3] for x in os.listdir(list_height_path) if not x.startswith(\".\")]\n\n for heit in heights:\n clustered_df = pd.DataFrame(columns=[\"language\", \"dim\", \"height\", \"clusterID\", \"signPair\",\n \"semSim\", \"HS_sim\", \"LOC_sim\", \"MOV_sim\", \"ENTIRE_sim\"])\n \n path = list_height_path+language+\"_\"+dim+\"_heightheight_\"+str(heit)+\"_signPairs_byCluster.json\"\n print(path)\n\n with open(path, \"r\") as read_file:\n js_file = json.load(read_file)\n\n for clusterID in js_file: \n for pair in js_file[clusterID]: \n signPair = pair[0] + \" + \" + pair[1]\n clustered_df = clustered_df.append({\"language\":language,\n \"dim\":dim, \n \"height\":heit, \n \"clusterID\":clusterID,\n \"signPair\":signPair,\n \"semSim\":None,\n \"HS_sim\":None,\n \"LOC_sim\":None,\n \"MOV_sim\":None,\n \"ENTIRE_sim\":None}, ignore_index=True)\n\n clustered_df.to_csv(outPath+str(language)+\"_\"+str(dim)+\"_\"+str(heit)+\".csv.gz\", compression=\"gzip\", index=False)\n \n return clustered_df\n \n\n \n 
"},"size":{"kind":"number","value":2814,"string":"2,814"}}},{"rowIdx":127480,"cells":{"max_stars_repo_path":{"kind":"string","value":"gratipay/models/team/closing.py"},"max_stars_repo_name":{"kind":"string","value":"kant/gratipay.com"},"max_stars_count":{"kind":"number","value":517,"string":"517"},"id":{"kind":"string","value":"2171378"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nclass Closing(object):\n \"\"\"This mixin implements team closing.\n \"\"\"\n\n #: Whether the team is closed or not.\n\n is_closed = False\n\n\n def close(self):\n \"\"\"Close the team account.\n \"\"\"\n with self.db.get_cursor() as cursor:\n cursor.run(\"UPDATE teams SET is_closed=true WHERE id=%s\", (self.id,))\n self.app.add_event( cursor\n , 'team'\n , dict(id=self.id, action='set', values=dict(is_closed=True))\n )\n self.set_attributes(is_closed=True)\n if self.package:\n self.package.unlink_team(cursor)\n"},"size":{"kind":"number","value":775,"string":"775"}}},{"rowIdx":127481,"cells":{"max_stars_repo_path":{"kind":"string","value":"audcon/models.py"},"max_stars_repo_name":{"kind":"string","value":"vikc07/audcon"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171465"},"content":{"kind":"string","value":"from datetime import datetime\nimport pytz\nfrom audcon import db\nfrom gpm import formatting\nfrom audcon import app\n\n\nclass DefaultColumns(object):\n id = db.Column(db.Integer, primary_key=True)\n created_date = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)\n modified_date = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)\n isdeleted = db.Column(db.Integer, default=False)\n\n def created_date_local_tz(self):\n utc = pytz.timezone('UTC')\n local_tz = pytz.timezone(app.config['UI']['TZ'])\n created_date_utc = utc.localize(self.created_date)\n created_date_local_tz = created_date_utc.astimezone(local_tz)\n\n return created_date_local_tz.replace(tzinfo=None)\n\n def modified_date_local_tz(self):\n utc = pytz.timezone('UTC')\n local_tz = pytz.timezone(app.config['UI']['TZ'])\n modified_date_utc = utc.localize(self.modified_date)\n modified_date_local_tz = modified_date_utc.astimezone(local_tz)\n\n return modified_date_local_tz.replace(tzinfo=None)\n\n def duration(self):\n return (self.modified_date - self.created_date).total_seconds()\n\n def duration_formatted(self):\n return formatting.time_pretty((self.modified_date - self.created_date).total_seconds())\n\n\nclass Media(DefaultColumns, db.Model):\n media_file_path = db.Column(db.String(255), nullable=False)\n media_title = db.Column(db.String(255), nullable=False)\n media_fsize = db.Column(db.BigInteger, nullable=False)\n media_format = db.Column(db.String(255), nullable=False)\n media_streams_count = db.Column(db.SmallInteger, nullable=False)\n media_a_streams_count = db.Column(db.SmallInteger, nullable=False)\n media_v_streams_count = db.Column(db.SmallInteger, nullable=False)\n media_s_streams_count = db.Column(db.SmallInteger, nullable=False)\n media_o_streams_count = db.Column(db.SmallInteger, nullable=False)\n media_a_codec = db.Column(db.String(255), nullable=False)\n media_a_sample_fmt = db.Column(db.String(255), nullable=False)\n media_a_sample_rate = db.Column(db.String(255), nullable=False)\n media_a_channels = db.Column(db.String(255), nullable=False)\n media_a_channel_layout = db.Column(db.String(255), nullable=False)\n 
media_a_bitrate = db.Column(db.String(255), nullable=False)\n media_full_meta = db.Column(db.JSON)\n\n def __repr__(self):\n return self.media_file_path\n\n def fsize_pretty(self):\n return formatting.fsize_pretty(self.media_fsize)\n\n def last_updated(self, formatted=False):\n last_updated = (datetime.utcnow() - self.modified_date).total_seconds()\n if last_updated < 0:\n last_updated = 0\n\n if formatted:\n return formatting.time_pretty(last_updated)\n return last_updated\n\n\nclass RunLog(DefaultColumns, db.Model):\n service = db.Column(db.String(255), nullable=False)\n status = db.Column(db.String(255))\n params = db.Column(db.JSON)\n\n def last_ran(self):\n return (datetime.utcnow() - self.modified_date).total_seconds()\n\n\nclass Queue(DefaultColumns, db.Model):\n media_file_path = db.Column(db.String(255), nullable=False)\n media_output_file_path = db.Column(db.String(255), nullable=False)\n media_output_ffmpeg_params = db.Column(db.String(255), nullable=False)\n media_converted = db.Column(db.Boolean, nullable=False, default=False)\n\n def __repr__(self):\n return self.id\n\n def conversion_status(self):\n if self.media_converted:\n return 'Complete'\n else:\n return 'Pending'\n\n"},"size":{"kind":"number","value":3606,"string":"3,606"}}},{"rowIdx":127482,"cells":{"max_stars_repo_path":{"kind":"string","value":"litterbox/models/google/model_google_slim.py"},"max_stars_repo_name":{"kind":"string","value":"rwightman/tensorflow-litterbox"},"max_stars_count":{"kind":"number","value":49,"string":"49"},"id":{"kind":"string","value":"2170877"},"content":{"kind":"string","value":"# Copyright (C) 2016 . All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# ==============================================================================\n\"\"\"Model wrapper for Google's tensorflow/model/slim models.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow as tf\nfrom collections import OrderedDict\nfrom fabric import model\nfrom models.google.nets import nets_factory\nslim = tf.contrib.slim\n\ngoogle_default_params = {\n 'network': 'inception_resnet_v2',\n 'num_classes': 1000,\n}\n\n\nclass ModelGoogleSlim(model.Model):\n\n def __init__(self, params=google_default_params):\n super(ModelGoogleSlim, self).__init__()\n params = model.merge_params(google_default_params, params)\n\n # model_name must correspond to one of google's network names in nets package,\n # see nets_factory.py for valid names.\n self.network = params['network']\n assert self.network in nets_factory.networks_map\n self.num_classes = params['num_classes']\n assert self.num_classes > 1\n\n def build_tower(self, images, is_training=False, scope=None):\n weight_decay = 0.0001\n network_fn = nets_factory.get_network_fn(\n self.network,\n num_classes=self.num_classes,\n weight_decay=weight_decay,\n is_training=is_training)\n logits, endpoints = network_fn(images)\n\n # HACK get mode variable scope set by google net code from logits op name so it can\n # be removed for smaller Tensorboard tags\n scope_search = re.search('%s_[0-9]*/(\\w+)/' % self.TOWER_PREFIX, logits.op.name)\n if scope_search:\n self.model_variable_scope = scope_search.group(1)\n\n if 'AuxLogits' in endpoints:\n # Grab the logits associated with the side head. 
Dataset columns:
  max_stars_repo_path    string, lengths 4 to 182
  max_stars_repo_name    string, lengths 6 to 116
  max_stars_count        int64, 0 to 191k
  id                     string, lengths 7 to 7
  content                string, lengths 100 to 10k
  size                   int64, 100 to 10k
MMunsuperseg-beta/IIC/code/datasets/segmentation/util/dfc_prepare.py
GeoAI4EO/MMUnsupervisedSeg
0
2171300
import glob
import os
import sys
from datetime import datetime

import cv2
import numpy as np
import scipy.io as sio
from skimage import io

# split each 6000x6000 image into 15x15 half scaled 200x200 images
# make train and test lists

SOURCE_IMGS_DIR = "/data/data/dfc/images/"
SOURCE_IMGS_SUFFIX = "_RGB.tif"
SOURCE_GT_DIR = "/data/data/dfc/classes/"
SOURCE_GT_SUFFIX = "_CLS.tif"
OUT_DIR = "/data/data/dfc/IIC/"
CLASS_DICT = {0: 0, 2: 1, 5: 2, 6: 3, 9: 4, 17: 5, 65: 0}


def main():
    out_dir_imgs = os.path.join(OUT_DIR, "imgs")
    out_dir_gt = os.path.join(OUT_DIR, "gt")
    if not os.path.exists(out_dir_imgs):
        os.makedirs(out_dir_imgs)
    if not os.path.exists(out_dir_gt):
        os.makedirs(out_dir_gt)

    unlabelled_train = open(os.path.join(OUT_DIR, "unlabelled_train.txt"), "w+")
    labelled_train = open(os.path.join(OUT_DIR, "labelled_train.txt"), "w+")
    labelled_test = open(os.path.join(OUT_DIR, "labelled_test.txt"), "w+")

    for mode in ['training', 'validation']:
        for i, img_path in enumerate(sorted(glob.glob(SOURCE_IMGS_DIR + mode + "/*.tif"))):
            print("on img: %d %s" % (i, datetime.now()))
            handle = os.path.basename(img_path)[:-len(SOURCE_IMGS_SUFFIX)]

            img = io.imread(img_path)
            sio.savemat(os.path.join(out_dir_imgs,
                                     "%s.mat" % str(os.path.basename(img_path)[:-4])),
                        {"img": img})

            if mode == 'training':
                unlabelled_train.write("%s\n" % os.path.basename(img_path)[:-4])
                labelled_train.write("%s\n" % os.path.basename(img_path)[:-4])
            else:
                labelled_test.write("%s\n" % os.path.basename(img_path)[:-4])

            gt_path = os.path.join(SOURCE_GT_DIR, mode, handle + SOURCE_GT_SUFFIX)
            gt = io.imread(gt_path)
            for key, value in CLASS_DICT.items():
                gt[gt == key] = value
            sio.savemat(os.path.join(out_dir_gt,
                                     "%s.mat" % str(os.path.basename(gt_path)[:-4])),
                        {"gt": gt})

    unlabelled_train.close()
    labelled_train.close()
    labelled_test.close()


if __name__ == "__main__":
    main()
1,992
tests/fixtures.py
bassory99/aiobaro
0
2170976
import pathlib

import pytest

from aiobaro.core import MatrixClient

from .utils import is_responsive


@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig):
    return (
        pathlib.Path(pytestconfig.rootdir) / "tests/docker/docker-compose.yml"
    )


@pytest.fixture(scope="session")
def matrix_server_url(docker_ip, docker_services) -> str:
    """Ensure that HTTP service is up and responsive."""
    # `port_for` takes a container port and returns the corresponding host port
    port = docker_services.port_for("matrix", 8008)
    url = f"http://{docker_ip}:{port}"
    docker_services.wait_until_responsive(
        timeout=30.0,
        pause=0.1,
        check=lambda: is_responsive(f"{url}/_matrix/client/versions"),
    )
    return url


@pytest.fixture(scope="session")
def matrix_client(matrix_server_url):
    return MatrixClient(matrix_server_url)
889
35_Even_Wins/python/evenwins.py
serranojl/basic-computer-games
1
2170404
""" This version of evenwins.bas based on game decscription and does *not* follow the source. The computer chooses marbles at random. For simplicity, global variables are used to store the game state. A good exercise would be to replace this with a class. The code is not short, but hopefully it is easy for beginners to understand and modify. Infinite loops of the style "while True:" are used to simplify some of the code. The "continue" keyword is used in a few places to jump back to the top of the loop. The "return" keyword is also used to break out of functions. This is generally considered poor style, but in this case it simplifies the code and makes it easier to read (at least in my opinion). A good exercise would be to remove these infinite loops, and uses of continue, to follow a more structured style. """ from dataclasses import dataclass from typing import Literal, Tuple PlayerType = Literal["human", "computer"] @dataclass class MarbleCounts: middle: int human: int computer: int def print_intro() -> None: print("Welcome to Even Wins!") print("Based on evenwins.bas from Creative Computing") print() print("Even Wins is a two-person game. You start with") print("27 marbles in the middle of the table.") print() print("Players alternate taking marbles from the middle.") print("A player can take 1 to 4 marbles on their turn, and") print("turns cannot be skipped. The game ends when there are") print("no marbles left, and the winner is the one with an even") print("number of marbles.") print() def marbles_str(n: int) -> str: if n == 1: return "1 marble" return f"{n} marbles" def choose_first_player() -> PlayerType: while True: ans = input("Do you want to play first? (y/n) --> ") if ans == "y": return "human" elif ans == "n": return "computer" else: print() print('Please enter "y" if you want to play first,') print('or "n" if you want to play second.') print() def toggle_player(whose_turn: PlayerType) -> PlayerType: if whose_turn == "human": return "computer" else: return "human" def to_int(s: str) -> Tuple[bool, int]: """Convert a string s to an int, if possible.""" try: n = int(s) return True, n except Exception: return False, 0 def print_board(marbles: MarbleCounts) -> None: print() print(f" marbles in the middle: {marbles.middle} " + marbles.middle * "*") print(f" # marbles you have: {marbles.human}") print(f"# marbles computer has: {marbles.computer}") print() def human_turn(marbles: MarbleCounts) -> None: """get number in range 1 to min(4, marbles.middle)""" max_choice = min(4, marbles.middle) print("It's your turn!") while True: s = input(f"Marbles to take? (1 - {max_choice}) --> ") ok, n = to_int(s) if not ok: print(f"\n Please enter a whole number from 1 to {max_choice}\n") continue if n < 1: print("\n You must take at least 1 marble!\n") continue if n > max_choice: print(f"\n You can take at most {marbles_str(max_choice)}\n") continue print(f"\nOkay, taking {marbles_str(n)} ...") marbles.middle -= n marbles.human += n return def game_over(marbles: MarbleCounts) -> None: print() print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") print("!! All the marbles are taken: Game Over!") print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!") print() print_board(marbles) if marbles.human % 2 == 0: print("You are the winner! 
Congratulations!") else: print("The computer wins: all hail mighty silicon!") print() def computer_turn(marbles: MarbleCounts) -> None: marbles_to_take = 0 print("It's the computer's turn ...") r = marbles.middle - 6 * int(marbles.middle / 6) if int(marbles.human / 2) == marbles.human / 2: if r < 1.5 or r > 5.3: marbles_to_take = 1 else: marbles_to_take = r - 1 elif marbles.middle < 4.2: marbles_to_take = marbles.middle elif r > 3.4: if r < 4.7 or r > 3.5: marbles_to_take = 4 else: marbles_to_take = r + 1 print(f"Computer takes {marbles_str(marbles_to_take)} ...") marbles.middle -= marbles_to_take marbles.computer += marbles_to_take def play_game(whose_turn: PlayerType) -> None: marbles = MarbleCounts(middle=27, human=0, computer=0) print_board(marbles) while True: if marbles.middle == 0: game_over(marbles) return elif whose_turn == "human": human_turn(marbles) print_board(marbles) whose_turn = toggle_player(whose_turn) elif whose_turn == "computer": computer_turn(marbles) print_board(marbles) whose_turn = toggle_player(whose_turn) else: raise Exception(f"whose_turn={whose_turn} is not 'human' or 'computer'") def main() -> None: print_intro() while True: whose_turn = choose_first_player() play_game(whose_turn) print() again = input("Would you like to play again? (y/n) --> ").lower() if again == "y": print("\nOk, let's play again ...\n") else: print("\nOk, thanks for playing ... goodbye!\n") return if __name__ == "__main__": main()
5,803
django_eventstream/routing.py
noamkush/django-eventstream
0
2171162
from django.conf.urls import url

from . import consumers

urlpatterns = [
    url(r'^$', consumers.EventsConsumer.as_asgi()),
]
128
src/func.py
youssefhoummad/pdf-tools
3
2170321
import os import io import string import tempfile from PIL import Image, ImageOps from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger from PyPDF2.generic import FloatObject import fitz try: from .constant import * except: from constant import * def makeSafeFilename(inputFilename): # Set here the valid chars safechars = string.printable try: r = list(filter(lambda c: c in safechars, inputFilename)) return r except: return "" pass def get_number_of_pages(path): with open(path, 'rb') as f: pdf = PdfFileReader(f, strict=False) number_of_pages = pdf.getNumPages() # print(number_of_pages) return number_of_pages def split(pdf_file, start_page, end_page): assert pdf_file != "" try: start_page, end_page = int(start_page) - 1, int(end_page)-1 except: raise "start_page and end_page must be digit" input_pdf = PdfFileReader(open(pdf_file, 'rb'), strict=False) output_pdf = PdfFileWriter() # name it new_name = rename_file(pdf_file, '_splited') # creat blank file new_file = open(new_name, 'wb') # insert pages in blank file while start_page <= end_page: output_pdf.addPage(input_pdf.getPage(start_page)) output_pdf.write(new_file) start_page += 1 # close it new_file.close() settings.setsave('last_file', new_name) # settings.setsave('last_func', SPLITING_MSG) def merge(pdf_file1, pdf_file2): pdf_merger = PdfFileMerger(strict=False) pdf_merger.append(pdf_file1) pdf_merger.append(pdf_file2) new_file = rename_file(pdf_file1, '_merged') with open(new_file, 'wb') as fileobj: pdf_merger.write(fileobj) settings.setsave('last_file', new_file) # settings.setsave('last_func', MERGING_MSG) def crop(pdf_file, top, right, bottom, left): # POINT_MM = 25.4 / 72.0 input_pdf = PdfFileReader(open(pdf_file, 'rb'),strict=False) output_pdf = PdfFileWriter() top, right, bottom, left = int(top), int(right), int(bottom), int(left) new_name = rename_file(pdf_file, '_croped') new_file = open(new_name, 'wb') num_pages = input_pdf.getNumPages() for i in range(num_pages): page = input_pdf.getPage(i) page.mediaBox.upperRight = ( page.mediaBox.getUpperRight_x() - FloatObject(right), page.mediaBox.getUpperRight_y() - FloatObject(top) ) page.mediaBox.lowerLeft = ( page.mediaBox.getLowerLeft_x() + FloatObject(left), page.mediaBox.getLowerLeft_y() + FloatObject(bottom) ) output_pdf.addPage(page) output_pdf.write(new_file) new_file.close() settings.setsave('last_file', new_name) # settings.setsave('last_func', CROPING_MSG) def extract(pdf_file): with open(pdf_file,"rb") as file: file.seek(0) pdf = file.read() path = '/'.join(pdf_file.split('/')[:-1])+'/images_extracted/' try: os.mkdir(path) except: path = '/'.join(pdf_file.split('/')[:-1])+'/' startmark = b"\xff\xd8" startfix = 0 endmark = b"\xff\xd9" endfix = 2 i = 0 njpg = 0 while True: istream = pdf.find(b"stream", i) if istream < 0: break istart = pdf.find(startmark, istream, istream + 20) if istart < 0: i = istream + 20 continue iend = pdf.find(b"endstream", istart) if iend < 0: raise Exception("Didn't find end of stream!") iend = pdf.find(endmark, iend - 20) if iend < 0: raise Exception("Didn't find end of JPG!") istart += startfix iend += endfix # print("JPG %d from %d to %d" % (njpg, istart, iend)) jpg = pdf[istart:iend] with open(path + "jpg%d.jpg" % njpg, "wb") as jpgfile: jpgfile.write(jpg) njpg += 1 i = iend settings.setsave('last_file', path) # settings.setsave('last_func', EXTRACTING_MSG) def to_images(doc, pdf_file, start_page=None, end_page=None): # https://stackoverflow.com/a/55480474 # doc = fitz.open(pdf_file) pages = doc.pageCount path = 
'/'.join(pdf_file.split('/')[:-1])+'/pdf_to_images/' try: os.mkdir(path) except: path = '/'.join(pdf_file.split('/')[:-1])+'/' zoom_x = 2.0 # horizontal zoom zomm_y = 2.0 # vertical zoom mat = fitz.Matrix(zoom_x, zomm_y) # zoom factor 2 in each dimension if pages == 1: p = doc.loadPage(0) pix = p.getPixmap(matrix = mat) output = f"{path}outfile.png" pix.writePNG(output) return for page in range(0,pages-1): p = doc.loadPage(page) pix = p.getPixmap(matrix = mat) output = f"{path}outfile_{page}.png" pix.writePNG(output) settings.setsave('last_file', path) def to_pdf(*images): listImages = [] for path_img in images: image = Image.open(path_img) image = image.convert('RGB') # fix rotation image = ImageOps.exif_transpose(image) listImages.append(image) new_name = os.path.normpath(images[0][:-4]+'.pdf') listImages[0].save(new_name, save_all=True, append_images=listImages[1:]) settings.setsave('last_file', new_name) # settings.setsave('last_func', TOPDF_MSG) def page_to_image(doc, page=0): p = doc.loadPage(page) pix = p.getPixmap() img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) return img def remove_text_watermark(wm_text, inputFile, outputFile): # https://stackoverflow.com/a/57410205 pass def rename_file(pdf_file, refrain): """ return new name width _splited >>> rename_file(file.pdf, "_splited") -> file_splited.pdf if file_splited.pdf existe -> file_splited_1.pdf .... """ new_name = pdf_file[:-4] + f"{refrain}.pdf" i = 1 while os.path.isfile(new_name): new_name = pdf_file[:-4] + f"{refrain}_{i}.pdf" i +=1 # new_name = makeSafeFilename(new_name) # print(new_name) new_name = os.path.realpath(new_name) return new_name if __name__ == "__main__": p = r"C:\Users\youssef\AppData\Local\Packages\38833FF26BA1D.UnigramPreview_g9c9v27vpyspw\LocalState\0\documents\مقدمات مكثف (4).pdf" # o = r"C:\\Users\\youssef\\Downloads\\3.txt" # d = fitz.open(p) to_txt(p)
6,729
tests/test_file_config.py
stephen-bunn/file-config
7
2168923
# Copyright (c) 2019 <NAME> <<EMAIL>> # ISC License <https://opensource.org/licenses/isc> import enum import typing import attr import pytest import jsonschema from hypothesis import given, settings, HealthCheck from hypothesis.strategies import characters import file_config from .strategies import config, config_var, config_var_dict, class_name FIRST_LEVEL_IMPORTS = ( "__version__", "config", "var", "validate", "to_dict", "from_dict", "build_schema", "make_config", "Regex", "CONFIG_KEY", "handlers", "contrib", ) def test_signature(): for importable in FIRST_LEVEL_IMPORTS: assert hasattr(file_config, importable) @given(config()) def test_config(config): assert callable(config) assert callable(file_config.config(maybe_cls=None)) assert file_config.utils.is_config_type(config) assert file_config.utils.is_config(config()) assert hasattr(config, file_config.CONFIG_KEY) @given(config_var()) def test_config_var(var): assert not callable(var) assert file_config.utils.is_config_var(var) assert file_config.CONFIG_KEY in var.metadata @given(class_name(), config_var_dict(), characters(), characters()) def test_make_config(class_name, config_var_dict, title, description): config = file_config.make_config( class_name, config_var_dict, title=title, description=description ) assert file_config.utils.is_config_type(config) assert file_config.utils.is_config(config()) assert getattr(config, file_config.CONFIG_KEY).get("title") == title assert getattr(config, file_config.CONFIG_KEY).get("description") == description def test_validate(): @file_config.config class ConfigInstance: a = file_config.var(type=str, default="a") config_instance = ConfigInstance() # NOTE: validate returns nothing if nothing is wrong assert not file_config.validate(config_instance) config_instance.a = 1 with pytest.raises(jsonschema.exceptions.ValidationError): file_config.validate(config_instance) @given(config()) def test_from_dict(config): config_dict = file_config.to_dict(config()) assert isinstance(file_config.from_dict(config, config_dict), config) @given(config()) def test_to_dict(config): assert isinstance(file_config.to_dict(config()), dict) @given(config()) def test_reflective(config): config_instance = config() config_dict = file_config.to_dict(config_instance) assert isinstance(config_dict, dict) new_instance = file_config.from_dict(config, config_dict) assert isinstance(new_instance, config) assert config_instance == new_instance @given(class_name()) def test_custom_encoder_decoder(config_name): encoder = lambda x: f"x={x}" decoder = lambda x: x[2:] config = file_config.make_config( config_name, {"test": file_config.var(str, encoder=encoder, decoder=decoder)} ) instance = config(test="test") encoded = file_config.to_dict(instance) assert encoded["test"] == "x=test" decoded = file_config.from_dict(config, encoded) assert decoded.test == "test" @given(class_name()) def test_build_exceptions(config_name): with pytest.raises(ValueError): file_config._file_config._build(None, {}) # test build validation config = file_config.make_config(config_name, {"test": file_config.var(str)}) with pytest.raises(jsonschema.exceptions.ValidationError): file_config._file_config._build(config, {"test": 1}, validate=True) def test_build_nested_array(): @file_config.config class A: @file_config.config class B: bar = file_config.var(str) foo = file_config.var(typing.List[B]) bar = file_config.var(typing.List[str]) # test list of nested configs instance = file_config._file_config._build( A, {"foo": [{"bar": "test"}], "bar": ["test"]} ) assert 
isinstance(instance, A) assert isinstance(instance.bar[0], str) assert instance.bar[0] == "test" assert isinstance(instance.foo[0], A.B) assert instance.foo[0].bar == "test" def test_build_nested_object(): @file_config.config class A: @file_config.config class B: bar = file_config.var(str) foo = file_config.var(typing.Dict[str, B]) bar = file_config.var(typing.Dict[str, str]) instance = file_config._file_config._build( A, {"foo": {"test": {"bar": "test"}}, "bar": {"test": "test"}} ) assert isinstance(instance, A) assert isinstance(instance.bar, dict) assert instance.bar["test"] == "test" assert isinstance(instance.foo, dict) assert isinstance(instance.foo["test"], A.B) assert instance.foo["test"].bar == "test" @given(class_name()) def test_dump_exceptions(config_name): with pytest.raises(ValueError): file_config._file_config._dump(None) def test_dump_enum(): class TestEnum(enum.Enum): A = 0 B = 1 @file_config.config class A: foo = file_config.var(TestEnum) instance = A(foo=TestEnum.A) dumped = file_config._file_config._dump(instance) assert dumped["foo"] == 0 def test_dump_nested_config(): @file_config.config class A: @file_config.config class B: bar = file_config.var(str) foo = file_config.var(B) instance = A(foo=A.B(bar="test")) dumped = file_config._file_config._dump(instance) assert dumped["foo"]["bar"] == "test"
5,518
prolintpy/utils/metrics.py
ProLint/prolintpy
4
2169660
import numpy as np

FUNCS = {
    'mean': lambda c, co, *args: c[c > co].mean(),
    'sum': lambda c, co, *args: c[c > co].sum(),
    'max': lambda c, co, *args: c[c > co].max(),
    'lnr': lambda c, co, t, *args: c[c > co].sum() / (t.time[-1] - t.time[0]),
    'nlnr': lambda c, co, t, cl, *args: c[c > co].sum() / (cl * (t.time[-1] - t.time[0])),
    'occ': lambda c, co, t, *args: 100 * (c.sum() / t.n_frames)
}


def calculate_metric(p, func, co=0, t=None, norm=False, unit='us', *args):
    """Calculate default ProLint metrics.

    The function is designed such that it can be easily extended for other
    custom metrics. The second argument has to be a callable object, which is
    called using the following default arguments: contacts, cutoff,
    MDTraj.Trajectory, number_of_contacts, *args.

    By default ProLint offers the following metrics:

    ``mean`` (Mean_Duration) : The average duration of all contacts.
    ``max`` (Longest_Duration) : The longest duration (averaged if more than 1 protein).
    ``sum`` (Sum_of_all_Contacts) : The total sum of all contacts.
    ``lnr`` (Lipid_Number) : The average number of lipids in contact (the total
    number of contacts normalized with respect to the number of frames).
    ``nlnr`` (Normalized_Lipid_Number) : Lipid_Number normalized with respect to
    the number of different lipids (e.g. number of different cholesterols).
    ``occ`` (Occupancy) : For each frame, we give a value of 0 if no lipid of
    interest is in contact with the residue and 1 otherwise. Occupancy is then:
    sum_of_all_1s / nr_of_frames.

    Parameters
    ----------
    p :
        Per residue contacts. This is the output of ProLint.retrieve_contacts() call.
    func : callable object.
        Custom function or one of the default ones: FUNC[x] where x is one of the
        following: 'mean', 'sum', 'max', 'lnr', 'nlnr', 'occ'. If you want to call
        FUNCS['occ'] make sure to specify the argument contact='occupancy' to the
        retrieve_contacts call first.
    co : int.
        Discard contacts shorter than co value. Useful when you want to discard
        contacts with a short duration.
    t : MDTraj.Trajectory
    norm : bool, default=False
        Normalize contacts with respect to time.
    unit : str
        Time unit to use for the normalization. Either 'us' or 'ns'.
    *args : additional arguments.
        These arguments are passed on to the callable object: func.

    Returns
    -------
    contacts : dict
        Dictionary where the keys are lipids and values are tuples containing
        the mean and standard deviation of the calculated metric.
    """
    per_replicate = {}
    for lipid, v in p.items():
        r = [func(c, co, t, len(v), *args) if len(c[c > co]) > 0 else 0 for c in v]
        if norm:
            r = [x / 1000. if unit == 'ns' else x / 1000000. for x in r]
        per_replicate[lipid] = (np.mean(r), np.std(r))
    return per_replicate


def residence_time(c, ln, time, delta_t_range=None, range_type='mixed', step=None):
    """Calculate Residence Time.

    See the following sources for a detailed explanation:
    https://pubs.acs.org/doi/abs/10.1021/ja310577u and
    https://www.pnas.org/content/117/14/7803

    This metric is currently not calculated by default. It needs more testing.
    Its output is also more difficult to control and predict.

    Parameters
    ----------
    c : contacts
        This is the output of retrieve_contacts_flat, or it can be any flat numpy array.
    ln : int
        The number of the lipid of interest (i.e. the number of residues)
    time : float
        The total trajectory time.
    delta_t_range : list
        The range of the delta_t parameter. If None, it is calculated.
    range_type : str
        Residence time can be sped up by supplying a default range. You can specify
        either a geometric, linear, or mixed range ('geo', 'mixed', 'linear').
        Fastest and default is 'geo'. Only used if delta_t_range=None.
    step : int
        Custom step size for delta_t_range. Not implemented.
    """
    from scipy.optimize import curve_fit

    if delta_t_range is None:
        if range_type == 'mixed':
            delta_t_range = list(np.arange(5, 100, 1)) + list(np.arange(100, 1000, 5)) + list(np.arange(1000, time, 10))
        elif range_type == 'geo':
            delta_t_range = list(np.geomspace(0.01, time, 1000))
        elif range_type == 'linear':
            delta_t_range = list(np.arange(5, time, 1))

    sigma = {0: 1}
    sigma0 = sum([r for r in c]) / (float(time) * ln)
    for delta_t in delta_t_range:
        denominator = (float(time) - delta_t) * ln * sigma0
        if denominator != 0:
            sigma[delta_t] = sum([r - delta_t for r in c if r >= delta_t]) / denominator
        else:
            sigma[delta_t] = 0

    try:
        popt, pcov = curve_fit(lambda t, a, b, c, d: c * np.exp(-a * t) + d * np.exp(-b * t),
                               list(sigma.keys()), list(sigma.values()), (1, 0.1, 1, 1))
    except RuntimeError:
        return (0, 0)

    return (1 / min([abs(k) for k in popt[:2]]), pcov)
5,332
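A short usage sketch for the metric helpers above; the contact arrays are synthetic stand-ins for what the ProLint.retrieve_contacts() call is documented to return (a dict mapping each lipid to per-replicate contact arrays), so names and numbers are illustrative only:

import numpy as np

contacts = {
    "CHOL": [np.array([0.0, 12.0, 3.0]), np.array([5.0, 0.0, 8.0])],
    "POPC": [np.array([1.0, 1.0, 0.0])],
}

# Mean duration of all contacts longer than the cutoff (co=0), per lipid.
mean_durations = calculate_metric(contacts, FUNCS['mean'], co=0)
print(mean_durations)  # {'CHOL': (mean, std), 'POPC': (mean, std)}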
meiduo_mall/celery_tasks/main.py
liuyinsi111/ck
0
2171209
from celery import Celery
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'meiduo_mall.settings.dev')

celery_app = Celery('meiduo')
celery_app.config_from_object('celery_tasks.config')
celery_app.autodiscover_tasks(['celery_tasks.sms', 'celery_tasks.email'])
268
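For context, a minimal sketch of a task module that the autodiscover_tasks call above could pick up, assuming a celery_tasks/sms/tasks.py layout; the task name and body are illustrative and not taken from the original project:

# celery_tasks/sms/tasks.py (hypothetical module found by autodiscover_tasks)
from celery_tasks.main import celery_app


@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
    # A real implementation would call an SMS provider here;
    # this stub only illustrates the task signature.
    print('sending code %s to %s' % (sms_code, mobile))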
pandasuse.py
Varanasi-Software-Junction/Python-repository-for-basics
2
2170586
import pandas as pd

data = pd.read_csv("pandas.csv")
print(data)

"""
datarun = data["run"]
print(datarun)
datarun = data.run
print(datarun)
max = datarun.max()
print(max)
print(datarun.median())
print(type(data))
print(data)
datanames = data["name"]
print(datanames)
print(type(datanames))
print(data.describe())
print(datanames.describe())
# print(data)
# print(type(data))
datadict = data.to_dict()
# print(datadict)
datarun = data["run"]
print(datarun)
print(datarun.median())
# print(data.run)
print(type(data.run))
datarun = data[data.run == 23]
print(datarun)
datarun = data[data.name == "GH"]
print(datarun)
datarun = data[data.run == data.run.min()]
print(datarun)
print(data.describe())
"""
676
aiof/data/property.py
gkama/aiof-metadata
3
2170510
import datetime
from pydantic import BaseModel
from typing import Optional


class MortgageCalculatorRequest(BaseModel):
    propertyValue: Optional[float]
    downPayment: Optional[float]
    interestRate: Optional[float]
    loanTermYears: Optional[int]
    startDate: Optional[datetime.datetime]
    pmi: Optional[float]
    propertyInsurance: Optional[float]
    monthlyHoa: Optional[float]
395
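A quick usage sketch for the request model above; the numbers are made up, and the .dict() call assumes the pydantic v1 API that the Optional-without-default field style suggests:

req = MortgageCalculatorRequest(
    propertyValue=300000.0,
    downPayment=60000.0,
    interestRate=3.25,
    loanTermYears=30,
)
print(req.dict())  # fields left unset come back as None under pydantic v1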
rf2db/db/RF1CanonicalCore.py
cts2/rf2db
0
2171152
# -*- coding: utf-8 -*- # Copyright (c) 2014, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # Neither the name of the <ORGANIZATION> nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED # OF THE POSSIBILITY OF SUCH DAMAGE. import os from rf2db.db.RF2FileCommon import RF2FileWrapper class CanonicalCoreDB(RF2FileWrapper): directory = os.path.join('OtherResources', 'Canonical Table') prefixes = ['res1_Canonical_Core_'] table = 'canonical_core' isRF1File = True createSTMT = """CREATE TABLE IF NOT EXISTS %(table)s ( conceptid1 bigint(20) NOT NULL, relationshiptype bigint(20) NOT NULL, conceptid2 bigint(20) NOT NULL, relationshipgroup int(11) NOT NULL, KEY conceptid1 (conceptid1) USING HASH);""" def __init__(self, *args, **kwargs): RF2FileWrapper.__init__(self, *args, **kwargs) def loadTable(self, rf2file): from rf2db.db.RF2RelationshipFile import RelationshipDB from rf2db.db.RF2StatedRelationshipFile import StatedRelationshipDB rdb = RelationshipDB() srdb = StatedRelationshipDB() if not rdb.hascontent() or not srdb.hascontent(): print(("Relationship databases must be loaded before loading %s" % self._fname)) return super(CanonicalCoreDB, self).loadTable(rf2file) print("Updating Stated Relationship File") srdb.updateFromCanonical(self._fname) print("Updating Relationship File") rdb.updateFromCanonical(self._fname)
2,856
ryu/tests/inception_arp/common.py
att/ryu
0
2168677
#!/usr/bin/env python
import subprocess
import threading
import traceback


def exec_cmd(cmd):
    print(('-' * 15 + ' %s ') % cmd)
    proc = subprocess.Popen(['/bin/bash', '-c', cmd])
    proc.communicate()


def process_in_parallel(cmds, sliding_window=100):
    try:
        # execute the commands in batches of `sliding_window` threads
        start = 0
        end = sliding_window
        while True:
            if start == len(cmds):
                break
            end = len(cmds) if end > len(cmds) else end
            threads = []
            for cmd in cmds[start:end]:
                thread = threading.Thread(target=exec_cmd, args=(cmd,))
                thread.start()
                threads.append(thread)
            print('batch processing %s to %s' % (start, end))
            for thread in threads:
                thread.join()
            start = end
            end += sliding_window
    except Exception:
        print(traceback.format_exc())
944
nestedtreedictionary.py
RetepRennelk/arborg
0
2171331
from collections import defaultdict, deque from node_uuid import Node import json '''The godId is a dummy identifier to keep the root node itself in the dictionary. ''' class NestedDictionaryTree(): def __init__(self, godId=None, root=None, dd=None): self.dd = dd self.root = root self.godId = godId # Once the NestedDictionaryTree has been instantiated, # the godId shall remain unchanged through repeated savings. if godId is None: godNode = Node() self.godId = godNode.getId() def initialise(self, columnNames): self.root = Node(None, columnNames) self.dd = defaultdict(list) self.dd[self.godId].append(self.root.toList()) def createNDTfromTree(self, root): '''Based on the root-node a default-dict is created which can be saved to disk and later on be loaded. ''' dd = defaultdict(list) dd[self.godId].append(root.toList()) qu = deque([root]) while len(qu) > 0: parent = qu.popleft() for child in parent.getChildren(): dd[parent.getId()].append(child.toList()) qu.append(child) return root, dd def updateNDTfromTree(self): root, self.dd = self.createNDTfromTree(self.root) @staticmethod def createTreeFromFile(filename): dd, godId = NestedDictionaryTree.readDD2File(filename) root = Node(None, dd[godId][0][1], dd[godId][0][0]) qu = deque([root]) while len(qu) > 0: parent = qu.popleft() for element in dd[parent.getId()]: newNode = Node(parent, element[1], element[0]) qu.append(newNode) return NestedDictionaryTree(godId, root, dd) def writeDD2File(self, filename): with open(filename, 'w') as f: f.write(self.godId + '\n') json.dump(self.dd, f) @staticmethod def readDD2File(filename): with open(filename, 'r') as f: godId = f.readline().strip() dd = json.load(f) return defaultdict(list, dd), godId def getRoot(self): return self.root def columnCount(self): if self.dd is None: return 0 else: return len(self.dd[self.godId][0][1]) def setHeaderData(self, section, value): if self.root.content[section] != value: self.root.content[section] = value self.dd[self.godId][0][1][section] = value return True return False def getEmptyAndValidNode(self): '''The item identfier is set. Parent and content are not set and must be provided externally.''' columnCount = self.columnCount() return Node(None, ['???']*columnCount) def insertSiblingAbove(self, node, siblingNode=None): parent = node.getParent() if parent is None: parent = self.getRoot() if siblingNode is None: siblingNode = self.getEmptyAndValidNode() siblingNode.parent = parent row = 0 if parent.childrenCount() > 0: row = parent.children.index(node) parent.children.insert(row, siblingNode) return siblingNode def insertSiblingBelow(self, node, siblingNode=None): parent = node.getParent() if parent is None: parent = self.getRoot() if siblingNode is None: siblingNode = self.getEmptyAndValidNode() siblingNode.parent = parent row = -1 if parent.childrenCount() > 0: row = parent.children.index(node) parent.children.insert(row+1, siblingNode) return siblingNode def insertChildBelow(self, item, childNode=None): if childNode is None: childNode = self.getEmptyAndValidNode() childNode.parent = item item.children.insert(0, childNode) return childNode if __name__ == '__main__': root = Node(None, ['column', 'comment']) parent1 = Node(root, ['AAA', 'aaa']) parent2 = Node(root, ['BBB', 'bbb']) child1 = Node(parent1, ['CCC', 'ccc']) ndt = NestedDictionaryTree() ndt.createNDTfromTree(root) filename = 'tmp.ndt' ndt.writeDD2File(filename) ndt2 = NestedDictionaryTree.createTreeFromFile(filename)
4,393
MultiFilesReader.py
zeibou/pyHiLightExtractor
2
2170997
from dataclasses import dataclass from datetime import timedelta import os from pyHiLightExtractor import HiLightFinder from moviepy.editor import VideoFileClip @dataclass class HiLightDescriptor: name: str local_time: timedelta global_time: timedelta @dataclass class VideoDescriptor: name : str path: str previous_name : str total_time: timedelta def get_all_hilights(folder, startswith='G', endswith='.mp4'): global_time = timedelta(0) for file_name in sorted(os.listdir(folder)): if file_name.lower().startswith(startswith.lower()) and file_name.lower().endswith(endswith.lower()): full_path = os.path.join(folder, file_name) duration = VideoFileClip(full_path).duration hilights = HiLightFinder.find_hilights(full_path) for h in hilights: local_time = timedelta(milliseconds=h) yield HiLightDescriptor(file_name, local_time, local_time + global_time) global_time += timedelta(seconds=duration) def get_all_videos(folder, startswith='G', endswith='.mp4'): previous_name = None for file_name in sorted(os.listdir(folder)): if file_name.lower().startswith(startswith.lower()) and file_name.lower().endswith(endswith.lower()): full_path = os.path.join(folder, file_name) duration = VideoFileClip(full_path).duration yield VideoDescriptor(file_name, full_path, previous_name, duration) previous_name = file_name if __name__ == '__main__': path = "/Users/nicolas.seibert/Documents/foot/2019-01-14/" for d in get_all_videos(path, endswith='.MP4'): print(d) for d in get_all_hilights(path, endswith='.MP4'): print(d.name, d.local_time, d.global_time, d.local_time.total_seconds())
1,814
chatbotenv/lib/python2.7/site-packages/chatterbot_corpus/__init__.py
MavenCode/chatbotApp
0
2169337
""" A machine readable multilingual dialog corpus. """ from .corpus import Corpus __version__ = '1.0.1' __author__ = '<NAME>' __email__ = '<EMAIL>' __url__ = 'https://github.com/gunthercox/chatterbot-corpus' __all__ = ( 'Corpus', )
251
easytext/label_decoder/label_index_decoder/sequence_max_label_index_decoder.py
cuilunan/easytext
1
2170200
#!/usr/bin/env python 3 # -*- coding: utf-8 -*- # # Copyright (c) 2020 PanXu, Inc. All Rights Reserved # """ 通过取到最大值的 sequence label 解码 Authors: PanXu Date: 2020/07/05 11:28:00 """ from typing import Tuple import torch from easytext.utils import bio as BIO from easytext.data import LabelVocabulary from .label_index_decoder import LabelIndexDecoder class SequenceMaxLabelIndexDecoder(LabelIndexDecoder): """ 对于 sequence logits, shape: (batch_size, seq_len, num_label), 使用 max 进行 在每一个 timestep 上进行 decode, 得到 label index. """ def __init__(self, label_vocabulary: LabelVocabulary): """ 初始化 :param label_vocabulary: label 词汇表 """ self._label_vocabulary = label_vocabulary def __call__(self, logits: torch.Tensor, mask: torch.ByteTensor) -> torch.LongTensor: """ 对于 sequence logits, shape: (batch_size, seq_len, num_label), 使用 max 进行 在每一个 timestep 上进行 decode, 得到 label index. :param logits: shape: (batch_size, seq_len, num_label) :param mask: shape: (bath_size, seq_len), 存储的是 0 或 1 :return: 解码后的 label index, shape: (batch_size, seq_len), 注意这是有padding_index 的结果, 需要使用 mask 来提取实际的 label index. """ if logits.dim() != 3: raise RuntimeError(f"logits shape 错误, 应该是 (B, seq_len, num_label), " f"而现在是 {logits.shape}") if (mask is not None) and (mask.dim() != 2): raise RuntimeError(f"mask shape 错误, 应该是 (B, seq_len), " f"而现在是 {mask.shape}") batch = logits.size(0) max_sequence_length = logits.size(1) # mask shape: (B, seq_len) if mask is None: mask = torch.ones(size=(logits.shape[0], logits.shape[1]), dtype=torch.long) sequence_length = mask.sum(dim=-1).tolist() batch_indices = list() for i in range(batch): sequence_labels, sequence_label_indices = BIO.decode_one_sequence_logits_to_label( sequence_logits=logits[i, :sequence_length[i]], vocabulary=self._label_vocabulary) padding_indices = [self._label_vocabulary.padding_index] * (max_sequence_length - sequence_length[i]) sequence_label_indices.extend(padding_indices) batch_indices.append(sequence_label_indices) batch_indices = torch.tensor(batch_indices, dtype=torch.long) return batch_indices
2,491
src/minespex/base/spectra.py
aszorn/minespex
1
2170283
""" XPS spectra are collected from numerous individual measurements (spectrum). """ import numpy as np from abc import ABC, abstractmethod from collections.abc import Hashable from copy import deepcopy class Spectra(ABC): """Abstract base class for spectra objects. """ class Dimension(Hashable): def __init__(self, axis, name, scale): """Information about a dimension in the Spectra. Spectra contain information in dense blocks. Information about the dimension is capture in this helper class. Attributes: axis (int): (Immutable) Accessed through a property. name (str): (Immutable) Name of this property. size (int): (Immutable) Length (number of entries) in this dimension. scale (tuple): (Immutable) Scale along this dimension. This will generally be a tuple of floats. Args: axis (int): The axis index. Whether 0- or 1-indexed will depend on the source data. name (str): Name for this dimension/axis. scale (array-like): Values/labels of this dimension. For example, the binding energies at each step. """ super().__init__() self.__axis = int(axis) self.__name = str(name) self.__scale = tuple(scale) def __hash__(self): return hash(self.axis) + hash(self.name) + hash(self.scale) def __str__(self): return f"Dimension {self.axis} ({self.name}): {self.scale}" @property def axis(self): return self.__axis @property def name(self): return self.__name @property def size(self): return len(self.__scale) @property def scale(self): return self.__scale def __init__(self, name=''): """Stores XPS data in a numpy.ndarray `Spectra.data`. Information about the dimensions--name, scale, and which axis--is accessible by name or index. Attributes: name (str): Name of this spectra. attributes (dict): Named attributes; the metadata associated with this spectra. data (numpy.ndarray): 2- or 3-D spectral data, e.g. intensities. Information about the dimensions (scale, name, etc.) is provided in the `dim` attribute. dim (Spectra.Dimension): Information about each dimension of the spectra. Args: name (str): Name of the Spectra. """ self.name = name self.attributes = dict() self.dim = dict() self.data = np.array([]) def get_dim(self, key): """Get the dimension corresponding to `key`. Args: key (int, str, tuple, or Spectra.Dimension): Key identifying the axis. Returns: Spectra.Dimension: The dimension information. """ return self.dim.get(key, None) def set_dim(self, axis, name=None, scale=None): """Set dimension information. Set dimension information for the specified axis. Args: axis (int or Spectra.Dimension): The axis that is to be added. name (str): (optional) The name of the dimension to be added. scale (tuple): (optional) The scale/labels of the axis. Returns: None. """ if isinstance(axis, Spectra.Dimension): dim = axis axis, name, scale = axis.axis, axis.name, axis.scale else: dim = Spectra.Dimension(axis=axis, name=name, scale=scale) self.rm_dim(axis) self.dim[dim] = dim self.dim[name] = dim self.dim[axis] = dim self.dim[scale] = dim def rm_dim(self, key): """Remove all references to the dimension identified by `key`. Args: key (int, str, tuple, or Spectra.Dimension): The axis to be removed. Returns: None. """ dim = self.get_dim(key) if dim: axis, name, scale = dim.axis, dim.name, dim.scale if dim in self.dim: del self.dim[dim] if axis in self.dim: del self.dim[axis] if name in self.dim: del self.dim[name] if scale in self.dim: del self.dim[scale] @abstractmethod def axis(self, key): """Returns the axis of the dimension identified by `key`. 
While the data may be 1-indexed in the file, the return value from this function will be adjusted to be 0-indexed; that is, `Spectra.axis(1)` will return 0. Args: key (int, str, or Spectra.Dimension): Representation of the dimension whose axis is to be returned. Returns: int: Python-indexable axis of the requested axis, or None if not found. This method must be overloaded based on whether the source data is one-indexed or zero-indexed. """ raise NotImplementedError("Axis depends on indexing convention of " "input file.") def name(self, key): """Returns the name of the dimension identified by `key`. Args: key (int, str, Spectra.Dimension): Representation of the dimension whose name is to be returned. Returns: str: Name of the requested axis, or None if not found. """ try: return self.dim[key].name except KeyError: return None def scale(self, key): """Returns the scale of the dimension identified by `key`. Args: key (int, str, or Spectra.Dimension): Representation of the dimension whose scale is to be returned. Returns: numpy.ndarray: Scale of the requested axis, or None if not found. """ try: return np.array(self.dim[key].scale) except KeyError: return None def size(self, key): """Returns the size (length) of the dimension identified by `key`. Args: key (int, str, or Spectra.Dimension): Representation of the dimension whose length is to be returned. Returns: int: Length along the requested axis. """ try: return self.dim[key].size except KeyError: return None def integrate_along(self, key): """Integrates the spectra along the dimension identified by `key`. Args: key (int, str, or Spectra.Dimension): Representation of the dimension whose length is to be returned. Returns: Spectra: Spectra integrated along the specified axis, or the unchanged spectra if the axis is not found. """ if key not in self.dim: return self # return value rval = type(self)(self.name) rval.attributes = deepcopy(self.attributes) # calculate the integrated spectra axis, scale = self.axis(key), self.scale(key) rval.data = np.trapz(self.data, x=scale, axis=axis) rval.data /= np.sum(scale[1:] - scale[:-1]) # populate the dimension information. rmdim = self.get_dim(key) for dim in set(self.dim.values()): # drop the dimension that was reduced if dim is rmdim: continue # get the axis, name and scle of the dimension axis, name, scale = dim.axis, dim.name, dim.scale if axis > rmdim.axis: # the dimensionality has been reduced by 1. axis -= 1 rval.set_dim(axis=axis, name=name, scale=scale) # done return rval class Scienta(Spectra): def axis(self, key): try: return self.dim[key].axis-1 except KeyError: return None axis.__doc__ = Spectra.axis.__doc__
8,328
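Purely as illustration of the dimension bookkeeping above, a small sketch using the Scienta subclass with synthetic data; the dimension names, scales, and shapes are invented, and axis numbers are 1-indexed at the call site as the Scienta.axis override implies:

import numpy as np

s = Scienta(name="demo")
s.set_dim(axis=1, name="sweep", scale=(1, 2, 3, 4))
s.set_dim(axis=2, name="binding energy", scale=(284.0, 284.5, 285.0))
s.data = np.random.rand(4, 3)  # 4 sweeps x 3 binding energies

reduced = s.integrate_along("sweep")   # trapezoidal integration over data axis 0
print(reduced.data.shape)              # (3,)
print(reduced.scale("binding energy"))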
Scripts/Core_Scripts/Setup_Extract_PDF_Data.py
ystol/PDF-Tracker
1
2171182
import pandas as pd from Core_Scripts.Custom_Functions.Functions_General import append_df_to_excel import Core_Scripts.Custom_Functions.Functions_General as extractFunctions import Core_Scripts.Custom_Functions.Functions_Custom_Functions as processPDFdata import os from tkinter import Tk from tkinter.filedialog import askdirectory import csv # so that output doesnt get cut short when displaying, below line will widen what the print window shows pd.options.display.width = None def script_extract_pdf_data(pdfFolder, ask_target_directory=False): if ask_target_directory: Tk().withdraw() # dont want the full GUI print('Choose folder where to analyze PDFs from.') pdfFolder = askdirectory() # have to change the working directory to the target so that 'open' function works (if the URL ver isnt being used) # note where the original directory its running from is starting_wd = os.getcwd() os.chdir(pdfFolder) print('Directory location: ' + pdfFolder) fileset = extractFunctions.get_files_from_directory(pdfFolder) # get the list of applicable pdfs from file list to run this on, to find only pdfs, use an if # condition in the list composition to identify a pdf file datafiles = [file for file in fileset if file[-4:] == '.pdf'] author_column = '/T' stringlist = ['/Subtype', '/CreationDate', '/Subj', '/Contents', author_column] date_attributes = ['/CreationDate', '/ModDate'] # extract all markup data and pdf general data for each pdf file and add onto a dataframe for storing markupdata = pd.DataFrame() pdfdata = pd.DataFrame() for filename in datafiles: print('Extract PDF Data: Processing ' + filename) markup_data_df = processPDFdata.extract_from_pdf(filename, stringlist, ignore_string='/Link', remove=True) pdf_data_df = processPDFdata.get_pdf_date_data(filename, date_attributes) markupdata = pd.concat([markupdata, markup_data_df], ignore_index=True) markupdata[author_column] = [s.replace('.', ' ') for s in markupdata[author_column]] pdfdata = pd.concat([pdfdata, pdf_data_df], ignore_index=True) # return to the directory started from (to reset if needs to run in succession) os.chdir(starting_wd) return markupdata, pdfdata # run this if the script is ran standalone if __name__ == '__main__': configName = '../Local_Config.csv' # get the config file with open(configName) as config_file: reader = csv.reader(config_file) config = dict(reader) maindirectory = config['Folderpath'] markupdata, pdfdata = script_extract_pdf_data(maindirectory, ask_target_directory=False) db_name = config['DB_Filename'] xlPath = config['DB_Filepath'] # saving to excel (eventually should be more efficient database type) # need to set directory to save the excel to as well before saving sheetname = 'MarkupRawData' datasheetname = 'PDF_Data' # use path from config file (should be setup to point to main database) os.chdir(xlPath) print('Extract PDF Data: Saving Data') append_df_to_excel(db_name, markupdata, sheetname, startrow=0, truncate_sheet=True) append_df_to_excel(db_name, pdfdata, datasheetname, startrow=0, truncate_sheet=True) print('Extract PDF Data: Save Complete')
3,332
replace_vowels.py
pythonism/sums.py
3
2170430
#!/usr/bin/env python3


def replaceStr(str, c, c2):
    i = 0
    d = 0
    n_str = ""
    while i < len(str):
        if str[i] == c[d]:
            n_str += c2
        else:
            n_str += str[i]
        i += 1
    return n_str


def reStr(str, c, c2):
    i = 0
    strs = str
    while i < len(c):
        strs = replaceStr(strs, c[i], "")
        i += 1
    return strs


Str = "<NAME>"
p = reStr(Str, "eou", "")
print(p)
387
HOCRParser/Utils.py
KarimTarabishy/TMIXT
8
2171323
import math import numpy as np from scipy import spatial from sklearn.metrics.pairwise import cosine_similarity def cosine_similarity_custom( x, y): print(' x=', len(x), ' y=', len(y)) if len(x) != 600: # for item in x,y: # y[item]=y[item].encode("utf-8") # x[item]=x[item].encode("utf-8") print(x, ' ', y) numerator = sum(a * b for a, b in zip(x, y)) denominator = square_rooted(x) * square_rooted(y) return numerator / float(denominator) def square_rooted( x): return round(math.sqrt(sum([a * a for a in x])), 3) def cosine_similarity_sklearn(x, y): """ :param x: :param y: :return: """ result = cosine_similarity(np.array(x).reshape(1, -1), np.array(y).reshape(1, -1)) return result[0][0] def cosine_similarity_scipy( x, y): result = 1 - spatial.distance.cosine(x, y) return result def softmax(x): e = np.exp(x - np.max(x)) s = np.sum(e) return e / s
982
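A tiny sanity-check sketch for the similarity helpers above, using made-up vectors; on parallel vectors all of the implementations should agree on a value of about 1.0:

a = [1.0, 2.0, 3.0]
b = [2.0, 4.0, 6.0]

print(cosine_similarity_sklearn(a, b))   # ~1.0
print(cosine_similarity_scipy(a, b))     # ~1.0
print(cosine_similarity_custom(a, b))    # ~1.0 (also prints the inputs, since len(a) != 600)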
postman/urls.py
tuxis/django-postman
0
2171220
""" If the default usage of the views suits you, simply use a line like this one in your root URLconf to set up the default URLs:: (r'^messages/', include('postman.urls')), Otherwise you may customize the behavior by passing extra parameters. Recipients Max -------------- Views supporting the parameter are: ``write``, ``reply``. Example:: ..., {'max': 3}, name='postman_write'), See also the ``POSTMAN_DISALLOW_MULTIRECIPIENTS`` setting User filter ----------- Views supporting a user filter are: ``write``, ``reply``. Example:: def my_user_filter(user): if user.get_profile().is_absent: return "is away" return None ... ..., {'user_filter': my_user_filter}, name='postman_write'), function interface: In: a User instance Out: None, False, '', 'a reason', or ValidationError Exchange filter --------------- Views supporting an exchange filter are: ``write``, ``reply``. Example:: def my_exchange_filter(sender, recipient, recipients_list): if recipient.relationships.exists(sender, RelationshipStatus.objects.blocking()): return "has blacklisted you" return None ... ..., {'exchange_filter': my_exchange_filter}, name='postman_write'), function interface: In: ``sender``: a User instance ``recipient``: a User instance ``recipients_list``: the full list of recipients Out: None, False, '', 'a reason', or ValidationError Auto-complete field ------------------- Views supporting an auto-complete parameter are: ``write``, ``reply``. Examples:: ..., {'autocomplete_channels': (None,'anonymous_ac')}, name='postman_write'), ..., {'autocomplete_channels': 'write_ac'}, name='postman_write'), ..., {'autocomplete_channel': 'reply_ac'}, name='postman_reply'), Auto moderators --------------- Views supporting an ``auto-moderators`` parameter are: ``write``, ``reply``. Example:: def mod1(message): # ... return None def mod2(message): # ... return None mod2.default_reason = 'mod2 default reason' ... ..., {'auto_moderators': (mod1, mod2)}, name='postman_write'), ..., {'auto_moderators': mod1}, name='postman_reply'), function interface: In: ``message``: a Message instance Out: rating or (rating, "reason") with reting: None, 0 or False, 100 or True, 1..99 Others ------ Refer to documentation. 
..., {'form_classes': (MyCustomWriteForm, MyCustomAnonymousWriteForm)}, name='postman_write'), ..., {'form_class': MyCustomFullReplyForm}, name='postman_reply'), ..., {'form_class': MyCustomQuickReplyForm}, name='postman_view'), ..., {'template_name': 'my_custom_view.html'}, name='postman_view'), ..., {'success_url': 'postman_inbox'}, name='postman_reply'), ..., {'formatters': (format_subject,format_body)}, name='postman_reply'), ..., {'formatters': (format_subject,format_body)}, name='postman_view'), """ try: from django.conf.urls import patterns, include, url # django 1.4 except ImportError: from django.conf.urls.defaults import patterns, include, url # django 1.3 from django.views.generic.simple import redirect_to OPTION_MESSAGES = 'm' OPTIONS = OPTION_MESSAGES urlpatterns = patterns('postman.views', url(r'^inbox/(?:(?P<option>'+OPTIONS+')/)?$', 'inbox', name='postman_inbox'), url(r'^sent/(?:(?P<option>'+OPTIONS+')/)?$', 'sent', name='postman_sent'), url(r'^archives/(?:(?P<option>'+OPTIONS+')/)?$', 'archives', name='postman_archives'), url(r'^trash/(?:(?P<option>'+OPTIONS+')/)?$', 'trash', name='postman_trash'), url(r'^write/(?:(?P<recipients>[\w.@+-:]+)/)?$', 'write', name='postman_write'), url(r'^reply/(?P<message_id>[\d]+)/$', 'reply', name='postman_reply'), url(r'^view/(?P<message_id>[\d]+)/$', 'view', name='postman_view'), url(r'^view/t/(?P<thread_id>[\d]+)/$', 'view_conversation', name='postman_view_conversation'), url(r'^markasunread/$', 'mark_as_unread', name='postman_markasunread'), url(r'^markasread/$', 'mark_as_read', name='postman_markasread'), url(r'^archive/$', 'archive', name='postman_archive'), url(r'^delete/$', 'delete', name='postman_delete'), url(r'^undelete/$', 'undelete', name='postman_undelete'), (r'^$', redirect_to, {'url': 'inbox/'}), )
4,267
iko.py
pollydrag/iko
0
2171078
import asyncio from typing import Dict from typing import Type OPTIONAL = object() class Field: def __init__( self, default=OPTIONAL, dump_to=None, load_from=None, outer_name=None, ): if outer_name is not None: assert dump_to is None and load_from is None self.default = default self.dump_to = dump_to or outer_name self.load_from = load_from or outer_name async def dump(self, data, attr, context): value = data.get(attr, self.default) if value == OPTIONAL: return value return await self.post_dump(value, context) async def post_dump(self, value, context): return value async def load(self, data, attr, context): value = data.get(attr, self.default) if value == OPTIONAL: return value return await self.post_load(value, context) async def post_load(self, value, context): return value class Nested(Field): def __init__( self, schema: Type['Schema'], default=OPTIONAL, dump_to=None, load_from=None, outer_name=None, ): self.schema = schema super().__init__(default, dump_to, load_from, outer_name) async def post_dump(self, value, context): return await self.schema.dump(value, context=context) async def post_load(self, value, context): return await self.schema.load(value, context=context) class List(Field): def __init__( self, schema: Type['Schema'] = None, default=OPTIONAL, dump_to=None, load_from=None, outer_name=None, ): self.schema = schema super().__init__(default, dump_to, load_from, outer_name) async def post_dump(self, value, context): return [ ( await self.schema.dump(item, context=context) if self.schema else item ) for item in value ] async def post_load(self, value, context): return [ ( await self.schema.load(item, context=context) if self.schema else item ) for item in value ] class SchemaMeta(type): def __new__(mcs, name, bases, attrs): fields = {} for base in bases: if issubclass(base, Schema): fields.update(base.__fields__) fields.update( { name: field for name, field in attrs.items() if isinstance(field, Field) }, ) attrs['__fields__'] = fields return super().__new__(mcs, name, bases, attrs) class Schema(metaclass=SchemaMeta): __fields__: Dict[str, Field] @classmethod async def dump(cls, data, *, context=None): if context is None: context = {} values = await asyncio.gather( *[ field.dump(data, attr, context) for attr, field in cls.__fields__.items() ], ) attrs = [ field.dump_to or attr for attr, field in cls.__fields__.items() ] return { attr: value for attr, value in zip(attrs, values) if value != OPTIONAL } @classmethod def dump_many(cls, items, *, context=None): if context is None: context = {} return asyncio.gather( *[cls.dump(item, context=context) for item in items], ) @classmethod async def load(cls, data, *, context=None): if context is None: context = {} values = await asyncio.gather( *[ field.load( data, field.load_from if field.load_from else attr, context, ) for attr, field in cls.__fields__.items() ], ) return { attr: value for attr, value in zip(cls.__fields__, values) if value != OPTIONAL } @classmethod def load_many(cls, items, *, context=None): if context is None: context = {} return asyncio.gather( *[cls.load(item, context=context) for item in items], )
4,469
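A small usage sketch of the schema machinery above; UserSchema and its field names are invented for illustration:

import asyncio


class UserSchema(Schema):
    # 'user_id' on the Python side, exposed as 'id' on the wire via outer_name.
    user_id = Field(outer_name='id')
    name = Field()
    tags = List(default=[])


async def demo():
    loaded = await UserSchema.load({'id': 1, 'name': 'Ada'})
    print(loaded)   # {'user_id': 1, 'name': 'Ada', 'tags': []}
    dumped = await UserSchema.dump(loaded)
    print(dumped)   # {'id': 1, 'name': 'Ada', 'tags': []}


asyncio.run(demo())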
database/models.py
12urenloop/Ronny-the-station-chef
0
2171347
from sqlalchemy import Column, Integer, Float, CHAR
from sqlalchemy.types import DateTime, TIMESTAMP

from .database import Base


class Detection(Base):
    __tablename__ = "detection"

    id = Column(Integer, primary_key=True, index=True)
    detection_time = Column(TIMESTAMP, nullable=False)
    mac = Column(CHAR(6 * 2 + 5), nullable=False)
    rssi = Column(Integer, nullable=False)
    baton_uptime_ms = Column(Integer, nullable=False)
    battery_percentage = Column(Float, nullable=False)

    def __repr__(self):
        return f'<Detection {self.id} at {self.detection_time}, {self.mac} {self.rssi} {self.battery_percentage:.1f}%>'
643
Relue/Eu100.py
jialing3/corner_cases
1
2170799
# total * (total - 1) == 2 * blue * (blue - 1)
# total or total - 1 needs to be divisible by 4
total = 10 ** 12
blue = 707106781186

tmp = total * (total - 1) - 2 * blue * (blue - 1)
while tmp != 0:
    # print(tmp)
    if tmp > 0:
        blue += 1
        tmp -= 4 * (blue - 1)
    else:
        total += 1
        tmp += 2 * (total - 1)
        if total % 4 not in (0, 1):
            total += 1
            tmp += 2 * (total - 1)

print(blue, total)
453
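As a quick check of the identity in the comments above (the probability of drawing two blue discs equals 1/2), the two small arrangements usually quoted for this problem satisfy it exactly; a short verification sketch:

for blue, total in [(15, 21), (85, 120)]:
    # blue/total * (blue-1)/(total-1) == 1/2  <=>  total*(total-1) == 2*blue*(blue-1)
    assert total * (total - 1) == 2 * blue * (blue - 1)
    print(blue, total, "satisfies the identity")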
pychk/file_management.py
ayushpriya10/pychk
1
2169455
import sys def read_from_requirement(path='requirements.txt'): requirements = list() if path == '.': path = 'requirements.txt' try: with open(path) as requirements_file: content = requirements_file.read().split('\n') requirements = [dependency.split('==') for dependency in content] while [''] in requirements: requirements.remove(['']) copy_of_requirements = tuple(requirements) for dependency in copy_of_requirements: if len(dependency) == 1: if '>=' in dependency[0] or '<=' in dependency[0]: print(f'[MSG] Please specify exact version to check by replacing \'<=\' or \'>=\' with \'==\' for {dependency}.') requirements.remove(dependency) continue if '#' in dependency[0]: if '#' == dependency[0][0]: print(f'[MSG] Skipping commented dependency "{dependency}"') requirements.remove(dependency) continue else: dependency = [dependency[0][:dependency[0].index('#')].strip()] requirements[requirements.index(dependency)] += [''] if len(dependency) == 2: if '#' in dependency[0]: if dependency[0][0] == '#': print(f'[MSG] Skipping commented dependency "{dependency}"') requirements.remove(dependency) continue if '#' in dependency[1]: version = dependency[1].split('#')[0].strip() requirements.remove(dependency) dependency = [dependency[0], version] requirements += [dependency] return requirements except FileNotFoundError: print(f'[ERR] Could not open "{path}". Please check the path and try again.') sys.exit(1) except: print('[ERR] An error occurred while opening requirements file.') print(requirements) sys.exit(1) if __name__ == "__main__": requirements = read_from_requirement() print(requirements)
2,377
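To make the parsing rules above concrete, a hedged sketch of what the function returns for a small, invented requirements file (exact pins are kept, commented lines are dropped, and '>='/'<=' specifiers are skipped with a message):

# requirements.txt (hypothetical contents):
#     requests==2.25.1
#     # an entirely commented-out line
#     flask==1.1.2  # inline comment after an exact pin
#     numpy>=1.19
#
# read_from_requirement('requirements.txt') would return
#     [['requests', '2.25.1'], ['flask', '1.1.2']]
# and print a message asking for an exact pin on the numpy line.
dependencies = read_from_requirement('requirements.txt')
print(dependencies)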
demo/demo-simple.py
amisadmin/fastapi_amis_admin
166
2170549
from fastapi import FastAPI
from fastapi_amis_admin.admin.settings import Settings
from fastapi_amis_admin.admin.site import AdminSite

# Create the FastAPI application
app = FastAPI()

# Create the AdminSite instance
site = AdminSite(settings=Settings(database_url_async='sqlite+aiosqlite:///amisadmin.db'))

# Mount the admin site onto the application
site.mount_app(app)

if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, debug=True)
388
ting/core_module/search_analytics/sentiment.py
Stank7/Ting
1
2171042
import pandas as pd import numpy as np import nltk from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer import spacy nlp = spacy.load("en_core_web_sm") #python -m spacy download en_core_web_sm import collections class OpinionMiner: #Input : array list of tweets def __init__(self, sentiment_thresh = None, ): self.sentiment_thresh =None self.analyser = SentimentIntensityAnalyzer() self.pos = set() self.neg = set() self.h = collections.defaultdict(lambda: [0, 0]) def get_pos_neg(self,tweets): #Covid tweet detector pos = set() neg = set() for tweet in tweets: doc = nlp(tweet) doc = [x.lemma_ for x in doc] doc = nlp(' '.join(doc)) for token in doc: if token.pos_ == 'ADJ' and len(token) >= 2 and not token.is_stop and not token.text[0].isupper(): a = self.analyser.polarity_scores(tweet) ##Add phrase matcher/ bigram/ trigram if a > 0: self.pos.add(token.text) elif a <= 0: self.neg.add(token.text) def feature_sentiment(self, sentence, pos, neg): ''' input: dictionary and sentence function: appends dictionary with new features if the feature did not exist previously,then updates sentiment to each of the new or existing features output: updated dictionary ''' sent_dict = dict() sentence = nlp(sentence) opinion_words = neg + pos debug = 0 for token in sentence: # print(token.text) # check if the word is an opinion word, then assign sentiment if token.text in opinion_words: sentiment = 1 if token.text in pos else -1 # sentiment = TextBlob(token.text).sentiment.polarity # if target is an adverb modifier (i.e. pretty, highly, etc.) # but happens to be an opinion word, ignore and pass # if token.pos_ not in ['NOUN']: # continue if (token.dep_ == "advmod"): continue elif (token.dep_ == "amod"): sent_dict[token.head.text] = sentiment # for opinion words that are adjectives, adverbs, verbs... else: for child in token.children: # if there's a adj modifier (i.e. very, pretty, etc.) 
add more weight to sentiment # This could be better updated for modifiers that either positively or negatively emphasize if ((child.dep_ == "amod") or (child.dep_ == "advmod")) and (child.text in opinion_words): sentiment *= 1.5 # check for negation words and flip the sign of sentiment if child.dep_ == "neg": sentiment *= -1 for child in token.children: # if verb, check if there's a direct object if (token.pos_ == "VERB") & (child.dep_ == "dobj"): sent_dict[child.text] = sentiment # check for conjugates (a AND b), then add both to dictionary subchildren = [] conj = 0 for subchild in child.children: if subchild.text == "and": conj = 1 if (conj == 1) and (subchild.text != "and"): subchildren.append(subchild.text) conj = 0 for subchild in subchildren: sent_dict[subchild] = sentiment # check for negation for child in token.head.children: noun = "" if ((child.dep_ == "amod") or (child.dep_ == "advmod")) and (child.text in opinion_words): sentiment *= 1.5 # check for negation words and flip the sign of sentiment if (child.dep_ == "neg"): sentiment *= -1 # check for nouns for child in token.head.children: noun = "" if (child.pos_ == "NOUN") and (child.text not in sent_dict): noun = child.text # Check for compound nouns for subchild in child.children: if subchild.dep_ == "compound": noun = subchild.text + " " + noun sent_dict[noun] = sentiment debug += 1 return sent_dict def fetch_popular_opinions(self, tweets): self.get_pos_neg(tweets) dict_list = [] for t in tweets: x = self.feature_sentiment(t, self.pos, self.neg) dict_list.append(x) self.helper(dict_list) return self.h def priority(self, covid_related): ''' Defines priority for the keywords :return: ''' def helper(self, l): for i in range(len(l)): d = l[i] for key in d.keys(): if key in self.h: self.h[key][0] += d[key] self.h[key][1] += 1 else: self.h[key][0] = d[key] self.h[key][1] = 1 self.h = dict(sorted(self.h.items(), key=lambda x: x[1][1], reverse=True)) def analyze_sentiment(self, tweets): ''' Input : List of tweets Process: Return keyword related dictionary with sentiment Aspect based opinion mining :param tweets: :return: ''' d = {} for tweet, tw_id in tweets: score_dict = self.analyser.polarity_scores(tweet) d[tw_id] = score_dict return d
6,336
dimensigon/domain/locker_memory.py
dimensigon/dimensigon
2
2169198
import threading import threading import typing as t from abc import ABC from typing_extensions import Protocol from dimensigon.domain.exceptions import StateError, ApplicantError, StateAlreadyInPreventingLock, StateAlreadyInLock, \ StateAlreadyInUnlock, StateTransitionError, PriorityError from dimensigon.utils.typos import Priority OE = t.Optional[t.Union[StateError, ApplicantError]] class Comparable(Protocol): """type for objects that support < operator""" def __lt__(self, other) -> bool: ... class State(ABC): def preventing_lock(self, lock: 'Locker') -> OE: return StateAlreadyInPreventingLock() def lock(self, lock: 'Locker') -> OE: return StateAlreadyInLock() def unlock(self, lock: 'Locker') -> OE: return StateAlreadyInUnlock() def __str__(self): return self.__class__.__name__ class UnlockState(State): def lock(self, lock: 'Locker') -> OE: return StateTransitionError('UNLOCK', 'LOCK') def preventing_lock(self, lock: 'Locker') -> OE: lock._state = PreventingLockState(lock.unlock, kwargs={'applicant': lock.applicant}) return class PreventingLockState(State): def __init__(self, func: t.Callable, args: t.Tuple = None, kwargs: t.Mapping[str, t.Any] = None): def safe_unlock(func_, *args_, **kwargs_): try: func_(*args_, **kwargs_) except StateError: pass args = args if args is not None else [] kwargs = kwargs if kwargs is not None else {} self.timer = threading.Timer(interval=Locker.TIMEOUT, function=safe_unlock, args=(func, *args), kwargs=kwargs) self.timer.start() def lock(self, lock: 'Locker') -> OE: self.timer.cancel() lock._state = LockState() def unlock(self, lock: 'Locker') -> OE: self.timer.cancel() lock._state = UnlockState() class LockState(State): def preventing_lock(self, lock: 'Locker') -> OE: return StateTransitionError('LOCK', 'PREVENTING_LOCK') def unlock(self, lock: 'Locker') -> OE: lock._state = UnlockState() return # def uid_to_class(uid): # for name, cls in inspect.getmembers(sys.modules[__name__], lambda c: inspect.isclass(c) and issubclass(c, State)): # if getattr(cls, 'id', None) == uid: # return cls # class Locker: """ class that holds the state of the lock """ TIMEOUT: t.ClassVar = 90 def __init__(self): self._state: State = UnlockState() self._applicant: t.Optional[Comparable] = None # holds the owner identifier who requested the lock self._mutex = threading.Lock() def set_timeout(self, timeout: float): self.__class__.TIMEOUT = timeout return self @staticmethod def _raise_if_error(msg): if isinstance(msg, StateError): raise msg @property def state(self): return self._state @property def applicant(self) -> t.Optional[Comparable]: return self._applicant def preventing_lock(self, applicant: t.Any): with self._mutex: if self.applicant is not applicant and self.applicant is not None: raise ApplicantError() self._applicant = applicant msg = self._state.preventing_lock(self) self._raise_if_error(msg) def lock(self, applicant: t.Any): with self._mutex: if self.applicant is not applicant and self.applicant is not None: raise ApplicantError() msg = self._state.lock(self) self._raise_if_error(msg) def unlock(self, applicant: t.Any): with self._mutex: if self.applicant is not applicant and self.applicant is not None: raise ApplicantError() msg = self._state.unlock(self) self._raise_if_error(msg) self._reset_locker() def _reset_locker(self): self._applicant = None def stop_timer(self): if isinstance(self._state, PreventingLockState): self._state.timer.cancel() def __str__(self): return f'{self._state}' class PriorityLocker: """ Priority Locker class that prevents locking a locker 
if a more prior locker is trying to be locked """ def __init__(self, priority: Comparable, persistent=False, uid=None): """ Parameters ---------- priority: locker priority. Could be any hashable instance that implements __lt__ method. 1 is higher priority than 2 """ self.priority = priority self._locker = Locker() def set_timeout(self, timeout: int): self._locker.set_timeout(timeout=timeout) @property def applicant(self): return self._locker.applicant @property def state(self): return self._locker.state def preventing_lock(self, lockers: t.Dict[Priority, 'PriorityLocker'], applicant: t.Any): # check if higher lockers are locked or in preventing lock cond = any(map(lambda s: isinstance(s, (PreventingLockState, LockState)), [locker.state for priority, locker in lockers.items() if priority < self.priority])) if not cond: self._locker.preventing_lock(applicant=applicant) else: raise PriorityError() def lock(self, applicant: t.Any): self._locker.lock(applicant=applicant) def unlock(self, applicant: t.Any): self._locker.unlock(applicant=applicant) def stop_timer(self): self._locker.stop_timer()
5,644
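A small usage sketch for the priority-aware locking above; the applicant object and the two priorities are arbitrary, and the import follows the module's own exception names:

from dimensigon.domain.exceptions import PriorityError

# Two lockers; priority 1 outranks priority 2.
lockers = {1: PriorityLocker(1), 2: PriorityLocker(2)}
applicant = object()

# Engage the high-priority locker: preventing_lock first, then lock.
lockers[1].preventing_lock(lockers, applicant)
lockers[1].lock(applicant)

try:
    # A lower-priority locker cannot start locking while a higher one is engaged.
    lockers[2].preventing_lock(lockers, applicant)
except PriorityError:
    print("priority 2 must wait for priority 1")

lockers[1].unlock(applicant)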
research_tools/augment_bin_examples.py
ionutzzu12/external-knowledge-codegen
0
2171423
import pickle import json from components.dataset import Example import nltk from numpy.random import choice from operator import itemgetter # examples_bin_file = "../data/conala/train.gold.full.bin" # examples_json_file = "../data/annotated_train_set.json" examples_bin_file = "../data/conala/dev.bin" examples_json_file = "../data/annotated_dev_set.json" functions_file = "../data/functions_details.json" NUM_MULTICHOICE = 10 def tokenize_intent(intent): lower_intent = intent.lower() tokens = nltk.word_tokenize(lower_intent) return tokens def get_single_token_functions_ids(func_set): ret = [] # total = 0 # opcode = 0 for f in func_set: if '.' not in f['name'] and f['module'] != '2to3' and "opcode" not in f['name']: ret += [f['index']] # print('total', total) # print('opcode', opcode) return ret if __name__ == "__main__": data = pickle.load(open(examples_bin_file, 'rb')) annot_set = json.load(open(examples_json_file)) func_set = json.load(open(functions_file)) sglf_ids = get_single_token_functions_ids(func_set) func_counts = {} for index, example in enumerate(data): assert example.meta['example_dict']['question_id'] == annot_set[index]['question_id'] crt_functions = [{'fid': j, 'fname': i} for i, j in zip(annot_set[index]['functions'], annot_set[index]['functions_ids'])] for fdict in crt_functions: if fdict['fid'] is not None and '.' not in fdict['fname']: fid = fdict['fid'] if fid in func_counts: func_counts[fid] += 1 else: func_counts[fid] = 1 sorted_funcs = [] common_single_f_ids = [] for fid, cnt in sorted(func_counts.items(), key=itemgetter(1), reverse=True): sorted_funcs.append({(fid, func_set[fid]['name'], cnt)}) common_single_f_ids.append(fid) new_data = [] for index, example in enumerate(data): assert example.meta['example_dict']['question_id'] == annot_set[index]['question_id'] new_ex = Example(example.src_sent, example.tgt_actions, example.tgt_code, example.tgt_ast, example.idx, example.meta) crt_functions = [{'fid': j, 'fname': i} for i, j in zip(annot_set[index]['functions'], annot_set[index]['functions_ids'])] new_data.append(new_ex) valid_functions_ids = [] for fdict in crt_functions: # select sample (python internal) functions if fdict['fid'] is not None and '.' not in fdict['fname']: valid_functions_ids.append(fdict['fid']) # f_details = sglf[fdict['fid']] # assert f_details['index'] == fdict['fid'] # fdict['doc'] = tokenize_intent(''.join(f_details['doc'])) # valid_functions.append(fdict) fid = fdict['fid'] if fid in func_counts: func_counts[fid] += 1 else: func_counts[fid] = 1 valid_functions_ids_chosen = choice(common_single_f_ids, NUM_MULTICHOICE - len(valid_functions_ids)) valid_functions_ids += list(valid_functions_ids_chosen) valid_functions = [] for id in valid_functions_ids: f_details = func_set[id] assert f_details['index'] == id fdict = {'fname': f_details['name'], 'fid': id, 'doc': tokenize_intent(''.join(f_details['doc']))} valid_functions.append(fdict) new_ex.functions = valid_functions pickle.dump(new_data, open("../data/conala/added_funcs_dev.bin", 'wb'))
3,649
plots/exp_2_6.py
mkannwischer/xmss-prng-dpa
2
2171102
#!/usr/bin/python # -*- coding: utf-8 -*- ''' * Copyright (c) 2017, <NAME> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. ''' import csv import math import numpy as np from os import listdir from os.path import isfile, join import matplotlib.pyplot as plt def parse_csv(file): ifile = open(file, "rb") reader = csv.reader(ifile, delimiter=';') values = [] for row in reader: values.append(np.array([int(row[0]), row[3]])) ifile.close() return np.array(values) def scatter_txt(x,y): txt = "" for p, d in zip(x, y): txt += "\nn=%d p=%.3f" % (p, d) return txt def plot_scatter(x, y, fname): f = plt.figure(figsize=(11,5)) plt.plot(x,y, "x", ms=18.0) plt.ylabel("success probability") plt.xlabel("number of traces") plt.ylim(ymin=0) plt.ylim(ymax=1.1) plt.tight_layout() plt.figtext(.7, .2, scatter_txt(x, y)) #plt.show() f.savefig(fname) #experiment 2 n = [8,10,16,32,64,128,256,512] d = [] for i in n: data = np.array(parse_csv("../data/exp_2/%d.log" %i)[:, 0], dtype=np.float) d.append(data.sum() / len(data)) plot_scatter(n,d, "dpa_exp_2.pdf") # experiment 6 n = [16, 32, 64, 96, 128, 256, 512, 1024, 2048, 4048, 8096] d = [] for i in n: data = np.array(parse_csv("../data/exp_6/%d.log" %i)[:, 0], dtype=np.float) d.append(data.sum() / len(data)) plot_scatter(n,d, "dpa_exp_6.pdf")
2,687
raspberryio/project/forms.py
cvautounix/raspberryio
113
2171212
from gdata.youtube.service import YouTubeService as yt_service from django import forms from raspberryio.project.models import Project, ProjectStep, ProjectImage from raspberryio.project.utils import get_youtube_video_id PLACEHOLDER_WIDGET_TYPES = ( 'TextInput', 'PasswordInput', 'Textarea', ) class PlaceHolderMixin(object): """ Mixin that sets the placeholder text for form text, password and textarea fields. Use Meta.remove_labels = True to remove labels whose placeholder text is set. Placeholder text defaults to each field's label. To override, set Meta.placeholders to a dictionary of the form: {'fieldname': 'placeholder text', ...} """ def __init__(self, *args, **kwargs): super(PlaceHolderMixin, self).__init__(*args, **kwargs) placeholders = getattr(self.Meta, 'placeholders', {}) for name, field in self.fields.iteritems(): widget_type = field.widget.__class__.__name__ if widget_type in PLACEHOLDER_WIDGET_TYPES: placeholder_text = placeholders.get(name, '') if not placeholder_text: placeholder_text = field.label if field.label else name placeholder_text = placeholder_text.replace('_', ' ') \ .title() field.widget.attrs.update({ 'placeholder': placeholder_text }) if getattr(self.Meta, 'remove_labels', False): field.label = '' class ProjectForm(PlaceHolderMixin, forms.ModelForm): class Meta(object): model = Project placeholders = { 'title': 'The title of your RaspberryPi project', } fields = ( 'title', 'featured_photo', 'featured_video', 'tldr', 'categories', ) class ProjectStepForm(PlaceHolderMixin, forms.ModelForm): images = forms.CharField(required=False, widget=forms.HiddenInput) def clean_images(self): images_str = self.cleaned_data.get('images', '') image_pks = images_str.split(',') if images_str else [] try: image_pks = [int(pk) for pk in image_pks] except ValueError: image_pks = [] self.images = image_pks def clean_video(self): data = self.cleaned_data.get('video', '') if data: video_id = get_youtube_video_id(data) try: yt_service().GetYouTubeVideoEntry(video_id=video_id) except: msg = "The supplied URL is not a valid Youtube video" raise forms.ValidationError(msg) return data def save(self, *args, **kwargs): result = super(ProjectStepForm, self).save(*args, **kwargs) if self.images: self.instance.gallery.add(*self.images) return result class Meta(object): model = ProjectStep placeholders = { 'title': 'The title of this step in the project', } fields = ( 'title', 'content', 'video', 'images' ) class ProjectImageForm(forms.ModelForm): def clean(self): files_data = self.files.get('file', None) if not files_data: raise forms.ValidationError('No file data present') self.data['file'] = files_data def save(self): file_data = self.data['file'] instance = self.instance if file_data: instance.file = file_data instance.save() return instance class Meta(object): model = ProjectImage
3,605
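Following the PlaceHolderMixin docstring above, a hedged sketch of a form that opts into placeholders; ContactForm and its fields are invented for illustration, and the snippet keeps to the Django/Python 2 idiom the module itself uses:

from django import forms


class ContactForm(PlaceHolderMixin, forms.Form):
    name = forms.CharField()
    message = forms.CharField(widget=forms.Textarea)

    class Meta(object):
        remove_labels = True
        placeholders = {
            'message': 'What would you like to tell us?',
        }

# 'name' falls back to its title-cased field name as placeholder text;
# 'message' uses the explicit Meta.placeholders entry and loses its label.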
scripts/12_maip.py
ersilia-os/osm-series4-candidates-2
1
2171288
from __init__ import OUTPUT
import pandas as pd
import numpy as np
import os

ROOT = os.path.dirname(os.path.abspath(__file__))

print("MAIP")

df = pd.read_csv(os.path.join(OUTPUT, "data_11.csv"))
print(df.shape)
print(df)

maip = pd.read_csv(os.path.join(ROOT, "..", "maip", "maip_predictions.csv"))
print(maip.shape)
print(maip)

df["Maip"] = maip["model_score"]

cut = np.percentile(df["Maip"], 10)
df = df[df["Maip"] > cut]
print(df.shape)

df.to_csv(os.path.join(OUTPUT, "data_12.csv"), index=False)
510
tests/callbacks/meters/timer_test.py
eivtho/PyLaia
89
2171326
import time
import unittest

from laia.callbacks.meters.timer import Timer


class TimerTest(unittest.TestCase):
    def test(self):
        m = Timer()
        time.sleep(1)
        t = m.value
        # Check that the timer has measured ~1 second.
        self.assertGreaterEqual(t, 1.0)
        self.assertLess(t, 1.1)


if __name__ == "__main__":
    unittest.main()
371
ionosenterprise/requests/lan.py
ionos-cloud/ionos-enterprise-sdk-python
6
2170707
import ionoscloud from coreadaptor.IonosCoreProxy import IonosCoreProxy class lan: @IonosCoreProxy.process_response def get_lan(self, datacenter_id, lan_id, depth=1): """ Retrieves a single LAN by ID. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param lan_id: The unique ID of the LAN. :type lan_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_find_by_id_with_http_info( datacenter_id, lan_id, depth=depth, response_type='object') @IonosCoreProxy.process_response def list_lans(self, datacenter_id, depth=1): """ Retrieves a list of LANs available in the account. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param depth: The depth of the response data. :type depth: ``int`` """ return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_get_with_http_info( datacenter_id, depth=depth, response_type='object') @IonosCoreProxy.process_response def delete_lan(self, datacenter_id, lan_id): """ Removes a LAN from the data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param lan_id: The unique ID of the LAN. :type lan_id: ``str`` """ return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_delete_with_http_info(datacenter_id, lan_id) @IonosCoreProxy.process_response def create_lan(self, datacenter_id, lan): """ Creates a LAN in the data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param lan: The LAN object to be created. :type lan: ``dict`` """ lan_properties = lan.__dict__ del lan_properties['nics'] return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_post_with_http_info(datacenter_id, ionoscloud.models.Lan( properties=lan_properties ), response_type='object') @IonosCoreProxy.process_response def update_lan(self, datacenter_id, lan_id, name=None, public=None, ip_failover=None, pcc=None): """ Updates a LAN :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param lan_id: The unique ID of the LAN. :type lan_id: ``str`` :param name: The new name of the LAN. :type name: ``str`` :param public: Indicates if the LAN is public. :type public: ``bool`` :param ip_failover: A list of IP fail-over dicts. :type ip_failover: ``list`` :param pcc: Unique identifier of the private cross connect the given LAN is connected to if any :type pcc: ``str`` """ data = {} if name: data['name'] = name if public is not None: data['public'] = public if ip_failover: data['ipFailover'] = ip_failover if pcc: data['pcc'] = pcc return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_patch_with_http_info(datacenter_id, lan_id, ionoscloud.models.LanProperties(**data), response_type='object') @IonosCoreProxy.process_response def get_lan_members(self, datacenter_id, lan_id, depth=1): """ Retrieves the list of NICs that are part of the LAN. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param lan_id: The unique ID of the LAN. :type lan_id: ``str`` """ return self.get_api_instance(ionoscloud.LanApi)\ .datacenters_lans_nics_get_with_http_info( datacenter_id, lan_id, depth=depth, response_type='object')
4,595
models/ingredient.py
allku/3xamplePythonFlask
0
2171433
# -*- coding: utf-8 -*-
from app import db
from sqlalchemy import ForeignKey


class Ingredient(db.Model):
    __tablename__ = 'ingredients'

    id = db.Column(db.Integer, db.Identity(start=1), primary_key=True)
    name = db.Column(db.String, nullable=False)
    beer_id = db.Column(db.Integer, db.ForeignKey('beers.id', ondelete='CASCADE'))

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<id {}>'.format(self.id)

    def serialize(self):
        ingredients = {
            'id': self.id,
            'name': self.name
        }
        return ingredients
639
main.py
cristilianojr/JOKENPOH
1
2171256
from tkinter import * import hudmain import animframe import gamesystem import threading import replay class GameWindow(Tk): def __init__(self, master=None): super(GameWindow, self).__init__(master) # Definição tamanho e posição da janela self.width = 800 # Largura self.height = 600 # Altura self.posx = 350 # Posição de x self.posy = 50 # Posição de y #Setter Geometria self.geometry(f'{self.width}x{self.height}+{self.posx}+{self.posy}') #Definição do título da Janela self.title('JohKenPoh') #Configurações self.configure(background='#1a1a1a') # Cor de fundo geral self.resizable(False, False) # Não permite que o tamanho da janela seja alterado #Istanciamento dos compartimentos---------------------------------- """ HUD """ self.hud = hudmain.Hud(self) self.hud.pack(side=TOP, fill=BOTH) """Central Widget""" self.display = animframe.DisplayAnim(None) self.display.pack(side=TOP, fill=BOTH) """Player Buttons""" self.playerbuttons = hudmain.PlayerButtons(self, self.hud) self.playerbuttons.pack(side=BOTTOM, fill=BOTH) self.player_choice = '' self.ia_choice = '' self.v_state = '' self.on_animation = False def construct_replay(self, state2): self.replay = replay.Replay(self, state2) self.replay.pack(fill=BOTH, expand=1) self.update() def new_init(self): self.replay.destroy() """ HUD """ self.hud = hudmain.Hud(self) self.hud.pack(side=TOP, fill=BOTH) """Central Widget""" self.display = animframe.DisplayAnim(None) self.display.pack(side=TOP, fill=BOTH) """Player Buttons""" self.playerbuttons = hudmain.PlayerButtons(self, self.hud) self.playerbuttons.pack(side=BOTTOM, fill=BOTH) self.player_choice = '' self.ia_choice = '' self.v_state = '' self.on_animation = False self.update() def game_win(self): self.hud.destroy() self.display.destroy() self.playerbuttons.destroy() print('win') self.construct_replay('win') def game_over(self): self.hud.destroy() self.display.destroy() self.playerbuttons.destroy() print('lose') self.construct_replay('lose') def start_game(self, player_choice): """Define a escolha da IA""" self.player_choice = player_choice self.ia_choice = gamesystem.ia_chocer() """Faz a verificação de vitoria ou derrota""" verify_state = gamesystem.battle_verification(player_choice, self.ia_choice) self.v_state = verify_state """""" self.on_animation = True """Reservado para a inicialização da animação""" t = threading.Thread(target=self.display.battle_animation, args=()) t.start() """Remoção de pontos dos jogadores""" if verify_state == 'victory': self.hud.enemy.damage() elif verify_state == 'draw': pass elif verify_state == 'defeat': self.hud.player.damage()
3,285
forecast/simple.py
elbaschid/double-click-example
0
2171257
import os
import re

import click
import requests
from dateutil.parser import parse
from collections import namedtuple

from .parameters import LOCATION

Config = namedtuple('Config', ['config_file', 'api_key'])

session = requests.Session()


def validate_api_key(value):
    if not value:
        raise click.ClickException('you need to provide an API key')
    if not re.match(r'[a-z0-9]{32}', str(value)):
        raise click.ClickException('invalid API key format')
    return value


@click.group()
@click.pass_context
@click.option(
    '--config-file', '-c',
    type=click.Path(),
    default=os.path.expanduser('~/.forecast.cfg'))
@click.option('--api-key', envvar='API_KEY', default='')
def main(ctx, config_file, api_key):
    if os.path.exists(config_file):
        with open(config_file) as cfg:
            api_key = cfg.readline()
    ctx.obj = Config(config_file, api_key)


@main.command()
@click.pass_obj
def config(obj):
    config_file = obj.config_file
    api_key = obj.api_key

    api_key = click.prompt('Please enter your API key', default=api_key)
    validate_api_key(api_key)

    with open(config_file, 'w') as cfg:
        cfg.write(api_key)


@main.command('find')
@click.pass_obj
@click.argument('location', type=LOCATION)
def find_city_id(obj, location):
    api_key = obj.api_key

    # use equality, not identity, when comparing against a string literal
    if location.type == 'id':
        click.echo(f"City ID: {location.value}")
        return

    url = 'https://api.openweathermap.org/data/2.5/find'
    params = {
        'APPID': api_key,
        'q': location.value,
    }
    response = session.get(url, params=params)
    data = response.json()
    click.echo(f"City ID: {data['list'][0]['id']}")


@main.command()
@click.pass_obj
@click.argument('location', type=LOCATION)
def today(obj, location):
    api_key = obj.api_key

    url = 'https://api.openweathermap.org/data/2.5/weather'
    params = {
        'APPID': api_key,
        location.query: location.value,
        'units': 'metric',
    }
    response = session.get(url, params=params)
    data = response.json()

    name = data.get('name', location.value)
    description = data['weather'][0]['description']
    temp_min = data['main']['temp_min']
    temp_max = data['main']['temp_max']

    click.secho(f'Weather for {name}: {description.capitalize()}')
    click.secho(f'Temperature (C): {temp_min} to {temp_max}')


@main.command()
@click.pass_obj
@click.argument('location', type=LOCATION)
def forecast(obj, location):
    api_key = obj.api_key

    url = 'https://api.openweathermap.org/data/2.5/forecast'
    params = {
        'APPID': api_key,
        location.query: location.value,
        'units': 'metric',
    }
    response = session.get(url, params=params)
    data = response.json()

    time = 'Time'
    description = 'Description'
    temp_min = 'Min Temp'
    temp_max = 'Max Temp'
    click.echo('')
    click.echo(f'{time:^20}{description:^20}{temp_min:>10}{temp_max:>10}')
    click.echo('=' * 60)

    for data in data['list']:
        time = parse(data['dt_txt']).strftime('%a, %b %d @ %Hh')
        description = data['weather'][0]['description']
        temp_min = data['main']['temp_min']
        temp_max = data['main']['temp_max']
        click.echo(
            f'{time:<20}{description:^20}{temp_min:>10.1f}{temp_max:>10.1f}'
        )
3,327
fjord/heartbeat/healthcheck.py
bopopescu/fjord
16
2170890
from collections import namedtuple from datetime import datetime, timedelta import logging from django.conf import settings from django.core.mail import send_mail from django.db import connection from django.template.loader import render_to_string from fjord.heartbeat.models import Answer from fjord.mailinglist.utils import get_recipients log = logging.getLogger('i.heartbeat') MAILINGLIST = 'heartbeat_health' SEVERITY_LOW = 1 SEVERITY_MEDIUM = 5 SEVERITY_HIGH = 10 SEVERITY = { SEVERITY_LOW: 'low', SEVERITY_MEDIUM: 'medium', SEVERITY_HIGH: 'high' } Result = namedtuple('Result', ['name', 'severity', 'summary', 'output']) CHECKS = [] def register_check(cls): CHECKS.append(cls) return cls class Check(object): name = '' @classmethod def check(cls): pass @register_check class CheckAnyAnswers(Check): """Are there any heartbeat answers? If not, that's very bad.""" name = 'Are there any heartbeat answers?' @classmethod def check(cls): day_ago = datetime.now() - timedelta(days=1) count = Answer.objects.filter(received_ts__gt=day_ago).count() if count == 0: return Result( cls.name, SEVERITY_HIGH, '0 answers in last 24 hours.', str(count) ) return Result( cls.name, SEVERITY_LOW, '%s answers in last 24 hours.' % str(count), str(count) ) def tableify(table): """Takes a list of lists and converts it into a formatted table :arg table: list (rows) of lists (columns) :returns: string .. Note:: This is text formatting--not html formatting. """ num_cols = 0 maxes = [] for row in table: num_cols = max(num_cols, len(row)) if len(maxes) < len(row): maxes.extend([0] * (len(row) - len(maxes))) for i, cell in enumerate(row): maxes[i] = max(maxes[i], len(str(cell))) def fix_row(maxes, row): return ' '.join([ str(cell) + (' ' * (maxes[i] - len(str(cell)))) for i, cell in enumerate(row) ]) return '\n'.join( [ fix_row(maxes, row) for row in table ] ) @register_check class CheckMissingVotes(Check): """FIXME: I don't understand this check""" name = 'Are there votes of 0 for large cells?' @classmethod def check(cls): # Note: This SQL statement comes from Gregg. It's probably # mysql-specific. sql = """ SELECT sum(score is not NULL) as nvoted, DATE_FORMAT(received_ts, '%Y-%m-%d') as ydm, version, channel, 100*sum(flow_began_ts > 0) / count(received_ts) as pct_began, 100*sum(flow_offered_ts >0) / count(received_ts) as pct_offered, 100*sum(flow_voted_ts > 0)/ count(received_ts) as pct_voted, 100*sum(flow_engaged_ts > 0) / count(received_ts) as pct_engaged, count(received_ts) as N FROM heartbeat_answer WHERE received_ts > DATE_SUB(now(), interval 1 day) AND is_test=0 AND survey_id="heartbeat-by-user-first-impression" AND (locale='en-us') GROUP BY version, channel, ydm HAVING N >= 50 and nvoted = 0 ORDER BY channel, version, ydm; """ cursor = connection.cursor() cursor.execute(sql) data = list(cursor.fetchall()) severity = SEVERITY_LOW message = 'Data looks ok.' for row in data: votes = row[8] if severity == SEVERITY_LOW and votes >= 50: severity = SEVERITY_MEDIUM message = '{} null votes within the last day.'.format(votes) if severity == SEVERITY_MEDIUM and votes >= 250: severity = SEVERITY_HIGH break # Can't get worse! 
if data: data.insert(0, [ 'nvoted', 'ydm', 'version', 'channel', 'pct_began', 'pct_offered', 'pct_voted', 'pct_engaged', 'N' ]) data = tableify(data) else: data = repr(data) return Result(cls.name, severity, message, data) def get_all_healthchecks(): return CHECKS def run_healthchecks(): return [checker.check() for checker in get_all_healthchecks()] def email_healthchecks(results): has_high = any([result.severity == SEVERITY_HIGH for result in results]) # The subject should indicate very very obviously whether the sky is # falling or not. subject = '[hb health] %s (%s)' % ( ('RED ALERT' if has_high else 'fine'), datetime.now().strftime('%Y-%m-%d %H:%M') ) # We do the entire email body in HTML because some output will want to # preserve whitespace and use a fixed-width font. Further, this lets # us make it super easy to spot SEVERITY_HIGH situations. html_body = render_to_string('heartbeat/email/heartbeat_health.html', { 'severity_name': SEVERITY, 'results': results }) recipients = get_recipients(MAILINGLIST) if recipients: send_mail( subject=subject, message='This email is in HTML.', from_email=settings.SERVER_EMAIL, recipient_list=recipients, html_message=html_body ) else: # FIXME: log this? is that a good idea? log.info('No recipients for "%s"\n%s\n%s' % ( MAILINGLIST, subject, html_body))
5,676
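A minimal usage sketch for the tableify helper defined in the healthcheck module above; the row values are made up for illustration, and the output shape simply follows the function's own padding logic:

rows = [
    ['check', 'severity', 'answers'],
    ['CheckAnyAnswers', 'low', 1532],
]
print(tableify(rows))
# Each cell is rendered with str() and padded on the right to the width of the
# longest cell in its column, columns joined by a single space, e.g.
# "CheckAnyAnswers low      1532"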
11 - Extra-- sonos snips voice app/snipssonos/use_cases/play/track.py
RedaMastouri/marvis
1
2170825
from snipssonos.services.node.query_builder import NodeQueryBuilder from snipssonos.shared.use_case import UseCase from snipssonos.shared.response_object import ResponseSuccess, ResponseFailure class PlayTrackUseCase(UseCase): def __init__(self, device_discovery_service, music_search_service, music_playback_service, feedback_service): self.device_discovery_service = device_discovery_service self.music_search_service = music_search_service self.music_playback_service = music_playback_service self.feedback_service = feedback_service def process_request(self, request_object): device = self.device_discovery_service.get() track_name = request_object.track_name if request_object.track_name else None artist_name = request_object.artist_name if request_object.artist_name else None album_name = request_object.album_name if request_object.album_name else None playlist_name = request_object.playlist_name if request_object.playlist_name else None results_tracks = list() # TODO : routing tests. if track_name and album_name and artist_name and playlist_name: # Track - Album - Artist - Playlist results_tracks = self.music_search_service.search_track_for_album_and_for_artist_and_for_playlist( track_name, album_name, artist_name, playlist_name) if track_name and album_name and artist_name: # Track - Album - Artist results_tracks = self.music_search_service.search_track_for_album_and_for_artist(track_name, album_name, artist_name) if track_name and album_name and playlist_name: # Track - Album - Playlist results_tracks = self.music_search_service.search_track_for_album_and_for_playlist(track_name, album_name, playlist_name) if track_name and artist_name and playlist_name: # Track - Artist - Playlist results_tracks = self.music_search_service.search_track_for_artist_and_for_playlist(track_name, artist_name, playlist_name) if track_name and playlist_name: # Track - Playlist results_tracks = self.music_search_service.search_track_for_playlist(track_name, playlist_name) if track_name and artist_name: # Track - Artist results_tracks = self.music_search_service.search_track_for_artist(track_name, artist_name) if track_name and album_name: # Track - Album results_tracks = self.music_search_service.search_track_for_album(track_name, album_name) # Track if track_name and not (artist_name or playlist_name or album_name): results_tracks = self.music_search_service.search_track(request_object.track_name) if len(results_tracks): # TODO : write feedback tests first_result = results_tracks[0] self.music_playback_service.clear_queue(device) self.music_playback_service.play(device, first_result) if first_result.artists is None: tts_feedback = "" else: artist_names = self.feedback_service.concatenate_artists_in_string(first_result.artists) tts_feedback = self.feedback_service.get_track_template() \ .format(first_result.name, artist_names) return ResponseSuccess(feedback=tts_feedback) return ResponseFailure.build_resource_error(self.feedback_service.get_generic_error_message())
3,461
robosuite/experiments/train_parallel.py
sigmundhh/robosuite
0
2171115
from distutils.command.config import config from typing import Callable from unicodedata import name import robosuite as suite from robosuite.wrappers import GymWrapper from robosuite.wrappers import GymWrapperRGBD import stable_baselines3 as sb3 from stable_baselines3.common.vec_env import SubprocVecEnv, VecNormalize, DummyVecEnv, VecVideoRecorder from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.callbacks import BaseCallback, EvalCallback from stable_baselines3.common.utils import set_random_seed from wandb.integration.sb3 import WandbCallback import wandb import numpy as np import gym import os import time import multiprocessing import argparse config = { "env_params": { "env_name" : "Lift", "robots" : "IIWA", "has_renderer" : False, "has_offscreen_renderer" : False, "use_object_obs" : True, "use_camera_obs" : False, "reward_shaping" : True, "controller_configs" : suite.load_controller_config( default_controller="OSC_POSE"), "horizon" : 200, }, "total_timesteps": int(2e6), "timesteps_pr_save": int(1e5), "algorithm" : "SAC", "policy_model" : "MlpPolicy", "num_processes" : 8, "random_seed" : 42 } class TensorboardCallback(BaseCallback): """ Custom callback for plotting additional values in tensorboard. """ def __init__(self, verbose=0): super(TensorboardCallback, self).__init__(verbose) def _on_step(self) -> bool: # Log scalar value (here a random variable) value = np.random.random() self.logger.record('random_value', value) return True def make_env(env_params: dict, rank: int = 0, seed: int = 0) -> Callable: """ Utility function for multiprocessed env. Supports both RGB-D images or flattened observations In the case of "use_camera_obs" = True, a GymWrapperRGBD is used. :param env_id: (str) the environment ID :param num_env: (int) the number of environment you wish to have in subprocesses :param seed: (int) the initial seed for RNG :param rank: (int) index of the subprocess :return: (Callable) """ def _init() -> gym.Env: if env_params["use_camera_obs"]: # These parameters are needed to use RGB-D observations assert env_params["has_offscreen_renderer"] == True and env_params["use_object_obs"] == False env = Monitor(GymWrapperRGBD(suite.make(**env_params), keys=['agentview_image', 'agentview_depth'])) else: env = Monitor(GymWrapper(suite.make(**env_params))) env.seed(seed + rank) return env set_random_seed(seed) return _init def parse_arguments(): parser = argparse.ArgumentParser( description=f'Train a {config["algorithm"]} agent on the Lift environment') parser.add_argument('--cont', type=str, required=False, help='Instance id of the model to continue training') return parser.parse_args() if __name__ == '__main__': run = wandb.init( project="robosuite_lift_dense_object_obs", config=config, sync_tensorboard=True, # auto-upload sb3's tensorboard metrics #monitor_gym=True, # auto-upload the videos of agents playing the game save_code=True, # optional, what does this imply? 
#monitor_gym=True, #mode="disabled" # for testing ) # Parse arguments args = parse_arguments() reward_type = "dense" if config["env_params"]["reward_shaping"] else "sparse" # The folders for weights and logs and their filenames run_name = f'{config["algorithm"]}-{config["env_params"]["env_name"]}-{reward_type}-' models_dir = f'./models/{run_name}' logdir = f'./logs/{run_name}' video_dir = f'./videos/{run_name}' # Make vectorized Lift environment env = SubprocVecEnv([make_env(config["env_params"], i, config["random_seed"]) for i in range(config["num_processes"])]) #env = VecVideoRecorder(env, video_dir, record_video_trigger=lambda x: x % 2000 == 0, video_length=200) # Evaluation environment eval_env_config = config["env_params"] eval_env_config["reward_shaping"] = False # Sparse rewards for evaluation eval_callback = EvalCallback(make_env(eval_env_config)(), eval_freq=500, deterministic=True) # Check: Do I need to pass the env into this another way? #Check if continue training argument is given if args.cont != None: #Continue previous model instance_id = args.cont #Check that model and logs exist for given instance id if not os.path.exists(models_dir+instance_id) or not os.path.exists(logdir+instance_id): raise ValueError(f"No model or log found for instance id {instance_id}") if config["algorithm"] == "PPO": model = sb3.PPO.load(models_dir+instance_id, env) elif config["algorithm"] == "SAC": model = sb3.SAC.load(models_dir+instance_id, env) else: # We want to make new model instance instance_id = str(int(time.time())) #Check that instance does not already exist (if several computers train in parallel) if os.path.exists(models_dir+instance_id) or os.path.exists(logdir+instance_id): raise ValueError(f"Model or log already exists for instance id {instance_id}") #Create the model and log folders os.makedirs(models_dir+instance_id) os.makedirs(logdir+instance_id) # Initialize policy if config["algorithm"] == "SAC": model = sb3.SAC(config["policy_model"], env, verbose=1, tensorboard_log=logdir+instance_id, seed=config["random_seed"]) elif config["algorithm"] == "PPO": model = sb3.PPO(config["policy_model"], env, verbose=1, tensorboard_log=logdir+instance_id, seed=config["random_seed"]) # Train the model training_iterations = config["total_timesteps"] // config["timesteps_pr_save"] learning_timesteps = config["timesteps_pr_save"] try: for i in range(training_iterations): model.learn(total_timesteps=learning_timesteps, reset_num_timesteps=False, callback=[WandbCallback(), eval_callback]) model.save(f"{models_dir+instance_id}/{learning_timesteps*(i+1)}") env.close() run.finish() print("Run successfully finished. Closed environment") except KeyboardInterrupt: env.close() run.finish() print("Closed environment")
6,617
tests/unit/peapods/test_pods.py
YueLiu-jina/jina
0
2170794
import unittest from jina.main.parser import set_pod_parser, set_gateway_parser from jina.peapods.pod import BasePod, GatewayPod, MutablePod, GatewayFlowPod, FlowPod from tests import JinaTestCase class PodTestCase(JinaTestCase): def test_pod_context(self): def _test_pod_context(runtime): args = set_pod_parser().parse_args(['--runtime', runtime, '--parallel', '2']) with BasePod(args): pass BasePod(args).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_pod_context(j) def test_gateway_pod(self): def _test_gateway_pod(runtime): args = set_gateway_parser().parse_args(['--runtime', runtime]) with GatewayPod(args): pass GatewayPod(args).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_gateway_pod(j) def test_gatewayflow_pod(self): def _test_gateway_pod(runtime): with GatewayFlowPod({'runtime': runtime}): pass GatewayFlowPod({'runtime': runtime}).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_gateway_pod(j) def test_mutable_pod(self): def _test_mutable_pod(runtime): args = set_pod_parser().parse_args(['--runtime', runtime, '--parallel', '2']) with MutablePod(BasePod(args).peas_args): pass MutablePod(BasePod(args).peas_args).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_mutable_pod(j) def test_flow_pod(self): def _test_flow_pod(runtime): args = {'runtime': runtime, 'parallel': 2} with FlowPod(args): pass FlowPod(args).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_flow_pod(j) def test_pod_context_autoshutdown(self): def _test_pod_context(runtime): args = set_pod_parser().parse_args(['--runtime', runtime, '--parallel', '2', '--max-idle-time', '5', '--shutdown-idle']) with BasePod(args) as bp: bp.join() BasePod(args).start().close() for j in ('process', 'thread'): with self.subTest(runtime=j): _test_pod_context(j) if __name__ == '__main__': unittest.main()
2,719
cv/__init__.py
iboraham/job-finder
1
2169514
import io
import urllib.request

import PyPDF2


class CV:
    def __init__(self, url):
        self.url = url
        # urlopen responses are not seekable, so buffer the PDF bytes first;
        # also import urllib.request explicitly rather than relying on bare `import urllib`
        with urllib.request.urlopen(self.url) as response:
            pdf_bytes = io.BytesIO(response.read())
        pdfReader = PyPDF2.PdfFileReader(pdf_bytes)
        pageObj = pdfReader.getPage(0)
        self.text = pageObj.extractText()

    def get_cv(self):
        return self.text
355
2019/12 December/dp12142019.py
vishrutkmr7/DailyPracticeProblemsDIP
5
2170908
# This problem was recently asked by AirBNB:
# Given a phone number, return all valid words that can be created using that phone number.

lettersMaps = {
    1: [],
    2: ["a", "b", "c"],
    3: ["d", "e", "f"],
    4: ["g", "h", "i"],
    5: ["j", "k", "l"],
    6: ["m", "n", "o"],
    7: ["p", "q", "r", "s"],
    8: ["t", "u", "v"],
    9: ["w", "x", "y", "z"],
    0: [],
}

validWords = ["dog", "fish", "cat", "fog"]


def makeWords(phone):
    # Fill this in
    ret = [""]
    for digit in phone:
        letters = lettersMaps[int(digit)]
        ret = [prefix + letter for prefix in ret for letter in letters]
    return [r for r in ret if r in validWords]


print(makeWords("364"))
# ['dog', 'fog']
713
django/main/react_urls.py
mnieber/shared-goal
0
2171116
"""Main urls.""" from django.conf import settings from django.conf.urls import url from django.contrib import admin from django.views import static from goal.react_views import GoalListView, GoalView, NewGoalView from suggestion.react_views import ( SuggestionList, SuggestionView, EditSuggestionView, UploadSuggestionImageView ) from review.react_views import ReviewView from react_views import HomeView, BundleView urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^api/goals$', GoalListView.as_view()), url(r'^api/new-goal$', NewGoalView.as_view()), url( r'^api/new-suggestion/(?P<goal_slug>[\-\w]+)$', EditSuggestionView.as_view() ), url( r'^api/edit-suggestion/(?P<goal_slug>[\-\w]+)' + '/(?P<suggestion_slug>[\-\w]+)$', EditSuggestionView.as_view() ), url( r'^api/upload-suggestion-image/(?P<goal_slug>[\-\w]+)$', UploadSuggestionImageView.as_view() ), url( r'^api/upload-suggestion-image/(?P<goal_slug>[\-\w]+)' + '/(?P<suggestion_slug>[\-\w]+)$', UploadSuggestionImageView.as_view() ), url(r'^api/goal/(?P<goal_slug>[\-\w]+)$', GoalView.as_view()), url(r'^api/suggestions/(?P<goal_slug>[\-\w]+)$', SuggestionList.as_view()), url( r'^api/review/(?P<goal_slug>[\-\w]+)/' + r'(?P<suggestion_slug>[\-\w]+)$', ReviewView.as_view() ), url( r'^api/suggestion/(?P<goal_slug>[\-\w]+)/' + r'(?P<suggestion_slug>[\-\w]+)$', SuggestionView.as_view() ), url( r'^api/bundle/(?P<goal_slug>[\-\w]+)/' + r'(?P<suggestion_slug>[\-\w]+)$', BundleView.as_view() ), ] if settings.DEBUG: urlpatterns += [ url( r'^media/(?P<path>.*)$', static.serve, { 'document_root': settings.MEDIA_ROOT, 'show_indexes': True } ), ] urlpatterns += [ url(r'', HomeView.as_view(), name='home'), ]
2,014
app/controllers/lotteryGameCTRL.py
DiegoGalante/loteriafacil_python-flask
0
2171327
from app.models.DAO import lotteryDAO as _lotBD from flask import jsonify, json from app.models.tables import Lottery from app.controllers.utilities.enums import TipoJogo as _enumGameType from app.controllers.utilities.utilities import Utils as _utilCTRL from app.controllers.utilities import email as _emailCTRL from app.controllers import configurationCTRL as _configCTRL from bs4 import BeautifulSoup import requests from decimal import Decimal from datetime import datetime, date import time from app.controllers.jsonencoder import GenericJsonEncoder from app.controllers.local_config import Token def RecuperaJogoSistema(tipoJogo, dtInicio, dtFim=None): return _lotBD.RecuperaJogoSistema(tipoJogo, dtInicio, dtFim) def RecuperaUltimoJogo(to_json): return _lotBD.RecuperaUltimoJogo(to_json) def RecuperaJogo(tipoJogo, concurso, to_json): return _lotBD.RecuperaJogo(tipoJogo, concurso, to_json) def VerificaJogo(tipoJogo, concurso): return _lotBD.VerificaJogo(tipoJogo, concurso) def GravarJogo(loteria): return _lotBD.GravarJogo(loteria) def VerificaJogoOnline(num_concurse, pes_id=0): try: t0 = time.time() pes_id = 1 #DIEGO _objConfiguration = _configCTRL.RecuperaConfiguracao(pes_id, False) lottery_atual = RecuperaUltimoJogo(False) try: num_concurse = int(num_concurse) if _lotBD.VerificaJogo(_enumGameType.lotofacil.value, num_concurse): num_concurse = 0 except: num_concurse =0 # print("Número para o token: {0}".format(num_concurse)) if lottery_atual.dtNextConcurse <= datetime.today() and _objConfiguration.check_game_online and _utilCTRL.CheckConnection(): lottery = Lottery(None,None,None,None,None,None,None,None,None,None,None,None,None,None,None,None) page = requests.get(Token().GetToken(num_concurse)) # page = requests.get(_utilCTRL.GetToken()) soup = BeautifulSoup(page.text, 'html.parser') resultadoJson = json.loads(str(soup)) # print(resultadoJson) if int(resultadoJson['concurso']['numero']) > 0 and not _lotBD.VerificaJogo(_enumGameType.lotofacil.value, int(resultadoJson['concurso']['numero'])) and Decimal(resultadoJson['concurso']['premiacao']['acertos_14']['valor_pago'].replace('.','').replace(',','.')) > 0: lottery = Lottery(0, int(resultadoJson['concurso']['numero']), date(int(resultadoJson['concurso']['data'].split('/')[2]), int(resultadoJson['concurso']['data'].split('/')[1]), int(resultadoJson['concurso']['data'].split('/')[0])), _utilCTRL.OrganizaJogo(resultadoJson['concurso']['dezenas'], _enumGameType.lotofacil.value), int(resultadoJson['concurso']['premiacao']['acertos_15']['ganhadores']), int(resultadoJson['concurso']['premiacao']['acertos_14']['ganhadores']), int(resultadoJson['concurso']['premiacao']['acertos_13']['ganhadores']), int(resultadoJson['concurso']['premiacao']['acertos_12']['ganhadores']), int(resultadoJson['concurso']['premiacao']['acertos_11']['ganhadores']), Decimal(resultadoJson['concurso']['premiacao']['acertos_15']['valor_pago'].replace('.','').replace(',','.')), Decimal(resultadoJson['concurso']['premiacao']['acertos_14']['valor_pago'].replace('.','').replace(',','.')), Decimal(resultadoJson['concurso']['premiacao']['acertos_13']['valor_pago'].replace('.','').replace(',','.')), Decimal(resultadoJson['concurso']['premiacao']['acertos_12']['valor_pago'].replace('.','').replace(',','.')), Decimal(resultadoJson['concurso']['premiacao']['acertos_11']['valor_pago'].replace('.','').replace(',','.')), date(int(resultadoJson['proximo_concurso']['data'].split('/')[2]), int(resultadoJson['proximo_concurso']['data'].split('/')[1]), 
int(resultadoJson['proximo_concurso']['data'].split('/')[0])), # Decimal(resultadoJson['proximo_concurso']['valor_estimado'].replace('.','').replace(',','.')), int(_enumGameType.lotofacil.value) ) # print(lottery.__str__()) if lottery.shared14 > 0: if not _lotBD.VerificaJogo(lottery.tpj_id, lottery.concurse): _lotBD.GravarJogo(lottery) _emailCTRL.EnviaEmail(lottery, False, pes_id=0) return True #Verifica se possui dados pra enviar no email # if _lotBD.VerificaEnvioEmailAutomatico(lottery.concurse, lottery.tpj_id, _objConfiguration): # ProcessaJogos(lottery, None) #verifica se a configuracao está ativada para mandar email automatico # if _objConfiguration.send_email_automatically: # #envia os emails da pessoa else: print("Não há novo jogo para salvar!") except Exception as ex: print("Ocorreu um erro ao fazer a verificação online. Erro: {0}".format(ex.args)) finally: print("Tempo de execução do VerificaJogoOnline: {0}".format(time.time() - t0)) return _lotBD.RecuperaUltimoJogo(True) def VerificaEnvioEmailAutomatico(numConcurso, tpj_id, objConfiguracao): try: if objConfiguracao.send_email_automatically or objConfiguracao.send_email_manually: return _lotBD.ExecuteCheckGame(numConcurso, tpj_id, pes_id=0) else: return False except: return False def CheckGameUpdate(numConcurso, tpj_id, pes_id): try: return _lotBD.ExecuteCheckGame(numConcurso, tpj_id, pes_id) # import threading # thread = threading.Thread(target=_lotBD.ExecuteCheckGame(1679,2,1)) # thread.start() # thread.join(timeout=60) # return True except: return False def ContJogosPessoaSemVerificar(numConcurso, tpj_id, pes_id): return _lotBD.ContJogosPessoaSemVerificar(numConcurso, tpj_id, pes_id=pes_id) def ProcessaJogos(lottery, pessoa): try: if pessoa is not None and pessoa.id > 0: _objConfig = _configCTRL.RecuperaConfiguracao(pessoa.id, False) if _objConfig.send_email_automatically: return _emailCTRL.EnviaEmail(lottery, True, pes_id=pessoa.id) else: return CheckGameUpdate(lottery.concurse, lottery.tpj_id, pessoa.id) else: return CheckGameUpdate(lottery.concurse, lottery.tpj_id, 0) # return _emailCTRL.EnviaEmail(lottery, True) except Exception as ex: print("Ocorreu um erro ao execurar ProcessaJogos. Erro: {0}".format(ex.args)) return False
7,099
scripts/git_utils_test.py
gchatelet/llvm-premerge-checks
0
2169599
# Copyright 2022 Google LLC # # Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://llvm.org/LICENSE.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import git import os import scripts.git_utils as git_utils def assertForkIsSynced(upstreamPath, forkPath): upstream = git.Repo(path=upstreamPath) fork = git.Repo(path=forkPath) forkBranches = {} for b in fork.branches: forkBranches[b.name] = b for b in upstream.branches: assert b.name in forkBranches assert b.commit.hexsha == forkBranches[b.name].commit.hexsha, f'branch {b.name} head' def forkIsSynced(upstreamPath, forkPath) -> bool: upstream = git.Repo(path=upstreamPath) fork = git.Repo(path=forkPath) forkBranches = {} for b in fork.branches: forkBranches[b.name] = b for b in upstream.branches: if b.name not in forkBranches: return False if b.commit.hexsha != forkBranches[b.name].commit.hexsha: return False return True def add_simple_commit(remote, name: str): with open(os.path.join(remote.working_tree_dir, name), 'wt') as f: f.write('first line\n') remote.index.add([os.path.join(remote.working_tree_dir, name)]) remote.index.commit(name) def test_sync_branches(tmp_path): upstreamRemote = os.path.join(tmp_path, 'upstreamBare') forkRemote = os.path.join(tmp_path, 'forkBare') git.Repo.init(path=upstreamRemote, bare=True) git.Repo.init(path=forkRemote, bare=True) upstreamPath = os.path.join(tmp_path, 'upstream') forkPath = os.path.join(tmp_path, 'fork') upstream = git.Repo.clone_from(url=upstreamRemote, to_path=upstreamPath) add_simple_commit(upstream, '1') upstream.git.push('origin', 'main') fork = git.Repo.clone_from(url=forkRemote, to_path=forkPath) fork.create_remote('upstream', url=upstreamRemote) git_utils.syncRemotes(fork, 'upstream', 'origin') fork.remotes.upstream.fetch() fork.create_head('main', fork.remotes.upstream.refs.main) fork.heads.main.checkout() # Sync init commit. git_utils.syncRemotes(fork, 'upstream', 'origin') assertForkIsSynced(upstreamRemote, forkRemote) # Add new change upstream. add_simple_commit(upstream, '2') upstream.git.push('--all') git_utils.syncRemotes(fork, 'upstream', 'origin') assertForkIsSynced(upstreamRemote, forkRemote) # Add new branch. upstream.create_head('branch1') upstream.heads['branch1'].checkout() add_simple_commit(upstream, '3') upstream.git.push('--all') git_utils.syncRemotes(fork, 'upstream', 'origin') assertForkIsSynced(upstreamRemote, forkRemote) # Add another branch commit. add_simple_commit(upstream, '4') upstream.git.push('--all') git_utils.syncRemotes(fork, 'upstream', 'origin') assertForkIsSynced(upstreamRemote, forkRemote) # Discard changes in fork. 
fork.remotes.origin.pull() fork.heads.main.checkout() add_simple_commit(fork, '5') fork.remotes.origin.push() upstream.remotes.origin.pull('main') upstream.heads.main.checkout() add_simple_commit(upstream, '6') upstream.remotes.origin.push() assert not forkIsSynced(upstreamRemote, forkRemote) assert os.path.isfile(os.path.join(fork.working_tree_dir, '5')) git_utils.syncRemotes(fork, 'upstream', 'origin') assertForkIsSynced(upstreamRemote, forkRemote) fork.git.pull('origin', 'main') fork.heads.main.checkout() assert not os.path.isfile(os.path.join(fork.working_tree_dir, '5')) assert os.path.isfile(os.path.join(fork.working_tree_dir, '6'))
4,007
aurora/ndarray/_base.py
upul/Aurora
111
2170459
# coding: utf-8
# pylint: disable=invalid-name
""" ctypes library of dlsys and helper functions """
from __future__ import absolute_import

import os
import ctypes
from pathlib import Path


def _load_lib():
    """Load library in build/lib."""
    lib_root = Path(__file__).parents[2]
    lib_path = os.path.join(lib_root, 'cuda/build/lib/')
    path_to_so_file = os.path.join(lib_path, "libc_runtime_api.so")
    lib = ctypes.CDLL(path_to_so_file, ctypes.RTLD_GLOBAL)
    return lib


# global library instance
try:
    _LIB = _load_lib()
except:
    # TODO: (upul) Do we need to log the error message?
    pass


##################
# Helper Methods #
##################
def check_call(ret):
    """Check the return value of C API call

    This function will crash when error occurs.
    Wrap every API call with this function

    Parameters
    ----------
    ret : int
        return value from API calls
    """
    assert (ret == 0)


def c_array(ctype, values):
    """Create ctypes array from a python array

    Parameters
    ----------
    ctype : ctypes data type
        data type of the array we want to convert to

    values : tuple or list
        data content

    Returns
    -------
    out : ctypes array
        Created ctypes array
    """
    return (ctype * len(values))(*values)
1,308
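A short, hedged sketch of how the c_array and check_call helpers above could be used; the wrapped C entry point named in the comment is hypothetical:

import ctypes

arr = c_array(ctypes.c_float, [1.0, 2.0, 3.0])
print(len(arr), arr[0])  # 3 1.0
check_call(0)  # passes; a real call would look like check_call(_LIB.SomeApi(arr)), where SomeApi is hypothetical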
tests/test_preprocessing.py
shubham0204/NMT_With_Attention_Eng-Marathi
1
2169981
from preprocessing import CorpusProcessor, load_corpus_processor
import pandas as pd

sentences = pd.read_csv('../mar.txt', sep='\t', encoding='utf8', header=None).sample(frac=1.).values
eng_sentences = sentences[:, 0]
marathi_sentences = sentences[:, 1]

eng_processor = CorpusProcessor(eng_sentences, lang='eng')
marathi_processor = CorpusProcessor(marathi_sentences, lang='mar')

out = eng_processor.texts_to_sequences(eng_sentences)
print(out)

out = marathi_processor.texts_to_sequences(marathi_sentences)
print(out)
556
funex.py
Rafael-C-Correa/Exercises
0
2171002
# Prints a standard header around the given title
def cabecario(titulo):
    print('-' * (len(titulo) + 2))
    print(titulo.center(len(titulo) + 2))
    print('-' * (len(titulo) + 2))
154
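For illustration, a possible call to the cabecario function above (the title string is made up):

cabecario('Sales report')
# --------------
#  Sales report
# --------------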
web/views.py
NejcZupec/tictactoe
1
2171133
import json from django.core.urlresolvers import reverse from django.http import Http404, HttpResponse from django.views.decorators.csrf import csrf_exempt from django.views.generic import TemplateView from django.shortcuts import redirect, render from .models import Game, Player, Move from .utils import create_new_game, generate_unique_anonymous_username, calculate_stats class HomeView(TemplateView): template_name = 'home.html' class GameView(TemplateView): template_name = 'game.html' def get(self, request, game_id, *args, **kwargs): game = Game.objects.get(id=game_id) return render(request, self.template_name, { 'game': Game.objects.get(id=game_id), 'board': [[game.get_field_state(row_idx, column_idx) for column_idx in range(3)] for row_idx in range(3)], 'game_finished': True if game.get_winner_or_draw() else False, 'stats': calculate_stats(game), 'online_game': 'false', 'player': game.get_player(), }) class GameOnlineView(TemplateView): template_name = 'game.html' def get(self, request, game_id, *args, **kwargs): game = Game.objects.get(id=game_id) username = request.GET.get('player') url = '%s?player=%s' % (reverse('online_game', args=[game.id]), game.player2.username) player = 'p1' if username == game.player1.username else 'p2' return render(request, self.template_name, { 'game': Game.objects.get(id=game_id), 'board': [[game.get_field_state(row_idx, column_idx) for column_idx in range(3)] for row_idx in range(3)], 'game_finished': True if game.get_winner_or_draw() else False, 'stats': calculate_stats(game), 'username': username, 'show_online_modal_window': True if username == game.player1.username and game.move_set.count() == 0 else False, 'online_game_opponent_url': request.build_absolute_uri(url), 'online_game': 'true', 'player': player, }) class Leaderboard(TemplateView): template_name = 'leaderboard.html' def get(self, request, *args, **kwargs): return render(request, self.template_name, { 'players': Player.objects.all(), }) def new_game(request, p1_type, p2_type): """ Start a new game. Create a Game object and redirects to it. """ if p1_type == 'anonymous' and p2_type == 'anonymous': game = create_new_game('anonymous', 'anonymous') return redirect(game) if p1_type == 'anonymous' and p2_type == 'ai_random': player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2, _ = Player.objects.get_or_create(username="AI Random", type=p2_type) game = Game.objects.create(player1=player1, player2=player2) return redirect(game) if p1_type == 'anonymous' and p2_type == 'ai_min_max': player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2, _ = Player.objects.get_or_create(username="AI MiniMax", type=p2_type) game = Game.objects.create(player1=player1, player2=player2) return redirect(game) raise Http404 def new_online_game(request, p1_type, p2_type): if p1_type == 'anonymous' and p2_type == 'anonymous': player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2 = Player.objects.create(username=generate_unique_anonymous_username(), type=p2_type) game = Game.objects.create(player1=player1, player2=player2) url = '%s?player=%s' % (reverse('online_game', args=[game.id]), player1.username) return redirect(url) raise Http404 @csrf_exempt def new_move(request, game_id): """ Save a new game's move to database. 
""" game = Game.objects.get(id=game_id) player = request.POST.get('player') x = request.POST.get('x') y = request.POST.get('y') _, action = game.add_move_and_get_action(player, x, y) return HttpResponse(str(action)) def rematch(request, game_id): old_game = Game.objects.get(id=game_id) game = Game.objects.create( player1=old_game.player2, player2=old_game.player1, ) return redirect(game) @csrf_exempt def ai_next_move(request, game_id): game = Game.objects.get(id=game_id) if game.get_ai_player_type() == 'ai_random': x, y = game.get_next_random_move() else: x, y = game.get_next_minimax_move() return HttpResponse(json.dumps({'x': x, 'y': y}), content_type='application/json') def opponent_move(request, game_id): """ GET parameters: - opponent_player ['p1', 'p2'] """ game = Game.objects.get(id=game_id) opponent_player = request.GET.get('opponent_player') sequence_no = request.GET.get('sequence_no') try: m = Move.objects.filter( game=game, player=game.player1 if opponent_player == 'p1' else game.player2, sequence_no=sequence_no, ).latest('sequence_no') action = game.get_winner_or_draw() return HttpResponse(json.dumps({ 'player': opponent_player, 'x': m.x, 'y': m.y, 'action': action, }), content_type='application/json') except Move.DoesNotExist: return HttpResponse(json.dumps(None), content_type='application/json') def health(request): return HttpResponse( json.dumps({'status': 'ok'}), content_type='application/json', )
5,623
setup.py
YumenoG/pycashaccount
2
2171009
from setuptools import setup, find_packages

with open('requirements.txt') as f:
    requirements = [l for l in f.readlines() if l.strip()]

with open('README.md') as f:
    long_description = f.read()

setup(
    name='pycashaccount',
    version='0.3.2',
    author='emergent-reasons',
    author_email='<EMAIL>',
    description='helper for creating cash accounts',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/emergent-reasons/pycashaccount',
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    include_package_data=True,
    install_requires=requirements,
    entry_points='''
        [console_scripts]
        cashaccount=cashaccount.cli:run
    ''',
)
882
cli/tests/test_auth.py
n0npax/lime-comb
1
2169159
import os
import tempfile
from uuid import uuid4

import google
import pytest
from google.auth.credentials import AnonymousCredentials

import lime_comb.auth.google
from lime_comb.auth.google import get_anon_cred, get_cred, read_creds, save_creds
from lime_comb.config import config

from .conftest import *


class TestAuth:
    def test_read_and_save_creds(self, existing_config, valid_cred):
        save_creds(valid_cred)
        r_creds = read_creds()
        assert valid_cred.uuid == r_creds.uuid

    def test_get_saved_creds(self, valid_cred):
        save_creds(valid_cred)
        with get_cred(config.credentials_file) as cred:
            assert not cred.expired

    def test_get_saved_expired_creds(self, invalid_cred, web_login):
        save_creds(invalid_cred)
        with get_cred(config.credentials_file) as cred:
            assert not cred.expired

    def test_get_no_saved_creds(self, credentials_file, web_login):
        with get_cred(config.credentials_file) as cred:
            assert not cred.expired

    def test_get_anon_creds(self):
        with get_anon_cred() as cred:
            assert type(cred) == AnonymousCredentials
1,160
test/inner_test.py
sebkeim/inner-class
1
2170529
import unittest from inner import raw_inner, static_inner, class_inner, inner class TestBase(unittest.TestCase): def setUp(self): class Base1: @self.inner_mode class inner1: def method1(self): return "method1" def method5(self): return "will be overided by Outer.inner1" @self.inner_mode class inner2: def method2(self): return "method2" @self.inner_mode class inner3: def method6(self): return "will be overided by Outer.inner1" self.Base1 = Base1 class Base2: @self.inner_mode class inner1: def method1(self): return "will be overided by Base1.inner1" def method3(self): return "method3" def method5(self): return "will be overided by Outer and Base1" @self.inner_mode class inner2: def method4(self): return "method4" self.Base2 = Base2 class Outer(Base1, Base2): @self.inner_mode class inner1: def method5(self): return "method5" @self.inner_mode class inner3(Base1.inner3): # explicit inheritence def method6(self): return "method6" self.Outer = Outer class TestRaw(TestBase): inner_mode = staticmethod(raw_inner) def test_outer_attr_cls(self): # outer attribute is not set Outer = self.Outer Base1 = self.Base1 Base2 = self.Base2 self.assertFalse(hasattr(Outer.inner1, "outer")) def test_outer_attr_obj(self): # outer attribute is not set outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertFalse(hasattr(outer.inner1, "outer")) def test_inheritance_cls(self): # no inheritence except if explicit Outer = self.Outer Base1 = self.Base1 Base2 = self.Base2 self.assertFalse(issubclass(Outer.inner1, Base1.inner1)) self.assertFalse(issubclass(Outer.inner1, Base2.inner1)) self.assertTrue(issubclass(Outer.inner3, Base1.inner3)) self.assertTrue(Outer.inner2 is Base1.inner2) def test_inheritance_obj(self): outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertFalse(issubclass(outer.inner1, base1.inner1)) self.assertFalse(issubclass(outer.inner1, base2.inner1)) self.assertTrue(issubclass(outer.inner3, base1.inner3)) self.assertTrue(outer.inner2 is base1.inner2) # static_inner allow descriptors but class_inner raise an exception def _inner_get(self): class Outer: @self.inner_mode class inner1: def __get__(self, outerobj, outercls): pass def _inner_set(self): class Outer: @self.inner_mode class inner1: def __get__(self, outerobj, value): pass def test_descriptor(self): self._inner_get() self._inner_set() class TestStatic(TestRaw): inner_mode = staticmethod(static_inner) def test_outer_attr_cls(self): # outer attr is set at class definition time Outer = self.Outer Base1 = self.Base1 Base2 = self.Base2 self.assertEqual(Outer.inner1.outer, Outer) self.assertEqual(Base1.inner1.outer, Base1) self.assertEqual(Base2.inner1.outer, Base2) self.assertEqual(Outer.inner2.outer, Base1) self.assertEqual(Outer.inner3.outer, Outer) self.assertEqual(Base1.inner3.outer, Base1) def test_outer_attr_obj(self): # outer attr is set at class definition time outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertEqual(outer.inner1.outer, type(outer)) self.assertEqual(base1.inner1.outer, type(base1)) self.assertEqual(base2.inner1.outer, type(base2)) self.assertEqual(outer.inner2.outer, type(base1)) self.assertEqual(outer.inner3.outer, type(outer)) self.assertEqual(base1.inner3.outer, type(base1)) class TestClass(TestStatic): inner_mode = staticmethod(class_inner) def test_outer_attr_cls(self): # outer attr dynamically map to class Outer = self.Outer Base1 = self.Base1 Base2 = self.Base2 self.assertEqual(Outer.inner1.outer, Outer) self.assertEqual(Base1.inner1.outer, Base1) 
self.assertEqual(Base2.inner1.outer, Base2) self.assertEqual(Outer.inner2.outer, Outer) self.assertEqual(Outer.inner3.outer, Outer) self.assertEqual(Base1.inner3.outer, Base1) def test_outer_attr_obj(self): # outer attr still map to class outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertEqual(outer.inner1.outer, type(outer)) self.assertEqual(outer.inner2.outer, type(outer)) self.assertEqual(outer.inner3.outer, type(outer)) self.assertEqual(base1.inner1.outer, type(base1)) self.assertEqual(base1.inner3.outer, type(base1)) self.assertEqual(base2.inner1.outer, type(base2)) def test_inheritance_cls(self): Outer = self.Outer Base1 = self.Base1 Base2 = self.Base2 # carried inheritance self.assertTrue(issubclass(Outer.inner1, Base1.inner1)) self.assertTrue(issubclass(Outer.inner1, Base2.inner1)) # inner derivation self.assertFalse(Outer.inner2 is Base1.inner2) self.assertTrue(issubclass(Outer.inner2, Base1.inner2)) self.assertTrue(issubclass(Outer.inner2, Base2.inner2)) def test_inheritance_obj(self): outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertTrue(issubclass(outer.inner1, base1.inner1)) self.assertTrue(issubclass(outer.inner1, base2.inner1)) self.assertFalse(outer.inner2 is base1.inner2) self.assertTrue(issubclass(outer.inner2, base1.inner2)) self.assertTrue(issubclass(outer.inner2, base2.inner2)) def test_descriptor(self): self.assertRaises(ValueError, self._inner_get) self.assertRaises(ValueError, self._inner_set) class TestInner(TestClass): inner_mode = staticmethod(inner) def test_outer_attr_obj(self): # outer attr map to object outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertEqual(outer.inner1().outer, outer) self.assertEqual(outer.inner2().outer, outer) self.assertEqual(outer.inner3().outer, outer) self.assertEqual(base1.inner1().outer, base1) self.assertEqual(base1.inner3().outer, base1) self.assertEqual(base2.inner1().outer, base2) def test_outer_attr_cls_obj(self): # acces to iner object from outer class : outer attr map to class outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertEqual(outer.inner1().outer, outer) def test_inheritance_obj(self): outer = self.Outer() inner1 = outer.inner1() self.assertEqual(inner1.method1(), "method1") self.assertEqual(inner1.method3(), "method3") self.assertEqual(inner1.method5(), "method5") inner2 = outer.inner2() self.assertEqual(inner2.method2(), "method2") self.assertEqual(inner2.method4(), "method4") inner3 = outer.inner3() self.assertEqual(inner3.method6(), "method6") class TestProperty(TestClass): inner_mode = staticmethod(inner.property) def test_outer_attr_obj(self): # outer attr map to object outer = self.Outer() base1 = self.Base1() base2 = self.Base2() self.assertEqual(outer.inner1.outer, outer) self.assertEqual(outer.inner2.outer, outer) self.assertEqual(outer.inner3.outer, outer) self.assertEqual(base1.inner1.outer, base1) self.assertEqual(base1.inner3.outer, base1) self.assertEqual(base2.inner1.outer, base2) def test_inheritance_obj(self): outer = self.Outer() inner1 = outer.inner1 self.assertEqual(inner1.method1(), "method1") self.assertEqual(inner1.method3(), "method3") self.assertEqual(inner1.method5(), "method5") inner2 = outer.inner2 self.assertEqual(inner2.method2(), "method2") self.assertEqual(inner2.method4(), "method4") inner3 = outer.inner3 self.assertEqual(inner3.method6(), "method6") def test_property(self): # a new object is created outer = self.Outer() inner1a = outer.inner1 inner1b = outer.inner1 self.assertFalse(inner1a 
is inner1b) class TestCachedProperty(TestProperty): inner_mode = staticmethod(inner.cached_property) def test_property(self): # object is cached outer = self.Outer() inner1a = outer.inner1 inner1b = outer.inner1 self.assertTrue(inner1a is inner1b) if __name__ == "__main__": unittest.main()
9,427
mp3download.py
imnobody0396/Nancy-The-Virtual-Assistant
0
2168558
### Author - <NAME> ### import requests, bs4, AudioIO from os import chdir, system from settings import MP3_DIR, nancy_notify from time import sleep # Print iterations progress def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'): """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) """ percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) system('clear') print('\n\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r') # Print New Line on Complete if iteration == total: print() def download_song(link, name): chdir(MP3_DIR) print('Downloading ' + name + '...') AudioIO.speak('Downloading ' + name + '...') res = requests.get(link, stream=True) try: res.raise_for_status() except: AudioIO.speak('Downloading Error') return False song = open(name, 'wb') dl = 0 total_length = int(res.headers.get('content-length')) printProgressBar(dl, total_length, prefix = 'Progress:', suffix = 'Complete', length = 50) for chunk in res.iter_content(100000): song.write(chunk) dl += 100000 sleep(0.1) printProgressBar(dl, total_length, prefix = 'Progress:', suffix = 'Complete', length = 50) song.flush() song.close() nancy_notify("Downloaded\n" + name) #AudioIO.speak('Download finished') return True def download_link(addr): res = requests.get(addr) res.raise_for_status() soup = bs4.BeautifulSoup(res.text, 'lxml') s = soup.select('a') link = [] for i in s: try: if "Download In" in i.get_text('strong'):# and "Quality" in i.get_text('strong'): link.append(i.get('href')) except: pass try: name = link[-1][len(link[-1]) - link[-1][::-1].find('/'):] except IndexError: return False return download_song(link[-1], name) def page_link(name): name += ' mp3mad' res = requests.get('https://google.com/search?q=' + name) res.raise_for_status() soup = bs4.BeautifulSoup(res.text, 'lxml') opt = soup.select('.r a') for link in opt[:3]: try: addr = link.get('href') addr = addr[7:addr.index('&')] print('trying -> ' + addr) if download_link(addr): return 'Download finished' except IndexError: pass else: return "No Link found" #page_link(input())
3,102
backend/core/wsgi.py
albeiks/omaralbeik.com
10
2170770
from os import environ as env

from django.core.wsgi import get_wsgi_application

env.setdefault("DJANGO_SETTINGS_MODULE", "core.settings")

application = get_wsgi_application()
176
src/torch/legacy/nn/SpatialFullConvolution.py
warcraft12321/Hyperfoods
51
2171083
import math import torch from .Module import Module from .utils import clear class SpatialFullConvolution(Module): def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0): super(SpatialFullConvolution, self).__init__() self.nInputPlane = nInputPlane self.nOutputPlane = nOutputPlane self.kW = kW self.kH = kH self.dW = dW self.dH = dH self.padW = padW self.padH = padH if padH is not None else padW self.adjW = adjW self.adjH = adjH if self.adjW > self.dW - 1 or self.adjH > self.dH - 1: raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively') self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW) self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW) self.bias = torch.Tensor(self.nOutputPlane) self.gradBias = torch.Tensor(self.nOutputPlane) self.ones = torch.Tensor() self.finput = None self.fgradInput = None self.zeroScalar = None self._input = None self._gradOutput = None self.reset() def noBias(self): self.bias = None self.gradBias = None return self def reset(self, stdv=None): if stdv is not None: stdv = stdv * math.sqrt(3) else: nInputPlane = self.nInputPlane kH = self.kH kW = self.kW stdv = 1 / math.sqrt(kW * kH * nInputPlane) self.weight.uniform_(-stdv, stdv) if self.bias is not None: self.bias.uniform_(-stdv, stdv) def _makeContiguous(self, input, gradOutput=None): if not input.is_contiguous(): if self._input is None: self._input = input.new() self._input.resize_as_(input).copy_(input) input = self._input if gradOutput is not None: if not gradOutput.is_contiguous(): if self._gradOutput is None: self._gradOutput = gradOutput.new() self._gradOutput.resize_as_(gradOutput).copy_(gradOutput) gradOutput = self._gradOutput return input, gradOutput return input def _calculateAdj(self, targetSize, ker, pad, stride): return (targetSize + 2 * pad - ker) % stride def updateOutput(self, input): inputTensor = input adjW, adjH = self.adjW, self.adjH # The input can be a table where the second element indicates the target # output size, in which case the adj factors are computed automatically if isinstance(input, list): inputTensor = input[0] targetTensor = input[1] tDims = targetTensor.dim() tH = targetTensor.size(tDims - 2) tW = targetTensor.size(tDims - 1) adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW) adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH) if not hasattr(self, 'finput') or self.finput is None: self.finput = input[0].new() if not hasattr(self, 'fgradInput') or self.fgradInput is None: self.fgradInput = input[0].new() else: if not hasattr(self, 'finput') or self.finput is None: self.finput = input.new() if not hasattr(self, 'fgradInput') or self.fgradInput is None: self.fgradInput = input.new() inputTensor = self._makeContiguous(inputTensor) self._backend.SpatialFullConvolution_updateOutput( self._backend.library_state, inputTensor, self.output, self.weight, self.bias, self.finput, self.fgradInput, self.kW, self.kH, self.dW, self.dH, self.padW, self.padH, adjW, adjH ) return self.output def updateGradInput(self, input, gradOutput): if self.gradInput is None: return inputTensor = input adjW, adjH = self.adjW, self.adjH # The input can be a table where the second element indicates the target # output size, in which case the adj factors are computed automatically if isinstance(input, list): inputTensor = input[0] targetTensor = input[1] tDims = targetTensor.dim() tH = targetTensor.size(tDims - 2) tW = targetTensor.size(tDims - 1) adjW = self._calculateAdj(tW, self.kW, 
self.padW, self.dW) adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH) # Momentarily extract the gradInput tensor if isinstance(self.gradInput, list): self.gradInput = self.gradInput[0] inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput) self._backend.SpatialFullConvolution_updateGradInput( self._backend.library_state, inputTensor, gradOutput, self.gradInput, self.weight, self.finput, self.kW, self.kH, self.dW, self.dH, self.padW, self.padH, adjW, adjH ) if isinstance(input, list): # Create a zero tensor to be expanded and used as gradInput[1]. if self.zeroScalar is None: self.zeroScalar = input[1].new(1).zero_() self.ones.resize_(input[1].dim()).fill_(1) zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1]) self.gradInput = [self.gradInput, zeroTensor] return self.gradInput def accGradParameters(self, input, gradOutput, scale=1): inputTensor = input adjW, adjH = self.adjW, self.adjH # The input can be a table where the second element indicates the target # output size, in which case the adj factors are computed automatically if isinstance(inputTensor, list): inputTensor = input[0] targetTensor = input[1] tDims = targetTensor.dim() tH = targetTensor.size(tDims - 2) tW = targetTensor.size(tDims - 1) adjW = calculateAdj(tW, self.kW, self.padW, self.dW) adjH = calculateAdj(tH, self.kH, self.padH, self.dH) inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput) self._backend.SpatialFullConvolution_accGradParameters( self._backend.library_state, inputTensor, gradOutput, self.gradWeight, self.gradBias, self.finput, self.fgradInput, self.kW, self.kH, self.dW, self.dH, self.padW, self.padH, adjW, adjH, scale ) def type(self, type=None, tensorCache=None): if self.finput is not None: self.finput = torch.Tensor() if self.fgradInput is not None: self.fgradInput = torch.Tensor() return super(SpatialFullConvolution, self).type(type, tensorCache) def __repr__(self): s = super(SpatialFullConvolution, self).__repr__() s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH) if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0: s += ', {}, {}'.format(self.dW, self.dH) if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0): s += ', {}, {}'.format(self.padW, self.padH) if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0): s += ', {}, {}'.format(self.adjW, self.adjH) s += ')' if self.bias is None: s += ' without bias' return s def clearState(self): clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') return super(SpatialFullConvolution, self).clearState()
8,026
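The SpatialFullConvolution listing above derives the output-padding factors adjW/adjH from a target size whenever the input is given as a [tensor, target] pair. Below is a minimal standalone sketch of that arithmetic, detached from the module; the concrete numbers (target width 17, kernel 4, pad 1, stride 2) are made up for illustration.

# Standalone sketch of the adj computation used above; sample sizes are illustrative only.
def calculate_adj(target_size, ker, pad, stride):
    # Same formula as SpatialFullConvolution._calculateAdj
    return (target_size + 2 * pad - ker) % stride

# A transposed convolution with stride 2 can reach several output widths from the
# same input; adj is the remainder needed to hit the requested target width.
print(calculate_adj(17, 4, 1, 2))  # -> 1, so adjW = 1 for a 17-wide target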
docs/examples/function-pointer/example.py
RoyVorster/pygccxml
80
2170992
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 <NAME>.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt

from pygccxml import utils
from pygccxml import declarations
from pygccxml import parser

import os
import sys
import warnings

warnings.simplefilter("error", Warning)

# Find out the file location within the sources tree
this_module_dir_path = os.path.abspath(
    os.path.dirname(sys.modules[__name__].__file__))

# Find out the c++ parser
generator_path, generator_name = utils.find_xml_generator()

# Configure the xml generator
xml_generator_config = parser.xml_generator_configuration_t(
    xml_generator_path=generator_path,
    xml_generator=generator_name)

# The c++ file we want to parse
filename = "example.hpp"
filename = this_module_dir_path + "/" + filename

decls = parser.parse([filename], xml_generator_config)

global_namespace = declarations.get_global_namespace(decls)
function_ptr = global_namespace.variables()[0]

# Print the name of the function pointer
print(function_ptr.name)
# > myFuncPointer

# Print the type of the declaration
print(function_ptr.decl_type)
# > void (*)( int,double )

# Print the real type of the declaration (it's just a pointer)
print(type(function_ptr.decl_type))
# > <class 'pygccxml.declarations.cpptypes.pointer_t'>

# Check if this is a function pointer
print(declarations.is_calldef_pointer(function_ptr.decl_type))
# > True

# Remove the pointer part, to access the function's type
f_type = declarations.remove_pointer(function_ptr.decl_type)

# Print the type
print(type(f_type))
# > <class 'pygccxml.declarations.cpptypes.free_function_type_t'>

# Print the return type and the arguments of the function
print(f_type.return_type)
# > void

# Print the return type and the arguments
print(str(f_type.arguments_types[0]), str(f_type.arguments_types[1]))
# > int, double
1,913
work_strategies/work_text_strategy.py
jmskinner/pii_firewall
0
2171319
from presidio_analyzer import AnalyzerEngine
from presidio_anonymizer import AnonymizerEngine
from work_strategies.work_base_strategy import WorkerBaseStrategy
from threading import Lock, Thread, Semaphore
from urllib.request import urlopen


class WorkerTextStrategy(WorkerBaseStrategy):

    def __init__(self, domain, task_type):
        super().__init__(domain, task_type)
        self.analyzer = AnalyzerEngine()
        self.anonymizer = AnonymizerEngine()
        self.text_generator = None
        self.thread_semaphore = Semaphore(2)
        self.my_lock = Lock()

    def _fetch(self, task):
        return task

    def _process(self, task):
        redacted_lines = {}
        results = []
        txt_lines_ordered = []
        local_threads = []
        if task.in_is_local:
            with open(task.in_endpoint) as fh:
                for pos, chunk in enumerate(self._read_in_chunks(fh)):
                    self.thread_semaphore.acquire()
                    thread = Thread(target=self._redact_a_chunk,
                                    args=(chunk, pos, redacted_lines, task.in_endpoint, results))
                    local_threads.append(thread)
                    thread.start()
        else:
            fh = urlopen(task.in_endpoint)
            for pos, chunk in enumerate(self._read_in_chunks(fh)):
                self.thread_semaphore.acquire()
                thread = Thread(target=self._redact_a_chunk,
                                args=(chunk, pos, redacted_lines, task.in_endpoint, results))
                local_threads.append(thread)
                thread.start()

        for thread in local_threads:
            thread.join()

        for num, page in sorted(redacted_lines.items()):
            txt_lines_ordered.append(page)

        task.data = txt_lines_ordered
        task.profile['txt_NER'] = results
        return task

    def _push(self, worker, task):
        print(f"Worker {worker.id} pushed task at {task.in_endpoint}")
        worker.write_queue.put(task)

    def _redact_a_chunk(self, chunk, key, output, in_endpoint, results_list):
        self.my_lock.acquire()
        try:
            new_chunk = ''.join(str(e) for e in chunk)
            results = self.analyzer.analyze(text=new_chunk, language='en')
            results_list.extend(results)
            output[key] = self.anonymizer.anonymize(text=new_chunk, analyzer_results=results)
        except Exception:
            print(f"Incompatible text type occurred on chunk {key+1} in the doc located at {in_endpoint}... ignoring this page")
        finally:
            self.my_lock.release()
            self.thread_semaphore.release()

    def _read_in_chunks(self, file_handler, block_size=1000):
        block = []
        for line in file_handler:
            block.append(line)
            if len(block) == block_size:
                yield block
                block = []
        if block:
            yield block
2,881
compile_yaml.py
queueball/compile_yaml
0
2171382
#!/usr/local/bin/python3 import click import yaml def _tag(value, *tags): if len(tags): return f"<{tags[-1]}>" + _tag(value, *(tags[:-1])) + f"</{tags[-1]}>" return value def _split_name(value, padding_size=3): has_strike = "strike" in value padding = ["" for a in range(padding_size)] out = "" for i, v in enumerate((value["name"].split(" - ") + padding)[:padding_size]): if i in [1, 2]: v = _tag(v, "kbd") if has_strike: out += _tag(v, "strike", "td") else: out += _tag(v, "td") return out def _dict(values): return _tag("".join(_tag(f"{k}: {values[k]}", "li") for k in values), "ul") def _link(value): return f'<a target="_blank" rel="noopener noreferrer" href="{value}">{value}</a>' def _helper(value): if isinstance(value, dict): if len(value) > 1 and set(value.keys()) != {"name", "strike"}: out = "" for k in value: if k in ["name", "strike"]: continue if "http" in str(value[k]): out += _tag(_link(value[k]), "li") elif isinstance(value[k], dict): out += _tag(f"{k}:", "li") + _dict(value[k]) else: out += _tag(f"{k}: {value[k]}", "li") return _tag(_split_name(value) + _tag(out, "ul", "details", "td"), "tr") else: return _tag(_split_name(value, 4), "tr") return _tag(value, "td", "tr") @click.command() @click.argument("src") def main(src): with open(src) as f: # https://www.tutorialspoint.com/yaml/index.htm data = yaml.load(f, yaml.FullLoader) for key in data: print(_tag(key, "h2")) print("<table>") for value in data.get(key): print(_helper(value)) print("</table>") print( """<style> strike { color: #999; } table { border-collapse: collapse; border-spacing: 0; } tbody tr:nth-child(odd) { background: #eee; } td { padding: 3px 10px 3px 15px; } ul { margin: 1px; } </style> """ ) if __name__ == "__main__": main()
2,176
app.py
tinfins/67HSshopBot
0
2170556
""" This module loads the token, cogs, and runs the bot app """ import logging.config import os from os import listdir from os.path import isfile, join, dirname import traceback import discord from discord.ext import commands from discord_slash import SlashCommand from dotenv import load_dotenv, find_dotenv # Load Discord secret token from .env file load_dotenv(find_dotenv()) TOKEN = os.getenv('DISCORD_TOKEN') # Set up logging for application to write to logging.config.fileConfig(fname='logs/config.ini', disable_existing_loggers=False) logger = logging.getLogger(__name__) # Cog directory. 'meme.py' in cogs directory is be cogs.meme cogs_dir = "cogs" bot = commands.Bot(command_prefix="!", description='The 67HS shop bot using slash commands', self_bot=True, intents=discord.Intents.default()) slash = SlashCommand(bot, sync_commands=True) # Load the extensions(cogs) that are located in the cogs directory. Any file in here attempts to load. if __name__ == '__main__': for extension in [f.replace('.py', '') for f in listdir(cogs_dir) if isfile(join(cogs_dir, f))]: try: bot.load_extension(cogs_dir + "." + extension) print(f'Loaded {extension} successfully') logger.info('Loaded %s successfully', extension) except (discord.ClientException, ModuleNotFoundError): print(f'Failed to load extension: {extension}') print(discord.ClientException) print(ModuleNotFoundError) logger.error('Failed to load extension: %s', extension) traceback.print_exc() @bot.event async def on_ready(): await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="/help")) print('-' * 15) print('Successfully logged in and booted...!') print(f'Logged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}') print('-' * 15) logger.info('Successfully logged in and booted...!') logger.info('Logged in as: %s - %s\nVersion: %s', bot.user.name, bot.user.id, discord.__version__) bot.run(TOKEN, bot=True, reconnect=True)
2,124
utils/dem_processing/srtm_tiles/process_srtm_tile.py
globalmangrovewatch/gmw_monitoring_demo
2
2171418
from pbprocesstools.pbpt_process import PBPTProcessTool
import os
import logging
import subprocess
import rsgislib
import rsgislib.imageutils
import rsgislib.imagecalc

logger = logging.getLogger(__name__)


class ProcessSRTMTile(PBPTProcessTool):

    def __init__(self):
        super().__init__(cmd_name='process_srtm_tile.py', descript=None)

    def do_processing(self, **kwargs):
        rsgis_utils = rsgislib.RSGISPyUtils()

        srtm_file = self.params['srtm_file']
        tmp_dir = self.params['tmp_dir']
        basename = self.params['basename']
        out_min1_img = self.params['out_min1_img']
        out_min0_img = self.params['out_min0_img']

        srtm_kea_file = os.path.join(tmp_dir, "{}.kea".format(basename))
        rsgislib.imageutils.gdal_translate(srtm_file, srtm_kea_file, 'KEA')

        rsgislib.imagecalc.imageMath(srtm_kea_file, out_min0_img, 'b1<0?0:b1', 'KEA', rsgislib.TYPE_32INT, False, False)
        rsgislib.imageutils.popImageStats(out_min0_img, usenodataval=True, nodataval=0, calcpyramids=True)

        rsgislib.imagecalc.imageMath(srtm_kea_file, out_min1_img, 'b1<1?1:b1', 'KEA', rsgislib.TYPE_32INT, False, False)
        rsgislib.imageutils.popImageStats(out_min1_img, usenodataval=True, nodataval=1, calcpyramids=True)

    def required_fields(self, **kwargs):
        return ["srtm_file", "tmp_dir", "out_min1_img", "out_min0_img", "basename"]

    def outputs_present(self, **kwargs):
        files_dict = dict()
        files_dict[self.params['out_min1_img']] = 'gdal_image'
        files_dict[self.params['out_min0_img']] = 'gdal_image'
        return self.check_files(files_dict)


if __name__ == "__main__":
    ProcessSRTMTile().std_run()
1,763
tests/beta_tests/test_rammstein_needs_your_help.py
the-zebulan/CodeWars
40
2169757
import unittest

from katas.beta.rammstein_needs_your_help import feuer_frei


class FeuerFreiTestCase(unittest.TestCase):
    def test_equal_1(self):
        self.assertEqual(feuer_frei(5, 20), 'Perfekt!')

    def test_equal_2(self):
        self.assertEqual(feuer_frei(5, 200), 900)

    def test_equal_3(self):
        self.assertEqual(feuer_frei(5, 2), '90 Stunden mehr Benzin ben\xf6tigt.')
422
scripts/benchmark-demo.py
bworrell/buckshot
0
2171035
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function

import sys
import timeit
import random
import logging
import functools
import fractions

import buckshot


def harmonic_sum(x):
    hsum = 0
    for x in xrange(1, x + 1):
        hsum += fractions.Fraction(1, x)
    return hsum


@buckshot.distribute(ordered=False)
def unordered_distributed_harmonic_sum(x):
    return harmonic_sum(x)


@buckshot.distribute(ordered=True)
def ordered_distributed_harmonic_sum(x):
    return harmonic_sum(x)


def run_serial(values):
    return [harmonic_sum(x) for x in values]


def main():
    values = range(10)
    random.shuffle(values)

    print("Verifying results are the same across functions...", end=" ")
    r1 = run_serial(values)
    r2 = list(unordered_distributed_harmonic_sum(values))
    r3 = list(ordered_distributed_harmonic_sum(values))

    assert r1 == r3
    assert sorted(r1) == sorted(r2) == sorted(r3)
    print("All good!")

    print("Benchmarking...")
    values = range(1000, 2000, 50)
    benchmark = functools.partial(timeit.repeat, number=1, repeat=3)

    # Each label times the function it names.
    print("serial: ", benchmark(lambda: run_serial(values)))
    print("@distribute(ordered=False): ", benchmark(lambda: list(unordered_distributed_harmonic_sum(values))))
    print("@distribute(ordered=True): ", benchmark(lambda: list(ordered_distributed_harmonic_sum(values))))


if __name__ == "__main__":
    if "-d" in sys.argv:
        logging.basicConfig(level=logging.DEBUG)
    main()
1,579
examples/service_b.py
kevinqqnj/sanic-zipkin
7
2171429
from sanic import Sanic, response
from sanic_zipkin import SanicZipkin, logger

app = Sanic(__name__)
sz = SanicZipkin(app, service='backend-b')


@sz.route("/api/get", methods=['GET'])
async def handler_get(request, context):
    message = 'get method'
    return response.json({"hello": "handler_get"})


@sz.route("/api/post", methods=['POST'])
async def handler_post(request, context):
    message = 'post method'
    return response.json({"hello": "handler_post"})


@app.route("/")
async def index(request):
    return response.json({"hello": 'service-b'})


if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8001, debug=True)
667
HtmlExport.py
revolunet/sublimetext-html-export
2
2168422
import sublime, sublime_plugin import webbrowser import tempfile import os import json settings = sublime.load_settings('HtmlExport.sublime-settings') LANGUAGES = { 'c': 'clike', 'cc': 'clike', 'cpp': 'clike', 'cs': 'clike', 'coffee': 'coffeescript', 'css': 'css', 'diff': 'diff', 'go': 'go', 'html': 'htmlmixed', 'htm': 'htmlmixed', 'js': 'javascript', 'json': 'javascript', 'less': 'less', 'lua': 'lua', 'md': 'markdown', 'markdown': 'markdown', 'pl': 'perl', 'php': 'php', 'py': 'python', 'pl': 'perl', 'rb': 'ruby', 'xml': 'xml', 'xsl': 'xml', 'xslt': 'xml' } DEPENDENCIES = { 'php': ['xml', 'javascript', 'css', 'clike'], 'markdown': ['xml'], 'htmlmixed': ['xml', 'javascript', 'css'] } class HtmlExportCommand(sublime_plugin.TextCommand): """ Export file contents to a single HTML file""" def run(self, edit): region = sublime.Region(0, self.view.size()) encoding = self.view.encoding() if encoding == 'Undefined': encoding = 'UTF-8' elif encoding == 'Western (Windows 1252)': encoding = 'windows-1252' contents = self.view.substr(region) contents = contents.replace('<', '&lt;').replace('>', '&gt;') tmp_html = tempfile.NamedTemporaryFile(delete=False, suffix='.html') tmp_html.write('<meta charset="%s">' % self.view.encoding()) # package manager path plugin_dir = os.path.join(sublime.packages_path(), 'HTML Export') if not os.path.isdir(plugin_dir): # git dir plugin_dir = os.path.join(sublime.packages_path(), 'sublimetext-html-export') if not os.path.isdir(plugin_dir): raise Exception("ERROR: cant find codemirror dir !") filename = self.view.file_name() language = None if filename: fileext = os.path.splitext(filename)[1][1:] language = LANGUAGES.get(fileext.lower()) else: filename = 'unamed file' js = open(os.path.join(plugin_dir, 'codemirror', 'lib', 'codemirror.js'), 'r').read() if language: for dependency in DEPENDENCIES.get(language, []): js += open(os.path.join(plugin_dir, 'codemirror', 'mode', dependency, '%s.js' % dependency), 'r').read() js += open(os.path.join(plugin_dir, 'codemirror', 'mode', language, '%s.js' % language), 'r').read() css = open(os.path.join(plugin_dir, 'codemirror', 'lib', 'codemirror.css'), 'r').read() editorConfig = { 'mode': language, 'lineNumbers': True } user_editorConfig = settings.get('editorConfig') if user_editorConfig and isinstance(user_editorConfig, dict): editorConfig.update(user_editorConfig) theme = editorConfig.get('theme') if theme: theme_css = os.path.join(plugin_dir, 'codemirror', 'theme', '%s.css' % theme) if os.path.isfile(theme_css): css += open(theme_css, 'r').read() datas = { 'title': os.path.basename(filename), 'css': css, 'js': js, 'code': contents, 'editorConfig': json.dumps(editorConfig) } html = u""" <!doctype html> <html> <head> <title>%(title)s</title> <script>%(js)s</script> <style>%(css)s</style> <style>.CodeMirror-scroll {height: auto; overflow: visible;}</style> </head> <body> <h3>%(title)s</h3> <textarea id="code" name="code">%(code)s</textarea> <script> var editor = CodeMirror.fromTextArea(document.getElementById("code"), %(editorConfig)s); </script> </body> </html> """ % datas tmp_html.write(html.encode(encoding)) tmp_html.close() webbrowser.open_new_tab(tmp_html.name)
4,083
manage.py
ProfesseurGibaud/TestSite
304
2170971
#!/usr/bin/env python
import os
import sys

# This is not part of the regular manage.py files, but ensure students
# don't get blocked because they use the wrong Python version
if sys.version_info < (3, 6):
    sys.exit("'Django, an app at a time' requires Python 3.6 or greater")

# This is a hack to allow "ignore_this_directory" to be added to the PYTHON PATH
# and is not part of the usual manage.py file
from project import settings
sys.path.append(str(settings.BASE_DIR / 'ignore_this_directory'))

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
711
skpar/dftbutils/taskdict.py
by-student-2017/skpar-0.2.4_Ubuntu18.04LTS
9
2170288
""" Provide mapping between task names available to user and actual functions. """ from skpar.core.utils import get_logger from skpar.dftbutils.queryDFTB import get_dftbp_data, get_bandstructure from skpar.dftbutils.queryDFTB import get_dftbp_evol from skpar.dftbutils.queryDFTB import get_effmasses, get_special_Ek from skpar.dftbutils.plot import magic_plot_bs LOGGER = get_logger(__name__) TASKDICT = { # obtain data from model evaluations 'get_data': get_dftbp_data, 'get_evol': get_dftbp_evol, 'get_bs' : get_bandstructure, 'get_meff': get_effmasses, 'get_Ek' : get_special_Ek, # plot data # this one is currently used via the wrapper of PlotTask in ../core/taskdict.py 'plot_bs' : magic_plot_bs, }
744
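The taskdict module above only builds a name-to-function mapping. A hedged sketch of how such a mapping is typically consumed follows; the task name is one of the keys defined above, but the runner loop and argument tuple are placeholders, not taken from skpar.

# Hypothetical lookup: resolve a user-supplied task name to its function.
task_name = 'get_bs'            # exists in TASKDICT above
task_func = TASKDICT[task_name]
# The task runner would then call task_func(*args) with arguments it assembled elsewhere.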
HLTrigger/Configuration/python/HLT_75e33/sequences/HLTFastJetForEgamma_cfi.py
PKUfudawei/cmssw
1
2170865
import FWCore.ParameterSet.Config as cms

from ..tasks.HLTFastJetForEgammaTask_cfi import *

HLTFastJetForEgamma = cms.Sequence(
    HLTFastJetForEgammaTask
)
159
scripts/practice/FB/LongestArithmeticSubSequqenceWithDifference.py
bhimeshchauhan/competitive_programming
0
2168974
""" Longest Arithmetic Subsequence Given an array nums of integers, return the length of the longest arithmetic subsequence in nums. Recall that a subsequence of an array nums is a list nums[i1], nums[i2], ..., nums[ik] with 0 <= i1 < i2 < ... < ik <= nums.length - 1, and that a sequence seq is arithmetic if seq[i+1] - seq[i] are all the same value (for 0 <= i < seq.length - 1). Example 1: Input: nums = [3,6,9,12] Output: 4 Explanation: The whole array is an arithmetic sequence with steps of length = 3. Example 2: Input: nums = [9,4,7,2,10] Output: 3 Explanation: The longest arithmetic subsequence is [4,7,10]. Example 3: Input: nums = [20,1,15,3,10,5,8] Output: 4 Explanation: The longest arithmetic subsequence is [20,15,10,5]. Constraints: 2 <= nums.length <= 1000 0 <= nums[i] <= 500 """ from typing import List from collections import defaultdict class Solution: """ 6 June 2020. DP - Bottom up. Look at the solution as to how it was done. T: O(N^2). The use of the double for loops. S: O(N^2). The lengths of the dictionary in dp follows this order: 0, 1, 2, 3,...n. That's N^2. **The literal running time varies with LC. This same solution ran in 2.1ms and 1.1ms.** **The literal space time is consistent at like 150MB which is insanely high but beats 80%. ** """ def longestArithSeqLength(self, nums: List[int]) -> int: # Minimum answer is always 2. if len(nums) < 2: return len(A) # The DP is a list of dictionaries. # dp[i] is the dictionary for item i in nums # Each kv pair in dp[i] is delta:lengthOfSubsequence. n = len(nums) dp = [{} for i in range(n)] result = 2 for i in range(1, n): for j in range(i): delta = nums[i] - nums[j] # If we've seen this delta with dp[j], then increase the length of the subseq by 1. # This is equivalent of dp[i] 'adding on' to the subsequence. if delta in dp[j]: currentLength = dp[j].get(delta) dp[i][delta] = currentLength + 1 # Else, start a new subsequence with just dp[i] and dp[j]. # Length is always two. else: dp[i][delta] = 2 # Update max. result = max(result, dp[i][delta]) return result class Solution: def longestArithSeqLength(self, nums: List[int]): """ - have a `sequence_cache` hashmap for each element in the array with the keys and values: `{sequence_difference: count/length}` - iterate in reverse order - for each `element_1`: - iterate through all the elements to its right, and for each `element_2`: - get the `sequence difference`: (`element_1-element_2`) - check if staring a sequence with that sequence difference will be greater than what we have seen b4 for the same sequence difference - update the longest var to reflect the longest we have seen so far """ longest = 0 seq_cache = [defaultdict(lambda: 1) for num in nums] for idx_1 in reversed(range(len(nums))): for idx_2 in range(idx_1+1, len(nums)): seq_diff = nums[idx_2] - nums[idx_1] # current_seq_len = max(current_seq_len, seq_starting_at_idx_2_len+1) seq_cache[idx_1][seq_diff] = max( seq_cache[idx_1][seq_diff], seq_cache[idx_2][seq_diff]+1) longest = max(longest, seq_cache[idx_1][seq_diff]) return longest
3,683
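A short sketch re-running the three inputs from the problem statement against the (second) Solution class above; nothing here is new logic, it only exercises the listed examples.

# Re-check the docstring's examples with either Solution class defined above.
solver = Solution()
assert solver.longestArithSeqLength([3, 6, 9, 12]) == 4
assert solver.longestArithSeqLength([9, 4, 7, 2, 10]) == 3
assert solver.longestArithSeqLength([20, 1, 15, 3, 10, 5, 8]) == 4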
src/training/sampling/negative_sampling.py
CorentinBrtx/car-detection-opencv
0
2171409
from typing import List, Tuple

import numpy as np


def generate_negative_samples(
    img: np.ndarray,
    size: Tuple[int, int] = (64, 64),
    n_samples: int = 10,
    bounding_boxes: List[List[int]] = None,
    max_tries: int = 500,
) -> List[np.ndarray]:
    """
    Generate negative samples from a given image by sampling random patches
    while avoiding the specified bounding boxes.

    Parameters
    ----------
    img : np.ndarray
        The image to generate negative samples from.
    size : Tuple[int, int], optional
        Size of the samples, by default (64, 64)
    n_samples : int, optional
        Number of samples to generate, by default 10
    bounding_boxes : List[List[int]], optional
        Bounding boxes to avoid, by default None
    max_tries : int, optional
        Maximum tries to generate samples (avoid infinite loop if bounding_boxes
        take too much space), by default 500

    Returns
    -------
    negative_samples : List[np.ndarray]
        List of generated negative samples.
    """
    if bounding_boxes is None:
        bounding_boxes = []

    mask = np.zeros(img.shape[:2], np.uint8)
    mask_with_margin = np.zeros(img.shape[:2], np.uint8)

    for box in bounding_boxes:
        x, y, w, h = box
        mask[y : y + h, x : x + w] = 1
        mask_with_margin[max(0, y - size[0]) : y + h, max(0, x - size[1]) : x + w] = 1

    mask_with_margin[:, -size[1] :] = 1
    mask_with_margin[-size[0] :, :] = 1

    negative_samples = []
    tries = 0

    indices = np.transpose(np.nonzero(mask_with_margin == 0))

    while len(negative_samples) < n_samples and tries < max_tries:
        tries += 1
        top_left = indices[np.random.randint(0, len(indices))]
        if (
            mask[top_left[0] : top_left[0] + size[0], top_left[1] : top_left[1] + size[1]].sum()
            == 0
        ):
            negative_samples.append(
                img[top_left[0] : top_left[0] + size[0], top_left[1] : top_left[1] + size[1]]
            )

    return negative_samples
2,029
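A minimal sketch of calling generate_negative_samples, using a synthetic image and one made-up [x, y, w, h] box; the sizes are arbitrary and only show the expected shapes, not values from the original project.

import numpy as np

# Synthetic 480x640 RGB image and a single bounding box to avoid (all illustrative).
image = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
boxes = [[100, 50, 120, 80]]

patches = generate_negative_samples(image, size=(64, 64), n_samples=5, bounding_boxes=boxes)
print(len(patches), patches[0].shape)  # up to 5 patches, each (64, 64, 3)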
mc2pbrt/pyanvil/world.py
PbrtCraft/mc2pbrt
3
2171020
import os import math import zlib import pyanvil.nbt as nbt import pyanvil.stream as stream class BlockState: def __init__(self, name, props): self.name = name self.props = props def __str__(self): return 'BlockState(' + self.name + ',' + str(self.props) + ')' class Block: AIR = None def __init__(self, state): self.state = state def __str__(self): return 'Block(' + str(self.state) + ')' def get_state(self): return self.state Block.AIR = Block(BlockState('minecraft:air', {})) class ChunkSection: def __init__(self, blocks, raw_section, y_index): self.blocks = blocks self.raw_section = raw_section self.y_index = y_index def get_block(self, block_pos): x = block_pos[0] y = block_pos[1] z = block_pos[2] return self.blocks[x + z * 16 + y * 16 ** 2] class Chunk: def __init__(self, xpos, zpos, raw_nbt): self.xpos = xpos self.zpos = zpos self._build(raw_nbt) def _build(self, raw_nbt): sections = {} level_node = raw_nbt.get('Level') for section in level_node.get('Sections').children: if section.has('BlockStates'): flatstates = [c.get() for c in section.get('BlockStates').children] pack_size = int((len(flatstates) * 64) / (16**3)) states = [ self._read_width_from_loc(flatstates, pack_size, i) for i in range(16**3) ] palette = [ BlockState( state.get('Name').get(), state.get('Properties').to_dict() if state.has( 'Properties') else {} ) for state in section.get('Palette').children ] blocks = [ Block(palette[state]) for state in states ] else: blocks = [Block.AIR]*(16**3) sections[section.get('Y').get()] = ChunkSection( blocks, section, section.get('Y').get()) self.sections = sections self.biome_table = [b.get() for b in level_node.get('Biomes').children] def _read_width_from_loc(self, long_list, width, possition): offset = possition * width # if this is split across two nums if (offset % 64) + width > 64: # Find the lengths on each side of the split side1len = 64 - ((offset) % 64) side2len = ((offset + width) % 64) # Select the sections we want from each side1 = self._read_bits( long_list[int(offset/64)], side1len, offset % 64) side2 = self._read_bits( long_list[int((offset + width)/64)], side2len, 0) # Join them comp = (side2 << side1len) + side1 return comp else: comp = self._read_bits( long_list[int(offset/64)], width, offset % 64) return comp def _read_bits(self, num, width, start): # create a mask of size 'width' of 1 bits mask = (2 ** width) - 1 # shift it out to where we need for the mask mask = mask << start # select the bits we need comp = num & mask # move them back to where they should be comp = comp >> start return comp def get_block(self, block_pos): return self.get_section(block_pos[1]).get_block([n % 16 for n in block_pos]) def get_biome(self, block_pos): z = block_pos[2] % 16 x = block_pos[0] % 16 return self.biome_table[z*16 + x] def get_section(self, y): key = int(y/16) if key not in self.sections: self.sections[key] = ChunkSection( [Block.AIR]*4096, nbt.CompoundTag('None'), key ) return self.sections[key] def __str__(self): return "Chunk(" + str(self.xpos) + "," + str(self.zpos) + ")" class World: def __init__(self, file_name, save_location=''): self.file_name = file_name self.save_location = save_location self.chunks = {} def get_block(self, block_pos): chunk_pos = self._get_chunk(block_pos) chunk = self.get_chunk(chunk_pos) return chunk.get_block(block_pos) def get_biome(self, block_pos): chunk_pos = self._get_chunk(block_pos) chunk = self.get_chunk(chunk_pos) return chunk.get_biome(block_pos) def get_chunk(self, chunk_pos): if chunk_pos not in self.chunks: 
self._load_chunk(chunk_pos) return self.chunks[chunk_pos] def _load_chunk(self, chunk_pos): chunk_location = os.path.join( self.save_location, self.file_name, "region", self._get_region_file(chunk_pos)) with open(chunk_location, mode='rb') as region: locations = [[ int.from_bytes(region.read(3), byteorder='big', signed=False) * 4096, int.from_bytes(region.read(1), byteorder='big', signed=False) * 4096 ] for i in range(1024)] timestamps = region.read(4096) chunk = self._load_binary_chunk_at( region, locations[((chunk_pos[0] % 32) + (chunk_pos[1] % 32) * 32)][0]) self.chunks[chunk_pos] = chunk def _load_binary_chunk_at(self, region_file, offset): region_file.seek(offset) datalen = int.from_bytes(region_file.read( 4), byteorder='big', signed=False) compr = region_file.read(1) decompressed = zlib.decompress(region_file.read(datalen)) data = nbt.parse_nbt(stream.InputStream(decompressed)) chunk_pos = (data.get('Level').get('xPos').get(), data.get('Level').get('zPos').get()) chunk = Chunk( chunk_pos[0], chunk_pos[1], data ) return chunk def _get_region_file(self, chunk_pos): return 'r.' + '.'.join([str(x) for x in self._get_region(chunk_pos)]) + '.mca' def _get_chunk(self, block_pos): return (math.floor(block_pos[0] / 16), math.floor(block_pos[2] / 16)) def _get_region(self, chunk_pos): return (math.floor(chunk_pos[0] / 32), math.floor(chunk_pos[1] / 32))
6,474
students/k3342/laboratory_works/Salnikova_Nadezhda/laboratory_work_1/flights/forms.py
TonikX/ITMO_ICT_-WebProgramming_2020
10
2171105
from django import forms
from django.forms import ModelForm, Textarea
from django.contrib.auth.models import User

from flights.models import Client, Comment


class RegisterUserForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('username', 'password')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields:
            self.fields[field].widget.attrs['class'] = 'form-control'

    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user


class ClientRegister(forms.ModelForm):
    class Meta:
        model = Client
        fields = ['first_name', 'last_name', 'date_of_birth', 'bonus_card']


class AddComment(forms.ModelForm):
    class Meta:
        model = Comment
        fields = ['flight', 'comment_type', 'text']
        labels = {
            'flight': ('Choose a flight to leave a comment'),
            'type_of_comment': ('Choose the comment type'),
            'text': ('Type your comment'),
        }
        widgets = {
            "text": Textarea(attrs={'cols': 70, 'rows': 10}),
        }
1,203
MyDetector/generatefakesubmission.py
lkk688/WaymoObjectDetection
6
2170969
import tensorflow.compat.v1 as tf from pathlib import Path import json import argparse import tqdm import uuid from waymo_open_dataset import dataset_pb2 from waymo_open_dataset import label_pb2 from waymo_open_dataset.protos import metrics_pb2 from waymo_open_dataset.protos import submission_pb2 def _fancy_deep_learning(frame): """Creates a prediction objects file.""" o_list = [] for camera_labels in frame.camera_labels: if camera_labels.name != 1: #Only use front camera continue for gt_label in camera_labels.labels: o = metrics_pb2.Object() # The following 3 fields are used to uniquely identify a frame a prediction # is predicted at. o.context_name = frame.context.name # The frame timestamp for the prediction. See Frame::timestamp_micros in # dataset.proto. o.frame_timestamp_micros = frame.timestamp_micros # This is only needed for 2D detection or tracking tasks. # Set it to the camera name the prediction is for. o.camera_name = camera_labels.name # Populating box and score. box = label_pb2.Label.Box() box.center_x = gt_label.box.center_x box.center_y = gt_label.box.center_y box.length = gt_label.box.length box.width = gt_label.box.width o.object.box.CopyFrom(box) # This must be within [0.0, 1.0]. It is better to filter those boxes with # small scores to speed up metrics computation. o.score = 0.9 # Use correct type. o.object.type = gt_label.type o_list.append(o) return o_list from glob import glob import os if __name__ == "__main__": PATH='/data/cmpe295-liu/Waymo' #validation_folders = ["validation_0000"] validation_folders = ["validation_0000","validation_0001","validation_0002","validation_0003","validation_0004","validation_0005","validation_0006","validation_0007"] #["validation_0007","validation_0006","validation_0005","validation_0004","validation_0003","validation_0002","validation_0001","validation_0000"] data_files = [path for x in validation_folders for path in glob(os.path.join(PATH, x, "*.tfrecord"))] print(data_files)#all TFRecord file list print(len(data_files)) #dataset = tf.data.TFRecordDataset([str(x.absolute()) for x in Path(data_files)]) dataset = [tf.data.TFRecordDataset(FILENAME, compression_type='') for FILENAME in data_files]#create a list of dataset for each TFRecord file print("Dataset type:",type(dataset)) frames = [] #store all frames = total number of TFrecord files * 40 frame(each TFrecord) objects = metrics_pb2.Objects() for i, data_file in enumerate(dataset): print("Datafile: ",i)#Each TFrecord file for idx, data in enumerate(data_file): #Create frame based on Waymo API, 199 frames per TFrecord (20s, 10Hz) # if idx % 5 != 0: #Downsample every 5 images, reduce to 2Hz, total around 40 frames # continue frame = dataset_pb2.Frame() frame.ParseFromString(bytearray(data.numpy())) o_list = _fancy_deep_learning(frame) frames.append(frame) for o in o_list: objects.objects.append(o) #https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/protos/submission.proto submission = submission_pb2.Submission() submission.task = submission_pb2.Submission.DETECTION_2D submission.account_name = '<EMAIL>' submission.authors.append('<NAME>') submission.affiliation = 'None' submission.unique_method_name = 'fake' submission.description = 'none' submission.method_link = "empty method" submission.sensor_type = submission_pb2.Submission.CAMERA_ALL submission.number_past_frames_exclude_current = 0 submission.number_future_frames_exclude_current = 0 submission.inference_results.CopyFrom(objects) submission.docker_image_source = '' #// Link to the latency 
submission Docker image stored in Google Storage bucket #object_types // Object types this submission contains. By default, we assume all types. #latency_second Self-reported end to end inference latency in seconds outputfilepath='/home/010796032/MyRepo/submissionoutput/fake_valfrontcameraall.bin' f = open(outputfilepath, 'wb') f.write(submission.SerializeToString()) f.close()
4,516
local_test/bus_schedule/app/post/post.py
NYUSHer/Widgets
1
2170725
from app.post import post from flask import jsonify from util.util import query_fetch, query_mod, PostList, query_dict_fetch, ErrorResponse from instance.config import VERBOSE, DB from util.util import token_required, replace from flask import request ########################################### # # # Authorized Code # # # ########################################### @post.route('/list', methods=['POST']) @token_required def get_list(): offset = int(request.form.get('offset')) size = int(request.form.get('size')) # offset = (int(temp)+1)*int(size) sql = "SELECT pid, title, content, authorid, user_avatar, user_name FROM " \ "posts INNER JOIN users ON users.user_id = posts.authorid ORDER BY priority DESC, pid DESC LIMIT {} OFFSET {}".format(size, offset) if VERBOSE: print('get list query:' + sql) indicator = query_dict_fetch(sql, DB) if indicator: response = PostList() response.data['offset'] = offset response.data['size'] = size response.data['count'] = str(len(indicator)) response.data['postlist'] = indicator else: response = ErrorResponse() response.error['errorCode'] = '105' response.error['errorMsg'] = 'No post found.' return jsonify(response.__dict__) @post.route('/submit', methods=['POST']) @token_required def post_submit(): post_title = replace(request.form.get('title')) post_category = replace(request.form.get('category')) post_tags = replace(request.form.get('tags')) post_content = replace(request.form.get('content')) post_by = request.headers.get('userid') if VERBOSE: print(post_title, post_category, post_tags, post_content, post_by) # No empty title if post_title == "": response = ErrorResponse() response.error['errorCode'] = '108' response.error['errorMsg'] = 'title cannot be empty' return jsonify(response.__dict__) # No empty content elif post_content == "": response = ErrorResponse() response.error['errorCode'] = '108' response.error['errorMsg'] = 'content cannot be empty' return jsonify(response.__dict__) # Modify Existing Post elif request.form.get('pid') is not None and request.form.get('pid').isdigit(): post_id = request.form.get('pid') # Check if user_id and post_by matches sql = "SELECT authorid FROM posts WHERE pid = '{}'".format(post_id) if VERBOSE: print(sql) indicator = query_fetch(sql, DB) user_id = request.headers.get('userid') response = PostList() if indicator['authorid'] == int(user_id): sql = "UPDATE posts SET title='{}', category='{}', tags='{}', content='{}', timestamp = (CURRENT_TIMESTAMP) WHERE pid='{}'"\ .format(post_title, post_category, post_tags, post_content, post_id) if VERBOSE: print(sql) query_mod(sql, DB) response.data['pid'] = post_id # New Post elif request.form.get('pid') is None: sql = "INSERT INTO posts(title, content, tags, category, authorid) VALUES ('{}', '{}', '{}', '{}', '{}')" \ .format(post_title, post_content, post_tags, post_category, post_by) if VERBOSE: print("insert query:" + sql) query_mod(sql, DB) # Get the generated post_id sql = "SELECT pid FROM posts WHERE category = '{}' AND content = '{}' AND authorid = '{}'" \ .format(post_category, post_content, post_by) if VERBOSE: print("get post_id query:" + sql) indicator = query_fetch(sql, DB) response = PostList() if indicator: response.data['pid'] = indicator['pid'] else: response = ErrorResponse() response.error['errorCode'] = '106' response.error['errorMsg'] = 'How did you wind up here??' 
return jsonify(response.__dict__) @post.route('/get', methods=['POST']) @token_required def post_get(): post_id = request.form.get('pid') sql = "SELECT title, category, tags, content FROM posts WHERE pid = '{}'".format(post_id) if VERBOSE: print("post get query:" + sql) indicator = query_fetch(sql, DB) response = PostList() if indicator: response.data['pid'] = post_id response.data['title'] = indicator['title'] response.data['category'] = indicator['category'] """ NOTE: Tags must be deserialized first. Split with comma e.g. post_tags = 'dog, 2017, happy, weekend' """ response.data['tags'] = indicator['tags'] response.data['content'] = indicator['content'] else: response = ErrorResponse() response.error['errorCode'] = '105' response.error['errorMsg'] = 'Post does not exist' return jsonify(response.__dict__) @post.route('/delete', methods=['POST']) @token_required def post_delete(): post_by = request.headers.get('userid') post_id = request.form.get('pid') # Check if requested post exists sql = "SELECT * FROM posts WHERE pid='{}'".format(post_id) if VERBOSE: print("delete post pid check" + sql) check = query_fetch(sql, DB) if check is None: response = ErrorResponse() response.error['errorCode'] = '105' response.error['errorMsg'] = 'post does not exist' return jsonify(response.__dict__) # Check if user have authorization to delete sql = "SELECT authorid FROM posts WHERE pid='{}'".format(post_id) if VERBOSE: print("delete post authorization check" + sql) indicator = query_fetch(sql, DB) # Authorid and userid matchs and have authority to delete post if indicator['authorid'] == int(post_by): # Delete the post sql = "DELETE FROM posts WHERE authorid = '{}' AND pid = '{}'"\ .format(post_by, post_id) if VERBOSE: print("delete post" + sql) query_mod(sql, DB) response = PostList() response.data['pid'] = post_id # No authority to delete post else: response = ErrorResponse() response.error['errorCode'] = '104' response.error['errorMsg'] = 'No authority.' return jsonify(response.__dict__)
6,408
package/utils/file.py
MikeCun/PersonReID
1
2170065
import os import os.path as osp import shutil import json import numpy as np import glob import sys if sys.version_info[0] == 2: import cPickle as pickle else: import pickle def may_make_dir(path): """ Args: path: a dir, e.g. result of `osp.dirname()` Note: `osp.exists('')` returns `False`, while `osp.exists('.')` returns `True`! """ # This clause has mistakes: # if path is None or '': if path in [None, '']: return if not osp.exists(path): os.makedirs(path) def load_pickle(path, verbose=True): """Check and load pickle object. According to this post: https://stackoverflow.com/a/41733927, cPickle and disabling garbage collector helps with loading speed.""" assert osp.exists(path), "File not exists: {}".format(path) # gc.disable() with open(path, 'rb') as f: ret = pickle.load(f) # gc.enable() if verbose: print('Loaded pickle file {}'.format(path)) return ret def save_pickle(obj, path, verbose=True): """Create dir and save file.""" may_make_dir(osp.dirname(osp.abspath(path))) with open(path, 'wb') as f: pickle.dump(obj, f, protocol=2) if verbose: print('Pickle file saved to {}'.format(path)) def load_json(path): """Check and load json file.""" assert osp.exists(path), "Json file not exists: {}".format(path) with open(path, 'r') as f: ret = json.load(f) print('Loaded json file {}'.format(path)) return ret def save_json(obj, path): """Create dir and save file.""" may_make_dir(osp.dirname(osp.abspath(path))) with open(path, 'w') as f: json.dump(obj, f) print('Json file saved to {}'.format(path)) def read_lines(file): with open(file) as f: lines = f.readlines() lines = [l.strip() for l in lines if l.strip()] return lines def copy_to(p1, p2): # Only when the copy can go on without error do we create destination dir. if osp.exists(p1): may_make_dir(osp.dirname(p2)) shutil.copy(p1, p2) def get_files_by_pattern(root, pattern='a/b/*.ext', strip_root=False): """Optionally to only return matched sub paths.""" ret = glob.glob(osp.join(root, pattern)) if strip_root: ret = [r[len(root) + 1:] for r in ret] return ret def walkdir(folder, exts=None, sub_path=False, abs_path=False): """Walk through each files in a directory. Reference: https://github.com/tqdm/tqdm/wiki/How-to-make-a-great-Progress-Bar Args: exts: file extensions, e.g. '.jpg', or ['.jpg'] or ['.jpg', '.png'] sub_path: whether to exclude `folder` in the resulting paths, remaining sub paths abs_path: whether to return absolute paths """ if isinstance(exts, str): exts = [exts] for dirpath, dirs, files in os.walk(folder): for filename in files: if (exts is None) or (os.path.splitext(filename)[1] in exts): path = os.path.join(dirpath, filename) if sub_path: path = path[len(folder) + 1:] elif abs_path: path = os.path.abspath(path) yield path def strip_root(path): """a/b/c -> b/c""" sep = os.sep path = sep.join(path.split(sep)[1:]) return path
3,325
habanero/request_class.py
Maocx/habanero
0
2170401
import time import requests import json import re from .filterhandler import filter_handler from .habanero_utils import switch_classes,check_json,is_json,parse_json_err,make_ua,filter_dict,rename_query_filters from .exceptions import * class Request(object): ''' Habanero: request class This is the request class for all requests ''' def __init__(self, mailto, url, path, query = None, filter = None, offset = None, limit = None, sample = None, sort = None, order = None, facet = None, select = None, cursor = None, cursor_max = None, agency = False, **kwargs): self.mailto = mailto self.url = url self.path = path self.query = query self.filter = filter self.offset = offset self.limit = limit self.sample = sample self.sort = sort self.order = order self.facet = facet self.select = select self.cursor = cursor self.cursor_max = cursor_max self.agency = agency self.kwargs = kwargs def _url(self): tmpurl = self.url + self.path return tmpurl.strip("/") def do_request(self): filt = filter_handler(self.filter) if self.select.__class__ is list: self.select = ','.join(self.select) if not isinstance(self.cursor_max, (type(None), int)): raise ValueError("cursor_max must be of class int") payload = {'query':self.query, 'filter':filt, 'offset':self.offset, 'rows':self.limit, 'sample':self.sample, 'sort':self.sort, 'order':self.order, 'facet':self.facet, 'select':self.select, 'cursor':self.cursor} payload = dict((k, v) for k, v in payload.items() if v) # add query filters payload.update(filter_dict(self.kwargs)) # rename query filters payload = rename_query_filters(payload) start_time = time.time() js = self._req(payload = payload) print("First request in ", time.time() - start_time) cu = js['message'].get('next-cursor') max_avail = js['message']['total-results'] res = self._redo_req(js, payload, cu, max_avail) return res def _redo_req(self, js, payload, cu, max_avail): print("cu", cu) print("payload",payload) print("max_avail", max_avail) print(self.cursor_max) if(cu.__class__.__name__ != 'NoneType' and self.cursor_max > len(js['message']['items'])): res = [js] total = len(js['message']['items']) while(cu.__class__.__name__ != 'NoneType' and self.cursor_max > total and total < max_avail): payload['cursor'] = cu start_time = time.time() out = self._req(payload = payload) print("Internal request in ", time.time() - start_time) cu = out['message'].get('next-cursor') res.append(out) total = sum([ len(z['message']['items']) for z in res ]) # This code is not built for resuming with a cursor! if(len(out["message"]['items'])<1): break return res else: return js def _req(self, payload): try: r = requests.get(self._url(), params = payload, headers = make_ua(self.mailto)) r.raise_for_status() except requests.exceptions.HTTPError: try: f = r.json() raise RequestError(r.status_code, f['message'][0]['message']) except: r.raise_for_status() except requests.exceptions.RequestException as e: print(e) check_json(r) return r.json()
3,403
share/pegasus/init/mpi-hw/daxgen.py
fengggli/pegasus
0
2171271
#!/usr/bin/env python
from Pegasus.DAX3 import *
import sys
import pwd
import os
import time
from Pegasus.DAX3 import *

# The name of the DAX file is the first argument
if len(sys.argv) != 2:
    sys.stderr.write("Usage: %s DAXFILE\n" % (sys.argv[0]))
    sys.exit(1)
daxfile = sys.argv[1]

USER = pwd.getpwuid(os.getuid())[0]

# Create an abstract dag
dax = ADAG("mpi-hello-world")

# Add some workflow-level metadata
dax.metadata("creator", "%s@%s" % (USER, os.uname()[1]))
dax.metadata("created", time.ctime())

# Add input file to the DAX-level replica catalog
fin = File("f.in")

# optional if you want to put the file locations in the DAX
# for tutorial we are picking up from --input-dir option to
# pegasus-plan
# fin.addPFN(PFN("file://" + os.getcwd() + "/input/f.in", "bluewaters"))
# dax.addFile(fin)

# Add the mpi hello world job
mpi_hw_job = Job(namespace="pegasus", name="mpihw")
fout = File("f.out")
mpi_hw_job.addArguments("-i ", fin)
mpi_hw_job.addArguments("-o ", fout)
mpi_hw_job.uses(fin, link=Link.INPUT)
mpi_hw_job.uses(fout, link=Link.OUTPUT)

# tell pegasus it is an MPI job
mpi_hw_job.addProfile(Profile("globus", "jobtype", "mpi"))

# add profiles indicating PBS specific parameters for BLUEWATERS

# pegasus.cores
mpi_hw_job.addProfile(Profile("pegasus", "cores", "32"))

# pegasus.nodes
mpi_hw_job.addProfile(Profile("pegasus", "nodes", "2"))

# pegasus.ppn
mpi_hw_job.addProfile(Profile("pegasus", "ppn", "16"))

# pegasus.runtime is walltime in seconds.
mpi_hw_job.addProfile(Profile("pegasus", "runtime", "300"))

dax.addJob(mpi_hw_job)

# Write the DAX to stdout
# dax.writeXML(sys.stdout)
f = open(daxfile, "w")
dax.writeXML(f)
f.close()
1,691
CodingBat/Python/Logic-2/lone_sum.py
unobatbayar/codingbat
2
2171079
""" Given 3 int values, a b c, return their sum. However, if one of the values is the same as another of the values, it does not count towards the sum. lone_sum(1, 2, 3) → 6 lone_sum(3, 2, 3) → 2 lone_sum(3, 3, 3) → 0 @author unobatbayar """ def lone_sum(a, b, c): sum = 0 if a != b and a != c: sum += a if b != a and b != c: sum += b if c != a and c != b: sum += c return sum
393
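A quick sketch confirming the three behaviours listed in the docstring above; it adds no new cases of its own.

# The three cases from the CodingBat prompt above.
assert lone_sum(1, 2, 3) == 6
assert lone_sum(3, 2, 3) == 2
assert lone_sum(3, 3, 3) == 0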
python/DeepSeaVectorDraw/VectorCommandUnion.py
akb825/DeepSea
5
2170650
# automatically generated by the FlatBuffers compiler, do not modify

# namespace: DeepSeaVectorDraw


class VectorCommandUnion(object):
    NONE = 0
    StartPathCommand = 1
    MoveCommand = 2
    LineCommand = 3
    BezierCommand = 4
    QuadraticCommand = 5
    ArcCommand = 6
    ClosePathCommand = 7
    EllipseCommand = 8
    RectangleCommand = 9
    StrokePathCommand = 10
    FillPathCommand = 11
    TextCommand = 12
    TextRangeCommand = 13
    ImageCommand = 14
475
clustering/clustering_funcs.py
EmreHakguder/SL_VSMs
0
2171224
import pandas as pd, os, json from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import silhouette_score from scipy import cluster def cluster_glove(someSemPath, height): X = pd.read_csv(someSemPath).set_index("video") Z = cluster.hierarchy.ward(X) cutree = cluster.hierarchy.cut_tree(Z, height=height) clusterLabels = [int(x[0]) for x in cutree] clusters_len = len(list(set(clusterLabels))) if 1 < clusters_len < len(X.index): return X.index, silhouette_score(X, cutree.ravel(), metric='euclidean'), clusterLabels, clusters_len else: return False, False, False, False def merge_cluster_data(outPath): languages = ["ASL", "BSL"] dims = ["50d", "100d", "200d", "300d"] masterPath = "results/clustering/signPairs_byCluster/" if not os.path.exists(outPath): os.makedirs(outPath) for language in languages: for dim in dims: list_height_path = masterPath+language+"/"+dim+"/" heights = [x.split("_")[3] for x in os.listdir(list_height_path) if not x.startswith(".")] for heit in heights: clustered_df = pd.DataFrame(columns=["language", "dim", "height", "clusterID", "signPair", "semSim", "HS_sim", "LOC_sim", "MOV_sim", "ENTIRE_sim"]) path = list_height_path+language+"_"+dim+"_heightheight_"+str(heit)+"_signPairs_byCluster.json" print(path) with open(path, "r") as read_file: js_file = json.load(read_file) for clusterID in js_file: for pair in js_file[clusterID]: signPair = pair[0] + " + " + pair[1] clustered_df = clustered_df.append({"language":language, "dim":dim, "height":heit, "clusterID":clusterID, "signPair":signPair, "semSim":None, "HS_sim":None, "LOC_sim":None, "MOV_sim":None, "ENTIRE_sim":None}, ignore_index=True) clustered_df.to_csv(outPath+str(language)+"_"+str(dim)+"_"+str(heit)+".csv.gz", compression="gzip", index=False) return clustered_df
2,814
gratipay/models/team/closing.py
kant/gratipay.com
517
2171378
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals


class Closing(object):
    """This mixin implements team closing.
    """

    #: Whether the team is closed or not.
    is_closed = False

    def close(self):
        """Close the team account.
        """
        with self.db.get_cursor() as cursor:
            cursor.run("UPDATE teams SET is_closed=true WHERE id=%s", (self.id,))
            self.app.add_event(
                cursor,
                'team',
                dict(id=self.id, action='set', values=dict(is_closed=True)),
            )
            self.set_attributes(is_closed=True)
            if self.package:
                self.package.unlink_team(cursor)
775
audcon/models.py
vikc07/audcon
0
2171465
from datetime import datetime import pytz from audcon import db from gpm import formatting from audcon import app class DefaultColumns(object): id = db.Column(db.Integer, primary_key=True) created_date = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) modified_date = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow) isdeleted = db.Column(db.Integer, default=False) def created_date_local_tz(self): utc = pytz.timezone('UTC') local_tz = pytz.timezone(app.config['UI']['TZ']) created_date_utc = utc.localize(self.created_date) created_date_local_tz = created_date_utc.astimezone(local_tz) return created_date_local_tz.replace(tzinfo=None) def modified_date_local_tz(self): utc = pytz.timezone('UTC') local_tz = pytz.timezone(app.config['UI']['TZ']) modified_date_utc = utc.localize(self.modified_date) modified_date_local_tz = modified_date_utc.astimezone(local_tz) return modified_date_local_tz.replace(tzinfo=None) def duration(self): return (self.modified_date - self.created_date).total_seconds() def duration_formatted(self): return formatting.time_pretty((self.modified_date - self.created_date).total_seconds()) class Media(DefaultColumns, db.Model): media_file_path = db.Column(db.String(255), nullable=False) media_title = db.Column(db.String(255), nullable=False) media_fsize = db.Column(db.BigInteger, nullable=False) media_format = db.Column(db.String(255), nullable=False) media_streams_count = db.Column(db.SmallInteger, nullable=False) media_a_streams_count = db.Column(db.SmallInteger, nullable=False) media_v_streams_count = db.Column(db.SmallInteger, nullable=False) media_s_streams_count = db.Column(db.SmallInteger, nullable=False) media_o_streams_count = db.Column(db.SmallInteger, nullable=False) media_a_codec = db.Column(db.String(255), nullable=False) media_a_sample_fmt = db.Column(db.String(255), nullable=False) media_a_sample_rate = db.Column(db.String(255), nullable=False) media_a_channels = db.Column(db.String(255), nullable=False) media_a_channel_layout = db.Column(db.String(255), nullable=False) media_a_bitrate = db.Column(db.String(255), nullable=False) media_full_meta = db.Column(db.JSON) def __repr__(self): return self.media_file_path def fsize_pretty(self): return formatting.fsize_pretty(self.media_fsize) def last_updated(self, formatted=False): last_updated = (datetime.utcnow() - self.modified_date).total_seconds() if last_updated < 0: last_updated = 0 if formatted: return formatting.time_pretty(last_updated) return last_updated class RunLog(DefaultColumns, db.Model): service = db.Column(db.String(255), nullable=False) status = db.Column(db.String(255)) params = db.Column(db.JSON) def last_ran(self): return (datetime.utcnow() - self.modified_date).total_seconds() class Queue(DefaultColumns, db.Model): media_file_path = db.Column(db.String(255), nullable=False) media_output_file_path = db.Column(db.String(255), nullable=False) media_output_ffmpeg_params = db.Column(db.String(255), nullable=False) media_converted = db.Column(db.Boolean, nullable=False, default=False) def __repr__(self): return self.id def conversion_status(self): if self.media_converted: return 'Complete' else: return 'Pending'
3,606
litterbox/models/google/model_google_slim.py
rwightman/tensorflow-litterbox
49
2170877
# Copyright (C) 2016 <NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # ============================================================================== """Model wrapper for Google's tensorflow/model/slim models. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import tensorflow as tf from collections import OrderedDict from fabric import model from models.google.nets import nets_factory slim = tf.contrib.slim google_default_params = { 'network': 'inception_resnet_v2', 'num_classes': 1000, } class ModelGoogleSlim(model.Model): def __init__(self, params=google_default_params): super(ModelGoogleSlim, self).__init__() params = model.merge_params(google_default_params, params) # model_name must correspond to one of google's network names in nets package, # see nets_factory.py for valid names. self.network = params['network'] assert self.network in nets_factory.networks_map self.num_classes = params['num_classes'] assert self.num_classes > 1 def build_tower(self, images, is_training=False, scope=None): weight_decay = 0.0001 network_fn = nets_factory.get_network_fn( self.network, num_classes=self.num_classes, weight_decay=weight_decay, is_training=is_training) logits, endpoints = network_fn(images) # HACK get mode variable scope set by google net code from logits op name so it can # be removed for smaller Tensorboard tags scope_search = re.search('%s_[0-9]*/(\w+)/' % self.TOWER_PREFIX, logits.op.name) if scope_search: self.model_variable_scope = scope_search.group(1) if 'AuxLogits' in endpoints: # Grab the logits associated with the side head. Employed during training. aux_logits = endpoints['AuxLogits'] else: aux_logits = None self.add_tower( scope, endpoints, logits, aux_logits ) # Add summaries for viewing model statistics on TensorBoard. self.activation_summaries() return logits def add_tower_loss(self, labels, scope=None): tower = self.tower(scope) num_classes = tower.outputs.get_shape()[-1].value labels = slim.one_hot_encoding(labels, num_classes=num_classes) slim.losses.softmax_cross_entropy( tower.outputs, labels, label_smoothing=0.1, weights=1.0) if 'AuxLogits' in tower.endpoints: slim.losses.softmax_cross_entropy( tower.aux_outputs, labels, label_smoothing=0.1, weights=0.4, scope='aux_loss') def output_scopes(self, prefix_scope=''): scopes = ['logits', 'Logits', 'AuxLogits/Aux_logits', 'AuxLogits/Logits', 'AuxLogits/Conv2d_2b_1x1'] prefix = prefix_scope + '/' if prefix_scope else '' prefix += self.model_variable_scope + '/' return [prefix + x for x in scopes] def get_predictions(self, outputs, processor): if processor is not None: logits = processor.decode_output(outputs) else: logits = outputs return tf.nn.softmax(logits) @staticmethod def eval_ops(logits, labels, processor): """Generate a simple (non tower based) loss op for use in evaluation. Args: logits: List of logits from inference(). Shape [batch_size, num_classes], dtype float32/64 labels: Labels from distorted_inputs or inputs(). batch_size vector with int32/64 values in [0, num_classes). 
""" top_1_op = tf.nn.in_top_k(logits, labels, 1) top_5_op = tf.nn.in_top_k(logits, labels, 5) loss_op = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels, name='xentropy_eval') return OrderedDict([('top 5', top_5_op), ('top 1', top_1_op), ('loss', loss_op)]) def check_norm(self, norm): if ('vgg' in self.network or 'resnet' in self.network) and norm != 'caffe_rgb': print("WARNING: If you are using the pre-trained weights for Google VGG and Resnet models, " "they were imported from Caffe and expect [0, 255] inputs, not the default [-1, 1]. " "It is recommended to change the image norm method from '%s' to 'caffe_rgb' with " "the --image_norm param." % norm)
4,670
month05/spider/day07_course/day07_code/02_ydSpder.py
chaofan-zheng/tedu-python-demo
4
2170756
import requests
from lxml import etree

# Prompt (Chinese): "Please enter the word to translate:"
word = input('请输入要翻译的单词:')

post_url = 'http://m.youdao.com/translate'
post_data = {
    'inputtext': word,
    'type': 'AUTO'
}

html = requests.post(url=post_url, data=post_data).text
parse_html = etree.HTML(html)
xpath_bds = '//ul[@id="translateResult"]/li/text()'
result = parse_html.xpath(xpath_bds)[0]
print(result)
355
apps/articles/admin.py
Pavel1114/blogger
0
2167383
from django.contrib import admin
from django.contrib.admin import ModelAdmin

from apps.articles.models import Article


@admin.register(Article)
class ArticleAdmin(ModelAdmin):
    list_display = ("__str__", "created", "author")
    search_fields = ["title", "text"]

    def save_model(self, request, obj, form, change):
        if not obj.author:
            obj.author = request.user
        super().save_model(request, obj, form, change)
443
backend/services/finale.py
ukibbb/goodmood-inc
0
2171185
import base64
import json
import os

import pandas as pd
import requests


# TO DO REFACTORING
class FinaleDownloader:

    def __init__(self, start_date: str = None, end_date: str = None):
        self.start_date = start_date
        self.end_date = end_date
        self.purchase_filter: list[list] = self._decode_filter(
            self.filter_purchase_orders)

    @property
    def purchase_orders_url(self):
        return f"https://app.finaleinventory.com/shuzyrock/doc/report/pivotTable/1630067173922/Reports.xls?format=xls&data=orderItem&attrName=%23%23purchase008&rowDimensions=~mpnM1cDLQGY5mZmZmZrAwMDAwMCZzQEFwMtAZjmZmZmZmsDAwMDAwJnM1sDLQHUdHrhR64XAwMDAAcCZzMTAAMDAwMDAwJnNAc3Ay0Bk9HrhR64UwMDAwMDAmc0B_sDLQHUdHrhR64XAwMDAwMCZzQHUwMtAhOAo9cKPXMDAwMDAwJnMyMDLQGT0euFHrhTAwMDAwMCZzMrAy0Bk9HrhR64UwMDAwMDAmczRwMtAZPR64UeuFMDAwMDAwA&metrics=~kZnNBaXAy0Bk9HrhR64UwMDAwMDA&filters={self.purchase_filter}&reportTitle=Purchase+order+w%2F+detail"

    @property
    def warehouse_stock_url(self):
        return f"https://app.finaleinventory.com/shuzyrock/doc/report/pivotTable/1630072367983/Reports.xls?format=xls&data=product&attrName=%23%23stock008&rowDimensions=~lpnNAf7Ay0BzDMzMzMzNwMDAwMDAmc0B1MDLQH1eZmZmZmbAwMDAwMCZzQHNwMz-wMDAwMDAmc0B9cDM_sDAwMDAwJnNAgmrU3RkXG5iaW4gSUTLQGMMzMzMzM3AwMDAwMCZzQILrFN0ZFxucGFja2luZ8tAYwzMzMzMzcDAwMDAwA&metrics=~lZnNBuuqVW5pdHNcblFvSMtAZ9AAAAAAAMDAwMDAwJnNBvSvVW5pdHNcblJlc2VydmVky0Bn0AAAAAAAwMDAwMDAmc0G8LBVbml0c1xuUmVtYWluaW5ny0Bn0AAAAAAAwMDAwMDAmc0G5q9Vbml0c1xuT24gb3JkZXLLQGfQAAAAAADAwMDAwMCZzQbisFVuaXRzXG5BdmFpbGFibGXLQGfQAAAAAADAwMDAwMA&filters=W1sicHJvZHVjdFN0YXR1cyIsWyJQUk9EVUNUX0FDVElWRSJdXSxbInByb2R1Y3RDYXRlZ29yeSIsbnVsbF0sWyJwcm9kdWN0TWFudWZhY3R1cmVyIixudWxsXSxbInByb2R1Y3RTdGRCaW5JZCIsbnVsbF0sWyJwcm9kdWN0UHJvZHVjdFVybCIsbnVsbF0sWyJwcm9kdWN0U3RvY2tFZmZlY3RpdmVEYXRlIixudWxsXV0%3D&reportTitle=Stock+for+each+product%2C+in+units"

    def purchase_orders(self):
        session: requests.Session = self._auth_request()
        request_url: str = self.purchase_orders_url
        response: requests.Response = session.get(request_url)
        PurchaseOrdersProcessor(response)


class WarehouseStock(FinaleDownloader):

    def download_warehouse_stock(self):
        session: requests.Session = self._auth_request()
        request_url: str = self.warehouse_stock_url
        response: requests.Response = session.get(request_url)
        dataframe = self._load_response_return_dataframe(response)

    @staticmethod
    def _load_response_return_dataframe(
            response: requests.Response) -> pd.DataFrame:
        dataframe = pd.read_excel(response.content)
        dataframe.to_excel("apps/static/finale/warehouse_stock.xlsx")
        return dataframe


class PurchaseOrdersProcessor:
    STATUS = "Status"
    STATUS_COMPLETED = "Completed"
    STATUS_COMMITTED = "Committed"
    STATUS_EDITABLE = "Editable"
    QUANTITY = "Quantity"
    PACKING = "Packing"
    PACKING_QUANTITY_MUL_RES = "Multiply Result"
    TO_DELETE = ("Price", "Subtotal sum")

    def __init__(self, response: requests.Response):
        self.purchase_orders: pd.DataFrame = pd.read_excel(response.content)
        self.FUNCTIONS = (
            self._delete_completed,
            self._split_packing,
            self._delete_columns,
            self._fill_na_cells,
            self._fill_empty_cells,
            self._multiply_packing_quantity,
        )
        self.process_purchase_orders()

    def process_purchase_orders(self):
        [function() for function in self.FUNCTIONS]
        self.purchase_orders.to_excel(
            "apps/static/finale/purchase_orders.xlsx")

    def _delete_completed(self):
        is_completed = False
        for index, status in self.purchase_orders[self.STATUS].items():
            if status == self.STATUS_COMPLETED:
                is_completed = True
                self.purchase_orders.drop(index, inplace=True)
            elif status == self.STATUS_EDITABLE or status == self.STATUS_COMMITTED:
                is_completed = False
            elif is_completed:
                self.purchase_orders.drop(index, inplace=True)
        self.purchase_orders.reset_index(drop=True, inplace=True)

    def _split_packing(self):
        for index, packing in self.purchase_orders[self.PACKING].items():
            if isinstance(packing, str) and packing.startswith("cs"):
                # Use .loc so the assignment is not lost to pandas chained indexing.
                self.purchase_orders.loc[index, self.PACKING] = float(
                    packing.split(" ")[1].split("/")[0])

    def _delete_columns(self):
        for name in self.TO_DELETE:
            del self.purchase_orders[name]

    def _fill_empty_cells(self):
        packing = self.purchase_orders[self.PACKING]
        for index, quantity in self.purchase_orders[self.QUANTITY].items():
            if quantity == "" and packing[index] != "":
                self.purchase_orders.loc[index, self.QUANTITY] = 1
            elif packing[index] == "" and quantity != "":
                self.purchase_orders.loc[index, self.PACKING] = 1

    def _fill_na_cells(self):
        self.purchase_orders.fillna("", inplace=True)

    def _multiply_packing_quantity(self):
        multiply_result = self.purchase_orders.apply(
            lambda row: row[self.QUANTITY] * row[self.PACKING]
            if not isinstance(row[self.QUANTITY], str) and not isinstance(
                row[self.PACKING], str) else "",
            axis=1,
        )
        if not self.purchase_orders.empty:
            self.purchase_orders[
                self.PACKING_QUANTITY_MUL_RES] = multiply_result
5,607
app/backend/src/couchers/migrations/versions/1c7784767710_add_superuser_column.py
foormea/couchers
226
2171379
"""add_superuser_column Revision ID: 1c7784767710 Revises: <PASSWORD> Create Date: 2021-06-16 15:20:23.475561 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "1c7784767710" down_revision = "4<PASSWORD>25<PASSWORD>" branch_labels = None depends_on = None def upgrade(): op.add_column("users", sa.Column("is_superuser", sa.Boolean(), server_default="false", nullable=False)) def downgrade(): op.drop_column("users", "is_superuser")
500
define.py
Vuong02011996/tools_make_labels_object_in_video
1
2170616
path_video = '/home/vuong/Videos/16:31:47.063371.mp4'
name_video = path_video.split('/')[-1].split('.')[0] + path_video.split('/')[-1].split('.')[1]
path_save_data = '/home/vuong/Videos/' + name_video
path_file_classes = 'classes.txt'

'''
Train a custom object detector:
    darknet.exe detector train data/obj.data yolo-obj.cfg yolov4.conv.137

Train 2 classes first ("No stopping and parking" vs "No parking").

B1. Copy the .cfg file and change 8 options:
    + batch
    + subdivisions
    + max_batches
    + steps to 80% and 90% of max_batches
    + network size width=416 height=416 or any value that is a multiple of 32
    + line classes=80 to your number of objects (in each of the 3 [yolo] layers)
    + change filters=255 to filters=(classes + 5)x3 in the 3 [convolutional] layers before each [yolo] layer

B2. Create the file obj.data in the directory build\darknet\x64\data\:
    classes = 2
    train = data/train.txt
    valid = data/test.txt
    names = data/obj.names
    backup = backup/
    + Create the file obj.names (same content as classes.txt) and put it in build\darknet\x64\data\
    + Create the files train.txt and test.txt and put them in build\darknet\x64\data\:
        data/obj/img1.jpg
        data/obj/img2.jpg
        data/obj/img3.jpg
        ...

B3. Download pre-trained weights for the convolutional layers and put them in build\darknet\x64
(A sketch for generating the train.txt / test.txt lists from step B2 follows below.)
'''
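Step B2 above asks for train.txt and test.txt files that list the training images one path per line. This is only a hedged sketch of how those lists could be generated; the data/obj directory, the .jpg extension, and the 90/10 split are assumptions, not part of the original file.

# Sketch: build the train.txt / test.txt lists described in step B2.
# Assumes images live under data/obj/ and a 90/10 train/test split; adjust as needed.
import os
import random

image_dir = "data/obj"
images = sorted("data/obj/" + name for name in os.listdir(image_dir) if name.endswith(".jpg"))
random.shuffle(images)

split = int(0.9 * len(images))
with open("data/train.txt", "w") as f:
    f.write("\n".join(images[:split]))
with open("data/test.txt", "w") as f:
    f.write("\n".join(images[split:]))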
1,353
adScheduler.py
summerlimes/mykijiji
1
2171084
import schedule
import time
import datetime

from adPoster import *


class adSchedule:

    def __init__(self, ad_id, repeat, delay=0):
        self.ad_id = ad_id
        self.repeat = repeat
        self.delay = delay
        self.job_file = "ad" + str(self.ad_id) + "/ad" + str(self.ad_id) + ".yml"

    def printHeader(self):
        logger(str(datetime.datetime.now()))
        logger("AD#" + str(self.ad_id) + " is being reposted every " + str(self.repeat) + " minutes\n")
        logger("AD FILE: " + str(self.job_file) + "\n")

    def printFooter(self, rc):
        if rc == 0:
            logger("No critical errors. Check ADPOSTER OUTPUTS for details")
            logger("=====================================================")
        else:
            logger("There was an error. Check ADPOSTER ERROR for details")
            logger("=====================================================")

    def start(self):
        # delay
        time.sleep(self.delay * 60)
        self.printHeader()
        # running the ad first time
        rc = adPoster(self.job_file, repost=False)
        self.printFooter(rc)
        # schedule
        schedule.every(self.repeat).minutes.do(self.repost)

    def repost(self):
        self.printHeader()
        # reposting because repost=True by default [check adPoster]
        rc = adPoster(self.job_file)
        self.printFooter(rc)
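start() registers the repost job with the schedule library but nothing in this class runs the pending jobs. A hedged sketch of a driver loop follows; the ad IDs and intervals are illustrative, and logger/adPoster are assumed to come from the adPoster module as in the class above.

# Sketch: drive adSchedule instances; schedule.run_pending() fires the jobs
# that start() registered. Values here are illustrative only.
import time
import schedule

ads = [adSchedule(ad_id=1, repeat=60), adSchedule(ad_id=2, repeat=90, delay=5)]
for ad in ads:
    ad.start()              # posts once, then registers ad.repost with schedule

while True:
    schedule.run_pending()  # run any repost jobs whose interval has elapsed
    time.sleep(1)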
1,380
modules_lib/xbmc/xbmc_server.py
hephaestus9/Ironworks
1
2170109
# -*- coding: utf-8 -*-
from flask import session
import netifaces as nif

from ironworks import serverTools


class XBMCServer():

    def __init__(self):
        """Table for the XBMC server config"""
        self.bleex = serverTools.getSystemDb()
        self.bleex.beginTransaction()
        self.bleex.checkTable("xbmc_servers", [
            {"name": "id", "type": "INT NOT NULL AUTO_INCREMENT PRIMARY KEY"},
            {"name": "label", "type": "text"},
            {"name": "position", "type": "text"},
            {"name": "hostname", "type": "text"},
            {"name": "port", "type": "text"},
            {"name": "xbmc_username", "type": "text"},
            {"name": "xbmc_password", "type": "text"},
            {"name": "mac_address", "type": "text"},
            {"name": "active_server", "type": "text"},
            {"name": "username", "type": "text"}])
        self.bleex.commitTransaction()

    def getNumXbmcServers(self):
        serverList = self.bleex.select("xbmc_servers")
        servers = len(serverList)
        return servers

    def getXBMCServers(self, orderBy={"position": "DESC"}):
        servers = []
        serverDict = {}
        serverList = self.bleex.select("xbmc_servers", where={"username": session["username"]}, orderBy=orderBy)
        for server in serverList:
            serverDict["id"] = server[0]
            serverDict["label"] = server[1]
            serverDict["position"] = server[2]
            serverDict["hostname"] = server[3]
            serverDict["port"] = server[4]
            serverDict["xbmc_username"] = server[5]
            serverDict["xbmc_password"] = server[6]
            serverDict["mac_address"] = server[7]
            serverDict["active_server"] = server[8]
            servers.append(serverDict)
            serverDict = {}
        return servers

    def getServerById(self, server_id):
        serverDict = {}
        server = self.bleex.select("xbmc_servers", where={"username": session["username"], "id": server_id})[0]
        serverDict["id"] = server[0]
        serverDict["label"] = server[1]
        serverDict["position"] = server[2]
        serverDict["hostname"] = server[3]
        serverDict["port"] = server[4]
        serverDict["xbmc_username"] = server[5]
        serverDict["xbmc_password"] = server[6]
        serverDict["mac_address"] = server[7]
        serverDict["active_server"] = server[8]
        return serverDict

    def deleteServer(self, server_id):
        self.bleex.delete("xbmc_servers", where={"username": session["username"], "id": server_id})
        return

    def setXBMCServer(self, label, hostname, port='8080', xbmc_username="", xbmc_password="",
                      mac_address="", position="0", active_server="False", server={}):
        if server != {}:
            self.bleex.beginTransaction()
            self.bleex.insertOrUpdate("xbmc_servers", server,
                                      on={"mac_address": mac_address, "hostname": hostname,
                                          "username": session["username"]})
            self.bleex.commitTransaction()
        else:
            if mac_address == "":
                mac_address = self.mac_for_ip(hostname)
            data = {"label": label,
                    "position": position,
                    "hostname": hostname,
                    "port": port,
                    "xbmc_username": xbmc_username,
                    "xbmc_password": xbmc_password,
                    "mac_address": mac_address,
                    "active_server": active_server,
                    "username": session["username"]}
            self.bleex.beginTransaction()
            self.bleex.insertOrUpdate("xbmc_servers", data,
                                      on={"mac_address": mac_address, "hostname": hostname,
                                          "username": session["username"]})
            self.bleex.commitTransaction()

    def mac_for_ip(self, ip):
        'Returns the MAC of the interface that has the given IP, or "" if not found'
        for i in nif.interfaces():
            addrs = nif.ifaddresses(i)
            try:
                if_mac = addrs[nif.AF_LINK][0]['addr']
                if_ip = addrs[nif.AF_INET][0]['addr']
            except (IndexError, KeyError):
                # ignore ifaces that don't have MAC or IP
                if_mac = if_ip = None
            if if_ip == ip:
                return if_mac
        return ""
4,277
pairs_crypto/binance_analysis/binance_ecm_utils.py
factorwonk/fml
0
2170932
import os
import numpy as np
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
from numpy.linalg import inv
from scipy.stats import t


def calc_residuals(df):
    x = df.iloc[:, 0]  # e.g. BTC/USD
    y = df.iloc[:, 1]  # e.g. ETH/USD
    X1 = sm.add_constant(x)
    # Y1 = sm.add_constant(y)
    ols1 = sm.OLS(y, X1).fit()
    # ols2 = sm.OLS(x, Y1).fit()
    # calculate residuals here
    residuals = ols1.resid
    # residuals2 = ols2.resid
    return residuals


def test_stationarity(residuals):
    adf_data = pd.DataFrame(residuals)
    adf_data.columns = ["y"]
    adf_data["drift_constant"] = 1
    # Lag residual
    adf_data["y-1"] = adf_data["y"].shift(1)
    adf_data.dropna(inplace=True)
    # Diff between residual and lag residual
    adf_data["deltay1"] = adf_data["y"] - adf_data["y-1"]
    # Lag difference
    adf_data["deltay-1"] = adf_data["deltay1"].shift(1)
    adf_data.dropna(inplace=True)
    target_y = pd.DataFrame(adf_data["deltay1"], columns=["deltay1"])
    adf_data.drop(["y", "deltay1"], axis=1, inplace=True)
    # Auto regressing the residuals with lag1, drift constant and lagged 1 delta (delta_et-1)
    adf_regressor_model = sm.OLS(target_y, adf_data)
    adf_regressor = adf_regressor_model.fit()
    # Returning the results
    print(adf_data)
    print(adf_regressor.summary())
    return adf_regressor
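calc_residuals regresses the second price column on the first, and test_stationarity runs a hand-rolled ADF-style regression on that spread. A hedged usage sketch follows showing how the two might be wired together and cross-checked against statsmodels' adfuller; the CSV path and column names are invented for illustration.

# Sketch: use the two helpers on a two-column price frame and cross-check
# the hand-rolled ADF regression with statsmodels' adfuller.
# "btc_eth_close.csv" and its column names are illustrative assumptions.
import pandas as pd
from statsmodels.tsa.stattools import adfuller

prices = pd.read_csv("btc_eth_close.csv")[["btc_usd", "eth_usd"]]
residuals = calc_residuals(prices)
adf_regressor = test_stationarity(residuals)

# adfuller returns (test statistic, p-value, ...); a small p-value suggests the
# spread is stationary, i.e. the pair may be cointegrated.
adf_stat, p_value = adfuller(residuals, regression="c")[:2]
print(adf_stat, p_value)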
1,381
software/studio50_fly/examples/old/blob_finder_example.py
willdickson/studio50_fly
1
2170836
import cv2
import numpy as np

from studio50_fly.utility import get_monitor_dict
from studio50_fly.blob_finder import BlobFinder

blob_finder = BlobFinder(threshold=30, min_area=50)

frame_width = 1280
frame_height = 1024

monitor_dict = get_monitor_dict()
user_monitor = monitor_dict['HDMI-0']
proj_monitor = monitor_dict['DP-1']

cv2.namedWindow('projector')
cv2.moveWindow('projector', proj_monitor.x, proj_monitor.y)
proj_image = 255*np.ones((proj_monitor.height, proj_monitor.width, 3), dtype=np.uint8)
cv2.imshow('projector', proj_image)

cap = cv2.VideoCapture('/dev/video0')
if not cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')):
    raise RuntimeError('unable to set fourcc to mjpg')
if not cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width):
    raise RuntimeError('unable to set frame width')
if not cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height):
    raise RuntimeError('unable to set frame height')
if not cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1):
    raise RuntimeError('unable to set auto exposure')
if not cap.set(cv2.CAP_PROP_EXPOSURE, 200):
    raise RuntimeError('unable to set exposure')

#cv2.namedWindow('camera')
#cv2.moveWindow('camera', user_monitor.width - frame_width, 0)
cv2.namedWindow('background')
cv2.moveWindow('background', user_monitor.width - frame_width, 0)

have_bg = False
done = False
cnt = 0

while not done:
    ret, image = cap.read()
    if ret:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if have_bg:
            diff_image = cv2.absdiff(gray_image, bg_image)
            blob_list, blob_image, thresh_image = blob_finder.find(diff_image)
            print(blob_list)
            #cv2.imshow('camera', image)
            cv2.imshow('blob_image', blob_image)
        else:
            gray_image = cv2.medianBlur(gray_image, 5)
            if cnt < 3:
                bg_image = np.zeros(gray_image.shape, dtype=np.uint8)
            else:
                pass
            bg_image = np.maximum(bg_image, gray_image)
            cv2.imshow('background', bg_image)
            cnt += 1

    key = cv2.waitKey(1) & 0xff
    if key == ord('g'):
        have_bg = True
        cv2.destroyWindow('background')
        cv2.namedWindow('blob_image')
        cv2.moveWindow('blob_image', user_monitor.width - frame_width, 0)
    if key == ord('q'):
        done = True

cv2.destroyAllWindows()
cap.release()
2,402
bot/reviewbot/tools/tests/test_rustfmt.py
reviewboard/ReviewBot
91
2171205
"""Unit tests for reviewbot.tools.rustfmt.""" from __future__ import unicode_literals import os import kgb import six from reviewbot.tools.rustfmt import RustfmtTool from reviewbot.tools.testing import (BaseToolTestCase, ToolTestCaseMetaclass, integration_test, simulation_test) from reviewbot.utils.filesystem import tmpdirs from reviewbot.utils.process import execute @six.add_metaclass(ToolTestCaseMetaclass) class RustfmtToolTests(BaseToolTestCase): """Unit tests for reviewbot.tools.rustfmt.RustfmtTool.""" tool_class = RustfmtTool tool_exe_config_key = 'rustfmt' tool_exe_path = '/path/to/rustfmt' @integration_test() @simulation_test(stdout=( 'Diff in /test.rs at line 1:\n' ' fn main() {\n' '-println!("Hi")\n' '+ println!("Hi")\n' '}\n' )) def test_execute(self): """Testing RustfmtTool.execute""" review, review_file = self.run_tool_execute( filename='test.rs', file_contents=( b'fn main() {\n' b'println!("Hi")\n' b'}\n' )) self.assertEqual(review.comments, [ { 'filediff_id': review_file.id, 'first_line': 1, 'num_lines': 1, 'text': ( 'This file contains formatting errors and should be run ' 'through `rustfmt`.' ), 'issue_opened': True, 'rich_text': True, }, ]) self.assertSpyCalledWith( execute, [ self.tool_exe_path, '-q', '--check', '--color=never', os.path.join(tmpdirs[-1], 'test.rs'), ], ignore_errors=True, return_errors=True) @integration_test() @simulation_test(stderr=( 'error: this file contains an unclosed delimiter\n' ' --> /test.rs:2:27\n' ' |\n' '1 | afn main() {\n' ' - unclosed delimiter\n' '2 | println!("Hello world!");\n' ' | ^\n' '\n' 'error: expected one of `!` or `::`, found `main`\n' ' --> /test.rs:1:6\n' ' |\n' '1 | afn main() {\n' ' | ^^^^ expected one of `!` or `::`\n' )) def test_execute_with_syntax_error(self): """Testing RustfmtTool.execute with syntax error""" review, review_file = self.run_tool_execute( filename='test.rs', file_contents=( b'func main() {}\n' )) self.assertEqual(review.comments, [ { 'filediff_id': review_file.id, 'first_line': 1, 'num_lines': 1, 'text': ( 'expected one of `!` or `::`, found `main`\n' '\n' 'Column: 6' ), 'issue_opened': True, 'rich_text': False, }, ]) self.assertSpyCalledWith( execute, [ self.tool_exe_path, '-q', '--check', '--color=never', os.path.join(tmpdirs[-1], 'test.rs'), ], ignore_errors=True, return_errors=True) @integration_test() @simulation_test() def test_execute_with_success(self): """Testing RustfmtTool.execute with no errors""" review, review_file = self.run_tool_execute( filename='test.rs', file_contents=( b'fn main() {\n' b' println!("Hello world!");\n' b'}\n' )) self.assertEqual(review.comments, []) self.assertSpyCalledWith( execute, [ self.tool_exe_path, '-q', '--check', '--color=never', os.path.join(tmpdirs[-1], 'test.rs'), ], ignore_errors=True, return_errors=True) def setup_simulation_test(self, stdout='', stderr=''): """Set up the simulation test for rustfmt. This will spy on :py:func:`~reviewbot.utils.process.execute`, making it return the provided stdout and stderr results. Args: stdout (unicode, optional): The outputted stdout. stderr (unicode, optional): The outputted stderr. """ self.spy_on(execute, op=kgb.SpyOpReturn((stdout, stderr)))
4,795
scratch_ml/supervised_learning/random_forest.py
siAyush/scratch_ml
23
2169996
import numpy as np
import progressbar
import math

from scratch_ml.utils import bar_widget, get_random_subsets
from scratch_ml.supervised_learning import ClassificationTree


class RandomForest():
    """Random Forest classifier"""

    def __init__(self, n_estimators=100, max_features=None, min_samples_split=2,
                 min_gain=0, max_depth=float("inf")):
        self.n_estimators = n_estimators              # Number of trees
        self.max_features = max_features              # Maximum number of features per tree
        self.min_samples_split = min_samples_split
        self.min_gain = min_gain                      # Minimum information gain
        self.max_depth = max_depth                    # Maximum depth for tree
        self.progressbar = progressbar.ProgressBar(widgets=bar_widget)

        self.tree = []
        for i in range(n_estimators):
            tree = ClassificationTree(
                min_samples_split=self.min_samples_split,
                min_impurity=min_gain,
                max_depth=self.max_depth)
            self.tree.append(tree)

    def fit(self, x, y):
        n_features = np.shape(x)[1]
        # If max_features has not been defined, select sqrt(n_features)
        if self.max_features is None:
            self.max_features = int(math.sqrt(n_features))
        # Choose one random subset of the data for each tree
        subsets = get_random_subsets(x, y, self.n_estimators)
        for i in self.progressbar(range(self.n_estimators)):
            x_subset, y_subset = subsets[i]
            # select random subsets of the features
            idx = np.random.choice(
                range(n_features), size=self.max_features, replace=True)
            self.tree[i].feature_i = idx
            x_subset = x_subset[:, idx]
            self.tree[i].fit(x_subset, y_subset)

    def predict(self, x):
        y_preds = np.empty((x.shape[0], len(self.tree)))
        for i, tree in enumerate(self.tree):
            idx = tree.feature_i
            # Make a prediction based on those features
            prediction = tree.predict(x[:, idx])
            y_preds[:, i] = prediction
        y_pred = []
        # Select the most common class prediction
        for sample_predictions in y_preds:
            y_pred.append(np.bincount(
                sample_predictions.astype("int")).argmax())
        return y_pred
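A hedged usage sketch of this scratch-built forest follows. It assumes the scratch_ml package (ClassificationTree and its helpers) is importable; the synthetic data, labels, and hyperparameters are invented for illustration only.

# Sketch: fit the forest on a tiny synthetic two-class problem.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 5))
y = (x[:, 0] + x[:, 1] > 0).astype(int)   # toy labels from a linear rule

forest = RandomForest(n_estimators=10, max_depth=5)
forest.fit(x, y)
accuracy = np.mean(np.array(forest.predict(x)) == y)
print("train accuracy:", accuracy)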
2,339
quizzer/admin.py
kaushiksk/Qrios
0
2170859
from django.contrib import admin

from .models import Quizzer

# Register your models here.
admin.site.register(Quizzer)
119
custom_components/frigate/binary_sensor.py
Odis/frigate-hass-integration
0
2170985
"""Binary sensor platform for Frigate.""" from __future__ import annotations import logging from typing import Any from homeassistant.components.binary_sensor import ( DEVICE_CLASS_MOTION, BinarySensorEntity, ) from homeassistant.components.mqtt.subscription import async_subscribe_topics from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity_platform import AddEntitiesCallback from . import get_friendly_name, get_frigate_device_identifier from .const import DOMAIN, NAME, VERSION _LOGGER: logging.Logger = logging.getLogger(__package__) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Binary sensor entry setup.""" frigate_config = hass.data[DOMAIN]["config"] camera_objects = set() for cam_name, cam_config in frigate_config["cameras"].items(): for obj in cam_config["objects"]["track"]: camera_objects.add((cam_name, obj)) zone_objects = set() for cam, obj in camera_objects: for zone_name in frigate_config["cameras"][cam]["zones"]: zone_objects.add((zone_name, obj)) async_add_entities( [ FrigateMotionSensor(entry, frigate_config, cam_name, obj) for cam_name, obj in camera_objects.union(zone_objects) ] ) class FrigateMotionSensor(BinarySensorEntity): """Frigate Motion Sensor class.""" def __init__( self, entry: ConfigEntry, frigate_config: dict[str, Any], cam_name: str, obj_name: str, ) -> None: """Construct a new FrigateMotionSensor.""" self._entry = entry self._frigate_config = frigate_config self._cam_name = cam_name self._obj_name = obj_name self._is_on = False self._available = False self._sub_state = None self._topic = f"{self._frigate_config['mqtt']['topic_prefix']}/{self._cam_name}/{self._obj_name}" self._availability_topic = ( f"{self._frigate_config['mqtt']['topic_prefix']}/available" ) async def async_added_to_hass(self) -> None: """Subscribe mqtt events.""" await super().async_added_to_hass() await self._subscribe_topics() async def _subscribe_topics(self) -> None: """(Re)Subscribe to topics.""" @callback def state_message_received(msg: str) -> None: """Handle a new received MQTT state message.""" try: self._is_on = int(msg.payload) > 0 except ValueError: self._is_on = False self.async_write_ha_state() @callback def availability_message_received(msg: str) -> None: """Handle a new received MQTT availability message.""" self._available = msg.payload == "online" self.async_write_ha_state() self._sub_state = await async_subscribe_topics( self.hass, self._sub_state, { "state_topic": { "topic": self._topic, "msg_callback": state_message_received, "qos": 0, }, "availability_topic": { "topic": self._availability_topic, "msg_callback": availability_message_received, "qos": 0, }, }, ) @property def unique_id(self) -> str: """Return a unique ID for this entity.""" return f"{DOMAIN}_{self._cam_name}_{self._obj_name}_binary_sensor" @property def device_info(self) -> dict[str, Any]: """Return device information.""" return { "identifiers": {get_frigate_device_identifier(self._entry, self._cam_name)}, "via_device": get_frigate_device_identifier(self._entry), "name": get_friendly_name(self._cam_name), "model": VERSION, "manufacturer": NAME, } @property def name(self) -> str: """Return the name of the sensor.""" return f"{get_friendly_name(self._cam_name)} {self._obj_name} Motion".title() @property def is_on(self) -> bool: """Return true if the binary sensor is on.""" return self._is_on @property def device_class(self) -> str: """Return the 
device class.""" return DEVICE_CLASS_MOTION @property def available(self) -> bool: """Determine if the entity is available.""" return self._available
4,650
example/sample/admin.py
imtapps/django-admin-ext
8
2171129
from django.contrib import admin

from djadmin_ext.helpers import BaseAjaxModelAdmin
from djadmin_ext.admin_forms import BaseAjaxModelForm
from sample import models


class MealAdminForm(BaseAjaxModelForm):
    ajax_change_fields = ["food_type", "main_ingredient"]

    @property
    def dynamic_fields(self):
        selected_food_type = self.data.get('food_type') or self.initial.get('food_type')
        if not selected_food_type:
            return {}

        try:
            selected_ingredient = int(self.get_selected_value('main_ingredient'))
        except (TypeError, ValueError):
            selected_ingredient = None

        food_type = models.FoodType.objects.get(pk=selected_food_type)
        ingredients = models.Ingredient.objects.filter(food_type=food_type)
        fields = self.setup_fields(ingredients, selected_ingredient)
        return fields

    def setup_fields(self, ingredients, selected_ingredient):
        fields = {}
        fields['main_ingredient'] = self.create_field_and_assign_initial_value(ingredients, selected_ingredient)

        if fields['main_ingredient']().initial:
            details = models.IngredientDetails.objects.filter(ingredient=selected_ingredient)
            if selected_ingredient and details:
                selected_ingredient_details = self.get_selected_value('ingredient_details')
                fields['ingredient_details'] = self.create_field_and_assign_initial_value(
                    details, selected_ingredient_details
                )
        return fields

    def create_field_and_assign_initial_value(self, queryset, selected_value):
        return lambda: super(MealAdminForm, self).create_field_and_assign_initial_value(queryset, selected_value)

    class Meta(object):
        fields = ['food_type']
        model = models.Meal


class MealAdmin(BaseAjaxModelAdmin):
    form = MealAdminForm


admin.site.register(models.FoodType)
admin.site.register(models.Ingredient)
admin.site.register(models.IngredientDetails)
admin.site.register(models.Meal, MealAdmin)
2,047
scrapy_doubanmovie/scrapy_doubanmovie/spiders/douban_spider.py
davidvivi/you-need-Python
4
2171461
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy_doubanmovie.scrapy_doubanmovie.items import ScrapyDoubanmovieItem
from urllib.parse import urljoin


# Generated with: scrapy genspider douban_spider movie.douban.com
class DoubanSpiderSpider(scrapy.Spider):
    # Spider name
    name = 'douban_spider'
    # Allowed domains
    allowed_domains = ['movie.douban.com']
    # Entry URL, handed to the scheduler
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        item = ScrapyDoubanmovieItem()
        selector = Selector(response)
        Movies = selector.xpath('//div[@class="info"]')
        for eachMovie in Movies:
            title = eachMovie.xpath('div[@class="hd"]/a/span/text()').extract()  # multiple span tags
            fullTitle = "".join(title)  # join the pieces into a single string
            introduce = eachMovie.xpath('div[@class="bd"]/p/text()').extract()
            star = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[0]
            evaluate = eachMovie.xpath('div[@class="bd"]/div[@class="star"]/span/text()').extract()[1]
            quote = eachMovie.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract()
            # quote may be empty, so check it first
            if quote:
                quote = quote[0]
            else:
                quote = ''
            item['title'] = fullTitle
            item['introduce'] = ';'.join([x.strip() for x in introduce if x.strip() != ''])
            item['star'] = star
            item['evaluate'] = evaluate
            item['quote'] = quote
            yield item
        nextLink = selector.xpath('//span[@class="next"]/link/@href').extract()
        # page 10 is the last page and has no next-page link
        if nextLink:
            nextLink = nextLink[0]
            yield Request(urljoin(response.url, nextLink), callback=self.parse)
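This spider would normally be run with the scrapy CLI (scrapy crawl douban_spider). The snippet below is only a sketch of driving it programmatically via CrawlerProcess; the output filename and the user agent value are illustrative assumptions, not part of the project.

# Sketch: run the spider from a script and dump items to JSON.
from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(settings={
    "FEEDS": {"douban_top250.json": {"format": "json"}},
    "USER_AGENT": "Mozilla/5.0",  # douban tends to reject the default scrapy UA
})
process.crawl(DoubanSpiderSpider)
process.start()  # blocks until the crawl finishes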
1,841
ms2ldaviz/massbank/urls.py
RP0001/ms2ldaviz
6
2170112
from django.conf.urls import include, url

from massbank import views

urlpatterns = [
    url(r'^generate_massbank/$', views.generate_massbank, name='generate_massbank'),
    url(r'^generate_massbank_multi_m2m/$', views.generate_massbank_multi_m2m, name='generate_massbank_multi_m2m'),
]
296
server/architext/entities/exceptions.py
JimenaAndrea/architext
3
2171069
# Exceptions related to entities

class BadItem(Exception):
    """Raised when saving an item that does not abide by the item prerequisites"""

class EmptyName(BadItem):
    """Raised when creating an item with an empty name"""

class WrongNameFormat(BadItem):
    """Raised when creating an item with a badly formatted name"""

class RoomNameClash(BadItem):
    """Raised when creating an item with the same name as an exit in the same room"""

class TakableItemNameClash(BadItem):
    """Raised when creating an Item or Room that may be unique in its room but may cause
    problems in other ways, e.g. if there is a takable item with that name somewhere else"""

class NameNotGloballyUnique(BadItem):
    """Raised when creating a takable item whose name is already present in any item or exit of the world."""

class CantDelete(Exception):
    """Raised when trying to delete something that can't be deleted"""

class ValueWithLineBreaks(Exception):
    """Raised when a value that should not have line breaks has line breaks"""

class ValueTooLong(Exception):
    """Raised when a value exceeds its max length"""

class PublicWorldLimitReached(Exception):
    """Raised when trying to publish a world above the limit"""
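Because the item-validation errors all inherit from BadItem, a caller can handle every prerequisite failure with a single except clause. The sketch below illustrates that pattern; save_item and the dict-based item are hypothetical stand-ins, not the real architext entity API.

# Sketch: catch all item-validation failures through the BadItem base class.
def save_item(item):
    if not item.get("name"):
        raise EmptyName("items need a non-empty name")
    if "\n" in item.get("description", ""):
        raise ValueWithLineBreaks("descriptions cannot contain line breaks")
    return item

try:
    save_item({"name": "", "description": "a rusty key"})
except BadItem as error:  # covers EmptyName, WrongNameFormat, RoomNameClash, ...
    print(f"could not save item: {error}")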
1,236