\"\n\n\n\n\n\tif read_navbar not in body:\n\t\tbody = body.replace(\"\", \"\\n\" + read_navbar)\n\n\tif read_ga not in body:\n\t\tbody = body.replace(\"\", read_ga + \"\\n\")\n\n\tif read_disqus not in body:\n\t\tbody = body.replace(\"\", read_disqus + \"\\n\")\n\n\tif read_mathjax not in body:\n\t\tbody = body.replace(\"\", read_mathjax + \"\\n\")\n\n\tif read_css not in body:\n\t\tbody = body.replace(\"\", \"\\n\" + read_css)\n\t\tbody = body.replace(\"\", read_css + \"\\n\")\n\n\n\t# Put social media icons\n\t#body = body.replace(\"img src\", \"img width='100%' src\")\n\n\n\t#body = body.replace(\" rendered_html\", \"\")\n\tbody = body.replace(\".rendered_html{overflow-x:auto\" , \".rendered_html{overflow-x:auto;overflow-y: hidden;\")\n\tbody = body.replace(\"#notebook{font-size:14px;line-height:20px;\", \"#notebook{font-size:20px;line-height:29px;\")\n\tbody = body.replace(\"div.text_cell_render{outline:0;resize:none;width:inherit;border-style:none;padding:.5em .5em .5em .4em;color:#000;\",\n\t \"div.text_cell_render{outline:0;resize:none;width:inherit;border-style:none;padding:.5em .5em .5em .4em;color:#777;\")\n\n\n\n\n\n\thtml_file = notebook_file.replace(\".ipynb\", \".html\")\n\thtml_file_writer = open(html_file, 'w')\n\thtml_file_writer.write(body)\n\thtml_file_writer.close()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-5824497082864275000,"string":"-5,824,497,082,864,275,000"},"line_mean":{"kind":"number","value":28.5433070866,"string":"28.543307"},"line_max":{"kind":"number","value":138,"string":"138"},"alpha_frac":{"kind":"number","value":0.6604477612,"string":"0.660448"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109407,"cells":{"repo_name":{"kind":"string","value":"spaceone/tehbot"},"path":{"kind":"string","value":"tehbot/plugins/tmdb/__init__.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3213"},"content":{"kind":"string","value":"from tehbot.plugins import *\nimport tehbot.plugins as plugins\nimport tmdbsimple as tmdb\n\nclass MoviePlugin(StandardPlugin):\n \"\"\"Shows information about movies from themoviedb.org\"\"\"\n\n def __init__(self):\n StandardPlugin.__init__(self)\n self.parser.add_argument(\"movie\")\n\n def execute(self, connection, event, extra, dbconn):\n tmdb.API_KEY = self.settings[\"tmdb_api_key\"]\n try:\n pargs = self.parser.parse_args(extra[\"args\"])\n if self.parser.help_requested:\n return self.parser.format_help().strip()\n except Exception as e:\n return u\"Error: %s\" % str(e)\n\n id = -1\n res = tmdb.Search().movie(query=pargs.movie)\n if res[\"total_results\"] > 0:\n id = res[\"results\"][0][\"id\"]\n\n txt = \"No such movie.\"\n if id != -1:\n movie = tmdb.Movies(id)\n movie_info = movie.info()\n txt = \"\\x02%s\\x02\" % movie_info[\"title\"]\n if movie_info[\"title\"] != movie_info[\"original_title\"]:\n txt += \" (%s)\" % movie_info[\"original_title\"]\n if movie_info[\"release_date\"]:\n txt += \" | \\x02Released:\\x02 %s\" % movie_info[\"release_date\"]\n if movie_info[\"vote_count\"] > 0:\n txt += \" | \\x02Rating:\\x02 %.1f/10\" % movie_info[\"vote_average\"]\n if movie_info[\"homepage\"]:\n txt += \" | \\x02Homepage:\\x02 %s\" % movie_info[\"homepage\"]\n\n txt += \"\\n\" + plugins.split(movie_info[\"overview\"])\n\n return 
txt\n\nregister_plugin(\"movie\", MoviePlugin())\n\nclass TvPlugin(StandardPlugin):\n \"\"\"Shows information about TV series from themoviedb.org\"\"\"\n\n def __init__(self):\n StandardPlugin.__init__(self)\n self.parser.add_argument(\"show\")\n\n def execute(self, connection, event, extra, dbconn):\n tmdb.API_KEY = self.settings[\"tmdb_api_key\"]\n try:\n pargs = self.parser.parse_args(extra[\"args\"])\n if self.parser.help_requested:\n return self.parser.format_help().strip()\n except Exception as e:\n return u\"Error: %s\" % str(e)\n\n id = -1\n res = tmdb.Search().tv(query=pargs.show)\n if res[\"total_results\"] > 0:\n id = res[\"results\"][0][\"id\"]\n\n txt = \"No such movie.\"\n if id != -1:\n movie = tmdb.TV(id)\n movie_info = movie.info()\n txt = \"\\x02%s\\x02\" % movie_info[\"name\"]\n if movie_info[\"name\"] != movie_info[\"original_name\"]:\n txt += \" (%s)\" % movie_info[\"original_name\"]\n if movie_info[\"first_air_date\"]:\n txt += \" | \\x02First Aired:\\x02 %s\" % movie_info[\"first_air_date\"]\n if movie_info[\"number_of_seasons\"]:\n txt += \" | \\x02Nr. of Seasons:\\x02 %d\" % movie_info[\"number_of_seasons\"]\n if movie_info[\"vote_count\"] > 0:\n txt += \" | \\x02Rating:\\x02 %.1f/10\" % movie_info[\"vote_average\"]\n if movie_info[\"homepage\"]:\n txt += \" | \\x02Homepage:\\x02 %s\" % movie_info[\"homepage\"]\n\n txt += \"\\n\" + plugins.split(movie_info[\"overview\"])\n\n return txt\n\nregister_plugin(\"tv\", TvPlugin())\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":1482561011729763800,"string":"1,482,561,011,729,763,800"},"line_mean":{"kind":"number","value":35.9310344828,"string":"35.931034"},"line_max":{"kind":"number","value":88,"string":"88"},"alpha_frac":{"kind":"number","value":0.5309679427,"string":"0.530968"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109408,"cells":{"repo_name":{"kind":"string","value":"thisismyrobot/DisPy"},"path":{"kind":"string","value":"dispy.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4224"},"content":{"kind":"string","value":"# Copyright 2012 Robert Wallhead\n# robert@thisismyrobot.com\n# \n#\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nimport inspect\nimport xmlrpclib\n\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer\n\n\nPORT = 8000\n\n\nclass Server(object):\n \"\"\" A general purpose XML-RPC-exposing server.\n \"\"\"\n\n def __init__(self, listen_ip='0.0.0.0'):\n \"\"\" Prepare the XML-RPC server, map the exposed functions.\n \"\"\"\n self.server = SimpleXMLRPCServer((listen_ip, PORT), logRequests=False)\n self.server.register_function(self._init, 'init')\n self.server.register_function(self._call, 'call')\n self.server.register_function(self._get, 'get')\n self.server.register_function(self._set, 'set')\n self.cls = {}\n\n def start(self):\n \"\"\" Start the server.\n \"\"\"\n self.server.serve_forever()\n\n def stop(self):\n \"\"\" Stop the server.\n \"\"\"\n self.server.shutdown()\n\n def _init(self, cls_src, *args):\n \"\"\" Register and initialise a class, class id. Only to be called via\n XML-RPC.\n \"\"\"\n existing_classes = dir()\n exec(cls_src)\n new_class = [c for c in dir() if c not in existing_classes][0]\n next_id = len(self.cls)\n self.cls[next_id] = eval(new_class)(*args)\n return next_id\n\n def _call(self, cls_id, method, *args):\n \"\"\" Call a method. Only to be called via XML-RPC.\n \"\"\"\n return getattr(self.cls[cls_id], method)(*args)\n\n def _get(self, cls_id, attr):\n \"\"\" Return the value of an attribute. Only to be called via XML-RPC.\n \"\"\"\n return getattr(self.cls[cls_id], attr)\n\n def _set(self, cls_id, attr, val):\n \"\"\" Set the value of an attribute. Only to be called via XML-RPC.\n \"\"\"\n setattr(self.cls[cls_id], attr, val)\n return 0\n\n\nclass WrapperTool(object):\n \"\"\" A toolkit to wrap class instances to allow them to be accessed\n transparently over XML-RPC.\n \"\"\"\n\n def __init__(self, server_ip='127.0.0.1'):\n \"\"\" Create the XML-RPC proxy connection to the server.\n \"\"\"\n address = 'http://' + server_ip + ':' + str(PORT)\n self.proxy = xmlrpclib.ServerProxy(address)\n\n def _get_src(self, cls):\n \"\"\" Return the source code of a class\n \"\"\"\n return inspect.getsource(cls)\n\n def _map_methods(self, cls, instance_id):\n \"\"\" Map the methods to XML-RPC calls. Why not use the server module's\n register_instance() method you ask? 
Well, it doesn't expose\n members, only methods, and adding member access again is nearly\n impossible....\n \"\"\"\n for name, member in inspect.getmembers(cls):\n if inspect.isfunction(member):\n setattr(cls, name,\n lambda x, *y: self.proxy.call(instance_id,\n name, *y))\n\n def _map_members(self, cls, instance_id):\n \"\"\" Map the members to XML-RPC calls via the magic methods.\n \"\"\"\n setattr(cls, '__init__', lambda x: None)\n setattr(cls, '__getattr__',\n lambda x, y: self.proxy.get(instance_id, y))\n setattr(cls, '__setattr__',\n lambda x, y, z: self.proxy.set(instance_id, y, z))\n\n def init_cls(self, cls, *args):\n \"\"\" Wrap a class, returning a stubb'd instance.\n \"\"\"\n cls_src = self._get_src(cls)\n instance_id = self.proxy.init(cls_src, *args)\n self._map_methods(cls, instance_id)\n self._map_members(cls, instance_id)\n return cls()\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-5889848048627380000,"string":"-5,889,848,048,627,380,000"},"line_mean":{"kind":"number","value":33.064516129,"string":"33.064516"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.595407197,"string":"0.595407"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109409,"cells":{"repo_name":{"kind":"string","value":"icomms/wqmanager"},"path":{"kind":"string","value":"apps/webservice/views.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"13148"},"content":{"kind":"string","value":"try:\n import json\nexcept ImportError:\n import simplejson as json\n\nfrom rapidsms.webui import settings\nfrom datetime import datetime\nfrom django.http import HttpResponse, HttpResponseServerError, HttpResponseBadRequest\nfrom rapidsms.webui.utils import render_to_response\nfrom django.db.models import Q\n\nfrom domain.models import Domain\nfrom wqm.models import WqmAuthority, WqmArea, SamplingPoint\nfrom standards.models import WaterUseType, Standard\nfrom samples.models import ValueRule, Sample, NormalRange, AbnormalRange, MeasuredValue, Parameter\n\nACCESS_KEY = settings.WEBSERVICE_PASSWORD\nSUPERUSER_ID = int(settings.WEBSERVICE_ADMIN_ID)\nSUPERUSER_KEY = settings.WEBSERVICE_ADMIN_PASSWORD\n\nALLOWED_TABLES = [\n 'abnormalrange',\n 'authorisedsampler',\n 'domainlookup',\n 'measuredvalue',\n 'normalrange',\n 'parameter',\n 'sample',\n 'samplingpoint',\n 'smsnotifications',\n 'standard',\n 'valuerule',\n 'waterusetype',\n 'wqmarea',\n 'wqmauthority'\n]\n\nALLOWED_DOMAINS = map(int, settings.WEBSERVICE_DOMAINS.split(','))\n\ndef check_access(request):\n domain = request.GET.get('domain', None)\n key = request.GET.get('key', None)\n \n if domain is None or domain == \"\" or key is None or key == \"\":\n return {'success': False, 'error': _error_response(\"Incorrect access key for domain\") }\n \n ok = False\n \n try:\n domain = int(domain)\n except:\n return {'success': False, 'error': _error_response(\"Incorrect access key for domain\") }\n \n if domain == SUPERUSER_ID:\n if key == SUPERUSER_KEY:\n ok = True\n else:\n if key == ACCESS_KEY:\n ok = True\n \n if ok:\n return {'success': True, 'error': None }\n else: \n return {'success': False, 'error': _error_response(\"Incorrect access key for domain\") }\n\ndef table_names(request): \n key = request.GET.get('key', None)\n \n #skip check for now, because phones in the field don't send access key for this call\n if True or key == SUPERUSER_KEY or key == ACCESS_KEY: \n return _success_response(ALLOWED_TABLES)\n else:\n return 
_error_response(\"Incorrect access key for domain\")\n\ndef added_rows(request):\n return _fetch_rows(request, 'added')\n\ndef updated_rows(request):\n return _fetch_rows(request, 'updated')\n\ndef deleted_rows(request):\n return _fetch_rows(request, 'deleted')\n\ndef _fetch_rows(request, type):\n result = check_access(request)\n \n if not result['success']:\n return result['error'] \n\n table = request.GET.get('table', None)\n time = _int_or_zero(request.GET.get('time', None))\n domain = _int_or_zero(request.GET.get('domain', None)) \n \n if table is None or time is None:\n return _error_response(\"One or more of the following required parameters were not specified: table, time\") \n \n if table not in ALLOWED_TABLES:\n return _error_response(\"Unknown or forbidden table name specified\")\n \n limit = None \n offset = None \n \n if request.GET.get('limit', None):\n limit = _int_or_zero(request.GET.get('limit', None))\n\n if request.GET.get('offset', None):\n offset = _int_or_zero(request.GET.get('offset', None))\n \n # backwards compatibility - match behaviour original android app expects\n if limit is None:\n limit = 1000\n \n data = _normalise_rows(type, table, time, domain) \n return _success_response(data, limit, offset)\n\ndef _int_or_zero(value):\n try:\n return int(value)\n except TypeError, e:\n return 0\n except ValueError, e:\n return 0 \n \ndef _success_response(data, limit=0, offset=0):\n if limit is None:\n limit = 0\n \n if offset is None:\n offset = 0 \n \n total_count = len(data)\n count = limit\n \n if limit and offset:\n data = data[offset:(limit + offset)]\n elif limit:\n data = data[:limit]\n elif offset:\n data = data[offset:]\n \n count = len(data) \n \n return _json_response({\n 'status': 'success',\n 'count': count,\n 'total_count': total_count,\n 'limit': limit,\n 'offset': offset,\n 'data': data\n }) \n\ndef _error_response(message):\n return _json_response({\n 'status': 'error',\n 'count': 0,\n 'total_count': 0,\n 'limit': 0,\n 'offset': 0,\n 'data': message\n })\n\ndef _json_response(object):\n return HttpResponse(json.dumps(object), content_type='application/json; charset=utf8')\n\ndef _normalise_rows(type, table, time, domain):\n domain = int(domain)\n \n data = []\n \n if type == 'deleted':\n return data\n \n # TODO this isn't entirely correct - WQM timestamps are all UTC+0, so we need to adjust the given device time which requires\n # knowing which timezone the device is operating in\n date_query = Q(created__gt=datetime.fromtimestamp(time))\n \n if type == 'updated':\n date_query = Q(modified__gt=datetime.fromtimestamp(time))\n\n if table == 'abnormalrange':\n rows = AbnormalRange.objects.filter(date_query) \n \n for row in rows:\n if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'description': row.description,\n 'valuerule': row.value_rule.id,\n 'minimum': str(row.minimum),\n 'maximum': str(row.maximum),\n \"remedialaction\": row.remedialaction if row.remedialaction is not None else '',\n \"colour\": row.color,\n 'wqmauthority': row.wqmauthority.id,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'authorisedsampler':\n # not used by android application\n pass\n elif table == 'domainlookup':\n if datetime.fromtimestamp(time) < datetime.strptime(\"2011-04-19 00:00:00\", \"%Y-%m-%d %H:%M:%S\"):\n data.append({\n 'id': 1,\n 'key': 'positive',\n 'value': 1,\n 'parameter': 
Parameter.objects.get(test_name_short=\"h2s\").id,\n 'modified': None,\n 'created': str(datetime.now())\n })\n \n data.append({\n 'id': 2,\n 'key': 'negative',\n 'value': 0,\n 'parameter': Parameter.objects.get(test_name_short=\"h2s\").id,\n 'modified': None,\n 'created': str(datetime.now())\n }) \n elif table == 'measuredvalue':\n rows = MeasuredValue.objects.filter(date_query, parameter__is_decimal=True) \n \n for row in rows:\n try:\n if (domain > 0 and row.sample.sampling_point.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.sample.sampling_point.wqmarea.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'sample': row.sample.id,\n 'parameter': row.parameter.id,\n 'value': row.value,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n })\n except Sample.DoesNotExist:\n pass \n elif table == 'normalrange':\n rows = NormalRange.objects.filter(date_query) \n \n for row in rows:\n if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'description': row.description,\n 'valuerule': row.value_rule.id,\n 'minimum': str(row.minimum),\n 'maximum': str(row.maximum),\n 'wqmauthority': row.wqmauthority.id,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'parameter':\n rows = Parameter.objects.filter(date_query, is_decimal=True) \n \n for row in rows:\n data.append({\n 'id': row.id,\n 'testname': row.test_name,\n 'units': row.unit,\n 'lookuphint': row.lookup_hint,\n 'testnameshort': row.test_name_short, \n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n })\n elif table == 'sample':\n rows = Sample.objects.filter(date_query) \n \n for row in rows:\n if (domain > 0 and row.sampling_point.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.sampling_point.wqmarea.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'samplingpoint': row.sampling_point.id,\n 'takenby': row.taken_by.id,\n 'notes': row.notes,\n 'datetaken': str(row.date_taken.date()),\n 'date_received': str(row.date_received),\n 'datasource': 'xform',\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created),\n }) \n elif table == 'samplingpoint':\n rows = SamplingPoint.objects.filter(date_query) \n \n for row in rows:\n if (domain > 0 and row.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmarea.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'pointname': row.name,\n 'pointcode': row.code,\n 'wqmarea': row.wqmarea.id,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created),\n 'waterusetype': None,\n 'the_geom': None,\n 'x_coord': row.point.get_x(),\n 'y_coord': row.point.get_y() \n }) \n elif table == 'smsnotifications':\n # not used by android application\n pass \n elif table == 'standard':\n rows = Standard.objects.filter(date_query)\n \n for row in rows:\n data.append({\n 'id': row.id,\n 'name': row.name,\n 'governingbody': row.governing_body,\n 'dateeffective': str(row.date_effective),\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'valuerule':\n rows = ValueRule.objects.filter(date_query)\n \n for row in rows:\n if row.standard is not None: #temporary\n data.append({\n 'id': 
row.id,\n 'description': row.description,\n 'parameter': row.parameter.id,\n 'standard': row.standard.id if row.standard is not None else None,\n 'waterusetype': row.water_use_type.id if row.water_use_type is not None else None,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'waterusetype':\n rows = WaterUseType.objects.filter(date_query)\n \n for row in rows:\n data.append({\n 'id': row.id,\n 'description': row.description,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'wqmarea':\n rows = WqmArea.objects.filter(date_query) \n \n for row in rows:\n if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS):\n data.append({\n 'id': row.id,\n 'name': row.name,\n 'wqmauthority': row.wqmauthority.id,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n }) \n elif table == 'wqmauthority':\n rows = WqmAuthority.objects.filter(date_query) \n \n for row in rows:\n data.append({\n 'id': row.id,\n 'name': row.name,\n 'modified': str(row.modified) if row.modified is not None else None,\n 'created': str(row.created)\n })\n \n return data\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-1153064495839629800,"string":"-1,153,064,495,839,629,800"},"line_mean":{"kind":"number","value":35.7262569832,"string":"35.726257"},"line_max":{"kind":"number","value":197,"string":"197"},"alpha_frac":{"kind":"number","value":0.535898996,"string":"0.535899"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109410,"cells":{"repo_name":{"kind":"string","value":"srp33/ShinyLearner"},"path":{"kind":"string","value":"BuildTests/CheckAccuracy.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3555"},"content":{"kind":"string","value":"import os, sys\n\ntaskType = sys.argv[1]\nvalidationType = sys.argv[2]\ndescription = sys.argv[3]\nmetricFilePath = sys.argv[4]\nalgorithmColumnName = sys.argv[5]\nexpectedNumAlgorithms = int(sys.argv[6])\nexpectedNumEnsemble = int(sys.argv[7])\n\nif not os.path.exists(metricFilePath):\n print(\"[FAILED] No metric file found!\")\n exit(1)\n\nsuccessfulAlgorithms = set()\nfailedAlgorithms = []\nfailedAlgorithmOutput = \"\"\n\nmetricFile = open(metricFilePath)\nmetricData = [line.rstrip().split(\"\\t\") for line in metricFile]\nmetricFile.close()\n\nheaderItems = metricData.pop(0)\nmetricNameIndex = headerItems.index(\"Metric\")\nvalueIndex = headerItems.index(\"Value\")\nalgorithmIndex = headerItems.index(algorithmColumnName)\n\nuniqueAlgorithms = list(set([row[algorithmIndex] for row in metricData]))\n\nif len(uniqueAlgorithms) == 0:\n print(\"[FAILED] No algorithm scripts could be found.\")\n exit(1)\n\nactualNumAlgorithms = len([x for x in uniqueAlgorithms if not x.startswith(\"Ensemble\")])\nactualNumEnsemble = len([x for x in uniqueAlgorithms if x.startswith(\"Ensemble\")])\n\nif actualNumAlgorithms != expectedNumAlgorithms:\n print(\"[FAILED] The number of classification algorithms in {} [{}] does not match the expected number [{}].\".format(metricFilePath, actualNumAlgorithms, expectedNumAlgorithms))\n exit(1)\n\nif actualNumEnsemble != expectedNumEnsemble:\n print(\"[FAILED] The number of ensemble algorithms in {} [{}] does not match the expected number [{}].\".format(metricFilePath, actualNumEnsemble, expectedNumEnsemble))\n 
exit(1)\n\nfor algorithm in uniqueAlgorithms:\n if \"ZeroR\" in algorithm:\n continue\n if \"demo\" in algorithm:\n continue\n\n idText = \"{} - {} - {} - {}\".format(taskType, validationType, metricFilePath, algorithm)\n\n aucValues = [float(row[valueIndex]) for row in metricData if row[algorithmIndex] == algorithm and row[metricNameIndex] == \"AUROC\"]\n meanAUC = sum(aucValues) / float(len(aucValues))\n\n if description.startswith(\"StrongSignal\"):\n lowerThreshold = 0.75\n if meanAUC >= lowerThreshold:\n print(\"[PASSED] The mean AUROC was {:.3f} for {} and {}. ({})\".format(meanAUC, description, algorithm, idText))\n successfulAlgorithms.add(algorithm)\n else:\n error = \"[FAILED] The mean AUROC was {:.3f} for {} and {}. The expected lower threshold is {:.3f}. ({})\".format(meanAUC, description, algorithm, lowerThreshold, idText)\n print(error)\n failedAlgorithms.append(algorithm)\n failedAlgorithmOutput += error + \"\\n\"\n elif description.startswith(\"NoSignal\"):\n upperThreshold = 0.75\n if meanAUC <= upperThreshold:\n print(\"[PASSED] The mean AUROC was {:.3f} for {} and {}. ({})\".format(meanAUC, description, algorithm, idText))\n successfulAlgorithms.add(algorithm)\n else:\n error = \"[FAILED] The mean AUROC was {:.3f} for {} and {}. The expected upper threshold is {:.3f}. ({})\".format(meanAUC, description, algorithm, upperThreshold, idText)\n print(error)\n failedAlgorithms.append(algorithm)\n failedAlgorithmOutput += error + \"\\n\"\n\nprint(\"\\n[TEST SUMMARY]\\n\")\n\nif len(successfulAlgorithms) == 0:\n print(\"[FAILED] No algorithms successfully passed any of the tests.\")\n exit(1)\n\nif len(failedAlgorithms) > 0:\n print(\"The following algorithm(s) failed at least once:\")\n for algorithm in failedAlgorithms:\n print(\" {}\".format(algorithm))\n print(\"\\n\" + failedAlgorithmOutput)\n exit(1)\nelse:\n print(\"Tests passed!\\n\")\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-7391869750426212000,"string":"-7,391,869,750,426,212,000"},"line_mean":{"kind":"number","value":38.5,"string":"38.5"},"line_max":{"kind":"number","value":180,"string":"180"},"alpha_frac":{"kind":"number","value":0.682137834,"string":"0.682138"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109411,"cells":{"repo_name":{"kind":"string","value":"orlox/hall_evolution"},"path":{"kind":"string","value":"as_full/field_variation.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9535"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\nProgram to compare how much the structure of a field changes with respect to its initial form\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom pylab import *\nimport sys\nimport os\nimport math\nfrom scipy import special\n\n#Fundamental Ohm mode for rmin=0.75\nA1=-0.52004\nA2=-0.55882\nkk=7.03266\ndef A_Ohm_function(r,th):\n return r*pow(sin(th),2)*(((sin(kk*r)/(kk*r)-cos(kk*r))*A2)/(kk*r)+((-sin(kk*r)-cos(kk*r)/(kk*r))*A1)/(kk*r))\n\n#Hall equilibrium field\nrmin=-0.75\ndef A_eq_function(r,th):\n\treturn 1/2.0*pow(sin(th),2)*((3*pow(rmin,5)-5*pow(rmin,3))/r+5*pow(r,2)-3*pow(r,4))/(2-5*pow(rmin,3)+3*pow(rmin,5));\n\n\n#Get name of folder with results\nfolder=sys.argv[1]\n#add \"/\" if it is not given in folder name\nif folder[len(folder)-1]!=\"/\":\n folder=folder+\"/\"\n\n#open output file and print header\nf = open(folder+\"compare.dat\", 'w')\nf.write(\"#num t/t_h int(dB)\\n\")\n\n#Show what folder is being used\nprint(\"Analyzing data in \"+folder)\n\n# Open 
parameters file\nparams=open(folder+'params.dat','r')\ndata=params.readline().split(\":\");\nrNum=int(data[1])\ndata=params.readline().split(\":\");\nthNum=int(data[1])\ndata=params.readline().split(\":\");\nfactor=float(data[1])\ndata=params.readline().split(\":\");\ntNum=int(data[1])\ndata=params.readline().split(\":\");\nplotSteps=int(data[1])\ndata=params.readline().split(\":\");\nrmin=float(data[1])\ndata=params.readline().split(\":\");\nthtd=float(data[1])\ndata=params.readline().split(\":\");\nlNum=int(data[1])\nparams.close()\n\n#solve values of dr and dth\ndr=(1.0-rmin)/(rNum-1)\ndth=math.pi/(thNum-1)\n\n#store P_l^1(cos(theta)) at the surface, used to solve multipoles. These are calculated at grid mid-points to perform integrations\nplone=zeros((lNum,thNum));\nfor j in range(thNum):\n alp=special.lpmn(1, lNum+1, math.cos((j+0.5)*dth))\n for l in range(lNum):\n plone[l][j]=alp[0][1][l+1]\n\n#Solve multipole coefficient l (l is actually l-1, so the dipole is l=0) of the field with poloidal field function A\ndef multipole_l(l,A):\n value=0\n for j in range(0,thNum-1):\n value+=(A[rNum-1][j]+A[rNum-1][j+1])/2*plone[l][j]*dth\n return value*math.sqrt(math.pi*(2.0*l+3))/(l+2.0)*sqrt(1.0/8/math.pi)\n\n#Create array to store A and B at each step\nA=zeros((rNum,thNum));\nA_Ohm=zeros((rNum,thNum));\nA_eq=zeros((rNum,thNum));\nB=zeros((rNum,thNum));\n\n#Fill array with ohm eigenmode for A_Ohm\nfor i in range(rNum):\n r=i*dr+rmin\n for j in range(thNum):\n th=j*dth\n A_Ohm[i][j]=A_Ohm_function(r,th)\n A_eq[i][j]=A_eq_function(r,th)\n\n#Create array for multipole coefficients\nmultipoles=zeros((lNum));\nmultipoles_i=zeros((lNum));\ndipoleOhm=multipole_l(0,A_Ohm)\ndipoleEq=multipole_l(0,A_eq)\n\n#Create array to store the vector values of the field at the initial and later times\n#field is solved at midpoints in the grid.\n#B_field_i=initial field\nB_field_i=zeros((rNum,thNum,3));\n#B_field_k=field at each timestep\nB_field_k=zeros((rNum,thNum,3));\n#B_field_Ohm=fundamental Ohm mode, with the same polarity that the equilibrium\nB_field_Ohm=zeros((rNum,thNum,3));\n#B_field_eq=equilibrium field due to rigid rotation of constant electron density in the shell.\nB_field_eq=zeros((rNum,thNum,3));\n\n#solve vector magnetic field of the fundamental Ohm mode and the equilibrium field\nfor i in range(rNum-1):\n r=i*dr+dr/2+rmin\n for j in range(thNum-1):\n th=j*dth+dth/2\n #Solve each component\n #r component\n B_field_Ohm[i][j][0]=1/r/r/sin(th)*(A_Ohm[i][j+1]-A_Ohm[i][j]+A_Ohm[i+1][j+1]-A_Ohm[i+1][j])/2/dth\n B_field_eq[i][j][0]=1/r/r/sin(th)*(A_eq[i][j+1]-A_eq[i][j]+A_eq[i+1][j+1]-A_eq[i+1][j])/2/dth\n #th component\n B_field_Ohm[i][j][1]=-1/r/sin(th)*(A_Ohm[i+1][j]-A_Ohm[i][j]+A_Ohm[i+1][j+1]-A_Ohm[i][j+1])/2/dr\n B_field_eq[i][j][1]=-1/r/sin(th)*(A_eq[i+1][j]-A_eq[i][j]+A_eq[i+1][j+1]-A_eq[i][j+1])/2/dr\n #phi component\n B_field_Ohm[i][j][2]=0\n B_field_eq[i][j][2]=0\n\n#solve energy of Ohm eigenmode and equilibrium field, first the internal energy\nenergy_Ohm=0\nenergy_eq=0\nfor i in range(rNum-1):\n r=i*dr+dr/2+rmin\n for j in range(thNum-1):\n th=j*dth+dth/2\n energy_Ohm+=(pow(B_field_Ohm[i][j][0],2)+pow(B_field_Ohm[i][j][1],2)+pow(B_field_Ohm[i][j][2],2))*pow(r,2)*sin(th)\n energy_eq+=(pow(B_field_eq[i][j][0],2)+pow(B_field_eq[i][j][1],2)+pow(B_field_eq[i][j][2],2))*pow(r,2)*sin(th)\nenergy_Ohm=energy_Ohm*dr*dth/4\nenergy_eq=energy_eq*dr*dth/4\n#now the external energy\nenergy_Ohm=energy_Ohm+2*pow(dipoleOhm,2)\nenergy_eq=energy_eq+2*pow(dipoleEq,2)\n\n#analyze all 
data\nk=0\ninitial_energy=0;\nwhile 1:\n #add zeros to the number of the plot, so they are ordered appropately\n num_file=str(k)\n diff_zeros=len(str(tNum))-len(str(k))\n while diff_zeros>0:\n num_file=\"0\"+num_file\n diff_zeros-=1\n\n #read A file\n data\n try:\n data=open(folder+\"A_\"+str(k),'r')\n except IOError as e:\n break\n #first line has simulation time\n t=float(data.readline())\n i,j=0,0\n for line in data:\n values=line.split(\" \")\n for value in values:\n if j==thNum:\n break\n A[i][j]=float(value)\n j+=1\n j=0\n i+=1\n data.close()\n #read B file\n try:\n data=open(folder+\"B_\"+str(k),'r')\n except IOError as e:\n break\n #first line has simulation time\n t=float(data.readline())\n i,j=0,0\n for line in data:\n values=line.split(\" \")\n for value in values:\n if j==thNum:\n break\n B[i][j]=float(value)\n j+=1\n j=0\n i+=1\n data.close()\n\n #solve multipole coefficients\n for l in range(lNum):\n multipoles[l]=multipole_l(l,A)\n\n Bmaxth=0\n #solve vector magnetic field\n for i in range(rNum-1):\n r=i*dr+dr/2+rmin\n for j in range(thNum-1):\n th=j*dth+dth/2\n #Solve each component\n #r component\n B_field_k[i][j][0]=1/r/r/sin(th)*(A[i][j+1]-A[i][j]+A[i+1][j+1]-A[i+1][j])/2/dth\n #th component\n B_field_k[i][j][1]=-1/r/sin(th)*(A[i+1][j]-A[i][j]+A[i+1][j+1]-A[i][j+1])/2/dr\n #phi component\n B_field_k[i][j][2]=1/r/sin(th)*(B[i][j]+B[i+1][j]+B[i][j+1]+B[i+1][j+1])/4\n\n #solve total internal energy\n energy=0\n for i in range(rNum-1):\n r=i*dr+dr/2+rmin\n for j in range(thNum-1):\n th=j*dth+dth/2\n energy+=(pow(B_field_k[i][j][0],2)+pow(B_field_k[i][j][1],2)+pow(B_field_k[i][j][2],2))*pow(r,2)*sin(th)\n energy=energy*dr*dth/4\n #add total external energy\n for l in range(lNum):\n energy+=(l+2)*pow(multipoles[l],2)\n\n #if this is the first timestep, store initial_energy and field and multipole coefficients.\n if k==0:\n initial_energy=energy\n for i in range(rNum-1):\n for j in range(thNum-1):\n B_field_i[i][j][0]=B_field_k[i][j][0]\n B_field_i[i][j][1]=B_field_k[i][j][1]\n B_field_i[i][j][2]=B_field_k[i][j][2]\n for l in range(lNum):\n multipoles_i[l]=multipoles[l]\n \n #solve integrals of the different dB^2 inside the star, fields must be corrected to have energy equal to 1\n dB_energy=0\n dB_energy_Ohm=0\n dB_energy_eq=0\n for i in range(rNum-1):\n r=i*dr+dr/2+rmin\n for j in range(thNum-1):\n th=j*dth+dth/2\n #solve dB with respect to initial field\n dB_energy+= (pow(B_field_i[i][j][0]*sqrt(1/initial_energy)-B_field_k[i][j][0]*sqrt(1/energy),2)\n +pow(B_field_i[i][j][1]*sqrt(1/initial_energy)-B_field_k[i][j][1]*sqrt(1/energy),2)\n +pow(B_field_i[i][j][2]*sqrt(1/initial_energy)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th)\n #solve dB with respect to Ohm fundamental mode\n dB_energy_Ohm+= (pow(B_field_Ohm[i][j][0]*sqrt(1/energy_Ohm)-B_field_k[i][j][0]*sqrt(1/energy),2)\n +pow(B_field_Ohm[i][j][1]*sqrt(1/energy_Ohm)-B_field_k[i][j][1]*sqrt(1/energy),2)\n +pow(B_field_Ohm[i][j][2]*sqrt(1/energy_Ohm)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th)\n #solve dB with respect to equilibrium field\n dB_energy_eq+= (pow(B_field_eq[i][j][0]*sqrt(1/energy_eq)-B_field_k[i][j][0]*sqrt(1/energy),2)\n +pow(B_field_eq[i][j][1]*sqrt(1/energy_eq)-B_field_k[i][j][1]*sqrt(1/energy),2)\n +pow(B_field_eq[i][j][2]*sqrt(1/energy_eq)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th)\n dB_energy=dB_energy*dth*dr/4\n dB_energy_Ohm=dB_energy_Ohm*dth*dr/4\n dB_energy_eq=dB_energy_eq*dth*dr/4\n #add contribution to dBs outside the star\n 
dB_energy_Ohm=dB_energy_Ohm+2*pow(dipoleOhm,2)/energy_Ohm-4*dipoleOhm*multipoles[0]/sqrt(energy_Ohm*energy)\n dB_energy_eq=dB_energy_eq+2*pow(dipoleEq,2)/energy_eq-4*dipoleEq*multipoles[0]/sqrt(energy_eq*energy)\n for l in range(0,lNum):\n dB_energy_Ohm=dB_energy_Ohm+(l+2)*pow(multipoles[l],2)/energy\n dB_energy_eq=dB_energy_eq+(l+2)*pow(multipoles[l],2)/energy\n dB_energy=dB_energy+(l+2)*(pow(multipoles_i[l],2)/initial_energy-2*multipoles_i[l]*multipoles[l]/sqrt(energy*initial_energy)+pow(multipoles[l],2)/energy)\n\n f.write(str(t) + \" \" + str(dB_energy) + \" \" + str(dB_energy_Ohm)+ \" \" + str(dB_energy_eq)+\"\\n\")\n print str(num_file)+\" \"+str(energy)+\" \"+str(dB_energy)+\" \"+str(dB_energy_Ohm)+\" \"+str(dB_energy_eq)\n k+=plotSteps\n\nf.close()\nsys.exit()\n\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":2847259777698597400,"string":"2,847,259,777,698,597,400"},"line_mean":{"kind":"number","value":34.9811320755,"string":"34.981132"},"line_max":{"kind":"number","value":161,"string":"161"},"alpha_frac":{"kind":"number","value":0.5943366544,"string":"0.594337"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109412,"cells":{"repo_name":{"kind":"string","value":"stfc/cvmfs-stratum-uploader"},"path":{"kind":"string","value":"uploader/custom_auth/admin.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2869"},"content":{"kind":"string","value":"from django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom uploader.custom_auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserChangeForm, UserCreationForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom guardian.admin import GuardedModelAdmin\n\n\nclass CustomUserChangeForm(UserChangeForm):\n username = forms.RegexField(\n label=_(\"Username\"), max_length=200, regex=r\"^[\\w.@+-=/ ]+$\",\n help_text=_(\"Required. 200 characters or fewer. Letters, digits and \"\n \"@/./+/-/_/=/ // only.\"),\n error_messages={\n 'invalid': _(\"This value may contain only letters, numbers and \"\n \"@/./+/-/_/=/ // characters.\")},\n widget=forms.TextInput(attrs={'style': 'width: 70%;'})\n )\n\n\nclass CustomUserCreationForm(UserCreationForm):\n username = forms.RegexField(label=_(\"Username\"), max_length=200,\n regex=r'^[\\w.@+-=/ ]+$',\n help_text=_(\"Required. 200 characters or fewer. Letters, digits and \"\n \"@/./+/-/_/=/ // only.\"),\n error_messages={\n 'invalid': _(\"This value may contain only letters, numbers and \"\n \"@/./+/-/_/=/ // characters.\")},\n widget=forms.TextInput(attrs={'style': 'width: 70%;'})\n )\n password1 = forms.CharField(required=False)\n password2 = forms.CharField(required=False)\n\n def clean_username(self):\n # Since User.username is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. 
See #13147.\n username = self.cleaned_data[\"username\"]\n try:\n User._default_manager.get(username=username)\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n class Meta:\n model = User\n fields = (\"username\",)\n\n\nclass CustomUserAdmin(UserAdmin, ModelAdmin):\n add_form_template = 'admin/custom_auth/user/add_form.html'\n form = CustomUserChangeForm\n add_form = CustomUserCreationForm\n readonly_fields = ('last_login', 'date_joined', )\n\n fieldsets = (\n (None, {'fields': ('username', 'password')}),\n (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),\n (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups',)}),\n (_('Important dates'), {'fields': ('last_login', 'date_joined')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username',),\n }),\n )\n\n\nadmin.site.register(User, CustomUserAdmin)"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":2708395672979926500,"string":"2,708,395,672,979,926,500"},"line_mean":{"kind":"number","value":39.4225352113,"string":"39.422535"},"line_max":{"kind":"number","value":101,"string":"101"},"alpha_frac":{"kind":"number","value":0.5653537818,"string":"0.565354"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109413,"cells":{"repo_name":{"kind":"string","value":"daviessm/heating"},"path":{"kind":"string","value":"usbrelay.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2262"},"content":{"kind":"string","value":"import usb, logging\nfrom relay import Relay\n\nlogger = logging.getLogger('heating')\n\nclass USBRelay(Relay):\n def __init__(self,device):\n #Assume relay is on until turned off\n self._status = [1,1,1,1,1,1,1,1]\n\n self._hid_device = device\n if self._hid_device.is_kernel_driver_active(0):\n try:\n self._hid_device.detach_kernel_driver(0)\n except usb.core.USBError as e:\n raise Exception(\"Could not detatch kernel driver: %s\" % str(e))\n try:\n self._hid_device.set_configuration()\n self._hid_device.reset()\n except usb.core.USBError as e:\n raise Exception(\"Could not set configuration: %s\" % str(e))\n\n #Turn off at start\n self.all_off()\n\n def __sendmsg(self,data):\n self._hid_device.ctrl_transfer(0x21,0x09,0x0300,0x00,bytes(data),1000)\n\n def all_status(self):\n return self._status\n\n def one_status(self,relay_num):\n return self._status[relay_num-1]\n\n def all_on(self):\n if not self._status == [1,1,1,1,1,1,1,1]:\n logger.debug(\"Relay all on\")\n self.__sendmsg([0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])\n self._status = [1,1,1,1,1,1,1,1]\n\n def all_off(self):\n if not self._status == [0,0,0,0,0,0,0,0]:\n logger.debug(\"Relay all off\")\n self.__sendmsg([0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])\n self._status = [0,0,0,0,0,0,0,0]\n\n def one_on(self,relay_num):\n if self._status[relay_num-1] == 0 and relay_num > 0 and relay_num <= 8:\n logger.debug(\"Relay \" + str(relay_num) + \" on\")\n self.__sendmsg([0xFF, relay_num, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])\n self._status[relay_num-1] = 1\n\n def one_off(self,relay_num):\n if self._status[relay_num-1] == 1 and relay_num > 0 and relay_num <= 8:\n logger.debug(\"Relay \" + str(relay_num) + \" off\")\n self.__sendmsg([0xFD, relay_num, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])\n self._status[relay_num-1] = 0\n\n @staticmethod\n def find_relay():\n hid_devices = usb.core.find(find_all=True,idVendor=0x16c0,idProduct=0x05df)\n relays = []\n 
for hid_device in hid_devices:\n relays.append(USBRelay(hid_device))\n if len(relays) < 1:\n raise Exception(\"No relays found\")\n if len(relays) > 1:\n raise Exception(\"Only one relay allowed!\")\n\n return relays[0]\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-8713284299106219,"string":"-8,713,284,299,106,219"},"line_mean":{"kind":"number","value":31.3142857143,"string":"31.314286"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.6215738285,"string":"0.621574"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109414,"cells":{"repo_name":{"kind":"string","value":"bitglue/shinysdr"},"path":{"kind":"string","value":"shinysdr/plugins/rtl_433.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10985"},"content":{"kind":"string","value":"# Copyright 2016, 2017 Kevin Reid \n#\n# This file is part of ShinySDR.\n# \n# ShinySDR is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# ShinySDR is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with ShinySDR. If not, see .\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport time\n\nfrom twisted.internet import reactor as the_reactor # TODO eliminate\nfrom twisted.internet.protocol import ProcessProtocol\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.python import log\nfrom zope.interface import implementer\n\nfrom gnuradio import analog\nfrom gnuradio import gr\n\nfrom shinysdr.i.blocks import make_sink_to_process_stdin\nfrom shinysdr.filters import MultistageChannelFilter\nfrom shinysdr.math import dB\nfrom shinysdr.interfaces import BandShape, ModeDef, IDemodulator\nfrom shinysdr.signals import no_signal\nfrom shinysdr.telemetry import ITelemetryMessage, ITelemetryObject\nfrom shinysdr.twisted_ext import test_subprocess\nfrom shinysdr.types import EnumRow, TimestampT\nfrom shinysdr.values import ExportedState, LooseCell, exported_value\n\n\ndrop_unheard_timeout_seconds = 120\nupper_preferred_demod_rate = 400000\n\n\n@implementer(IDemodulator)\nclass RTL433Demodulator(gr.hier_block2, ExportedState):\n def __init__(self, mode='433', input_rate=0, context=None):\n assert input_rate > 0\n assert context is not None\n gr.hier_block2.__init__(\n self, type(self).__name__,\n gr.io_signature(1, 1, gr.sizeof_gr_complex),\n gr.io_signature(0, 0, 0))\n \n # The input bandwidth chosen is not primarily determined by the bandwidth of the input signals, but by the frequency error of the transmitters. Therefore it is not too critical, and we can choose the exact rate to make the filtering easy.\n if input_rate <= upper_preferred_demod_rate:\n # Skip having a filter at all.\n self.__band_filter = None\n demod_rate = input_rate\n else:\n # TODO: This gunk is very similar to the stuff that MultistageChannelFilter does. 
See if we can share some code.\n lower_rate = input_rate\n lower_rate_prev = None\n while lower_rate > upper_preferred_demod_rate and lower_rate != lower_rate_prev:\n lower_rate_prev = lower_rate\n if lower_rate % 5 == 0 and lower_rate > upper_preferred_demod_rate * 3:\n lower_rate /= 5\n elif lower_rate % 2 == 0:\n lower_rate /= 2\n else:\n # non-integer ratio\n lower_rate = upper_preferred_demod_rate\n break\n demod_rate = lower_rate\n \n self.__band_filter = MultistageChannelFilter(\n input_rate=input_rate,\n output_rate=demod_rate,\n cutoff_freq=demod_rate * 0.4,\n transition_width=demod_rate * 0.2)\n \n # Subprocess\n # using /usr/bin/env because twisted spawnProcess doesn't support path search\n # pylint: disable=no-member\n process = the_reactor.spawnProcess(\n RTL433ProcessProtocol(context.output_message),\n '/usr/bin/env',\n env=None, # inherit environment\n args=[\n 'env', 'rtl_433',\n '-F', 'json',\n '-r', '-', # read from stdin\n '-m', '3', # complex float input\n '-s', str(demod_rate),\n ],\n childFDs={\n 0: 'w',\n 1: 'r',\n 2: 2\n })\n sink = make_sink_to_process_stdin(process, itemsize=gr.sizeof_gr_complex)\n \n agc = analog.agc2_cc(reference=dB(-4))\n agc.set_attack_rate(200 / demod_rate)\n agc.set_decay_rate(200 / demod_rate)\n \n if self.__band_filter:\n self.connect(\n self,\n self.__band_filter,\n agc)\n else:\n self.connect(\n self,\n agc)\n self.connect(agc, sink)\n \n @exported_value(type=BandShape, changes='never')\n def get_band_shape(self):\n \"\"\"implements IDemodulator\"\"\"\n if self.__band_filter:\n return self.__band_filter.get_shape()\n else:\n # TODO Reuse UnselectiveAMDemodulator's approach to this\n return BandShape(stop_low=0, pass_low=0, pass_high=0, stop_high=0, markers={})\n \n def get_output_type(self):\n \"\"\"implements IDemodulator\"\"\"\n return no_signal\n\n\nclass RTL433ProcessProtocol(ProcessProtocol):\n def __init__(self, target):\n self.__target = target\n self.__line_receiver = LineReceiver()\n self.__line_receiver.delimiter = '\\n'\n self.__line_receiver.lineReceived = self.__lineReceived\n \n def outReceived(self, data):\n \"\"\"Implements ProcessProtocol.\"\"\"\n # split lines\n self.__line_receiver.dataReceived(data)\n \n def errReceived(self, data):\n \"\"\"Implements ProcessProtocol.\"\"\"\n # we should inherit stderr, not pipe it\n raise Exception('shouldn\\'t happen')\n \n def __lineReceived(self, line):\n # rtl_433's JSON encoder is not perfect (e.g. 
it will emit unescaped newlines), so protect against parse failures\n try:\n message = json.loads(line)\n except ValueError:\n log.msg('bad JSON from rtl_433: %s' % line)\n return\n log.msg('rtl_433 message: %r' % (message,))\n # rtl_433 provides a time field, but when in file-input mode it assumes the input is not real-time and generates start-of-file-relative timestamps, so we can't use them.\n wrapper = RTL433MessageWrapper(message, time.time())\n self.__target(wrapper)\n\n\n_message_field_is_id = {\n # common\n u'model': True,\n u'time': False,\n \n # id fields\n u'device': True, # common\n u'channel': True, # some\n u'id': True, # some, frequenrly labeled 'house code'\n u'dev_id': True, # one\n u'node': True, # one\n u'address': True, # one\n u'ws_id': True, # one\n u'sid': True, # one\n u'rid': True, # one\n u'unit': True, # one\n \n # data fields - device\n u'battery': False,\n u'rc': False,\n \n # data fields - weather\n u'temperature_F': False,\n u'temperature_C': False,\n u'temperature': False,\n u'humidity': False,\n u'wind_speed': False,\n u'wind_speed_ms': False,\n u'wind_gust': False,\n u'wind_gust_ms': False,\n u'wind_direction': False,\n u'direction': False,\n u'direction_str': False,\n u'direction_deg': False,\n u'speed': False,\n u'gust': False,\n u'rain': False,\n u'rain_total': False,\n u'rain_rate': False,\n u'rainfall_mm': False,\n u'total_rain': False,\n \n # data fields - other\n u'cmd': False,\n u'cmd_id': False,\n u'command': False,\n u'tristate': False,\n u'power0': False,\n u'power1': False,\n u'power2': False,\n u'ct1': False,\n u'ct2': False,\n u'ct3': False,\n u'ct4': False,\n u'Vrms/batt': False,\n u'pulse': False,\n u'temp1_C': False,\n u'temp2_C': False,\n u'temp3_C': False,\n u'temp4_C': False,\n u'temp5_C': False,\n u'temp6_C': False,\n u'msg_type': False,\n u'hours': False,\n u'minutes': False,\n u'seconds': False,\n u'year': False,\n u'month': False,\n u'day': False,\n u'button': False,\n u'button1': False,\n u'button2': False,\n u'button3': False,\n u'button4': False,\n u'group_call': False,\n u'dim': False,\n u'dim_value': False,\n u'maybetemp': False,\n u'flags': False,\n u'binding_countdown': False,\n u'depth': False,\n u'state': False,\n}\n\n\n@implementer(ITelemetryMessage)\nclass RTL433MessageWrapper(object):\n def __init__(self, message, receive_time):\n self.message = message # a parsed rtl_433 JSON-format message\n self.receive_time = float(receive_time)\n \n id_keys = [k for k in message if _message_field_is_id.get(k, False)]\n id_keys.sort()\n self.object_id = u'-'.join(unicode(message[k]) for k in id_keys)\n \n def get_object_id(self):\n return self.object_id\n \n def get_object_constructor(self):\n return RTL433MsgGroup\n\n\n# TODO: It would make sense to make this a CollectionState object to have simple dynamic fields.\n@implementer(ITelemetryObject)\nclass RTL433MsgGroup(ExportedState):\n def __init__(self, object_id):\n \"\"\"Implements ITelemetryObject.\"\"\"\n self.__cells = {}\n self.__last_heard_time = None\n \n def state_is_dynamic(self):\n \"\"\"Overrides ExportedState.\"\"\"\n return True\n \n def state_def(self):\n \"\"\"Overrides ExportedState.\"\"\"\n for d in super(RTL433MsgGroup, self).state_def():\n yield d\n for d in self.__cells.iteritems():\n yield d\n \n # not exported\n def receive(self, message_wrapper):\n \"\"\"Implements ITelemetryObject.\"\"\"\n self.__last_heard_time = message_wrapper.receive_time\n shape_changed = False\n for k, v in message_wrapper.message.iteritems():\n if _message_field_is_id.get(k, False) or 
k == u'time':\n continue\n if k not in self.__cells:\n shape_changed = True\n self.__cells[k] = LooseCell(\n key=k,\n value=None,\n type=object,\n writable=False,\n persists=False,\n label=k)\n self.__cells[k].set_internal(v)\n self.state_changed()\n if shape_changed:\n self.state_shape_changed()\n \n def is_interesting(self):\n \"\"\"Implements ITelemetryObject.\"\"\"\n return True\n \n def get_object_expiry(self):\n \"\"\"implement ITelemetryObject\"\"\"\n return self.__last_heard_time + drop_unheard_timeout_seconds\n \n @exported_value(type=TimestampT(), changes='explicit', label='Last heard')\n def get_last_heard_time(self):\n return self.__last_heard_time\n\n\n# TODO: Arrange for a way for the user to see why it is unavailable.\n_rtl_433_available = test_subprocess(\n ['rtl_433', '-r', '/dev/null'],\n 'Reading samples from file',\n shell=False)\n\n\nplugin_mode = ModeDef(mode='433',\n info=EnumRow(label='rtl_433', description='OOK telemetry decoded by rtl_433 mostly found at 433 MHz'),\n demod_class=RTL433Demodulator,\n available=_rtl_433_available)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":2245055274137531400,"string":"2,245,055,274,137,531,400"},"line_mean":{"kind":"number","value":32.2878787879,"string":"32.287879"},"line_max":{"kind":"number","value":246,"string":"246"},"alpha_frac":{"kind":"number","value":0.5967228038,"string":"0.596723"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109415,"cells":{"repo_name":{"kind":"string","value":"devlpr/mpptCommander"},"path":{"kind":"string","value":"src/gui.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4218"},"content":{"kind":"string","value":"import copy\nimport os\nimport sys\nimport time\nimport threading\nfrom multiprocessing import Process, Queue\n\nfrom PyQt4 import uic\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom PyQt4.uic import loadUiType\n\nimport commander\nimport mappings\n\n\nTHISDIR = os.path.realpath(os.path.dirname(__file__))\n\n\nclass Commander(QtGui.QMainWindow):\n def __init__(self, parent=None):\n \"\"\"\n MPPT Commander simple UI for viewing the state of the controller.\n \"\"\"\n QtGui.QMainWindow.__init__(self, parent)\n uic.loadUi(os.path.join(THISDIR, \"ui\", \"commander.ui\"), self)\n self.__queue = Queue()\n self.__widgets = {}\n self.__running = True\n\n self.__timer = QtCore.QTimer(self)\n self.connect(self.__timer, QtCore.SIGNAL(\"timeout()\"), self.update)\n self.__timer.start(10)\n\n self.__process = Process(target=self.populateColumns, args=())\n self.__process.start()\n\n def update(self):\n \"\"\"\n Update the UI in the main thread. 
This is triggered by a timer.\n \"\"\"\n if self.__queue.empty():\n return\n key, name, register, num = self.__queue.get()\n key = \"%s-%s\" % (key, name)\n mess = \"%s (%s): %s\" % (name, register.unit, register.value)\n self.statusBar().showMessage(mess)\n if key not in self.__widgets:\n self.__widgets[key] = QtGui.QListWidgetItem(mess)\n if num <= 32:\n self.arrayInfoListWidget.addItem(self.__widgets[key])\n elif num <= 64:\n self.batteryInfoListWidget.addItem(self.__widgets[key])\n else:\n self.loadInfoListWidget.addItem(self.__widgets[key])\n else:\n #self.__widgets[key].setText(mess)\n #self.statusBar().showMessage(mess)\n pass\n\n def closeEvent(self, event):\n \"\"\"\n Event that is triggered on close.\n \"\"\"\n reply = QtGui.QMessageBox.question(self, 'Message',\n \"Are you sure to quit?\", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n self.__running = False\n self.__timer.stop()\n self.__process.terminate()\n event.accept()\n else:\n event.ignore()\n\n def populateColumns(self):\n \"\"\"\n Query the Commander unit and push all items into a queue to get popped\n off in the main thread. This is run in a new process using the\n multiprocessing module in order to make the UI responsive while\n querying the device. A thread would probably also work.\n \"\"\"\n # The ID of the device we are going to communicate with. Default is 1.\n deviceId = 0x01\n\n while self.__running:\n ser = commander.getRs485()\n\n try:\n num = 1\n for addr, reg in sorted(mappings.REGISTERS.iteritems()):\n if not self.__running:\n break\n results = commander.communicate(ser,\n deviceId,\n addr,\n reg,\n debug=False)\n wasList = False\n if not isinstance(results, list):\n wasList = True\n results = [results, ]\n\n for item in results:\n if wasList:\n key = \"%s-%s\" % (addr, reg.unit)\n self.__queue.put([key, reg.name, item, num])\n else:\n self.__queue.put([addr, reg.name, item, num])\n num += 1\n except:\n raise\n finally:\n # Close the port regardless of which errors occur\n ser.close()\n\n\nif __name__ == \"__main__\":\n # Create the QApplication and spawn a Commander window. Block until it is\n # done.\n app = QtGui.QApplication(sys.argv)\n w = Commander()\n w.setWindowTitle('MPPT Commander')\n w.show()\n sys.exit(app.exec_())\n\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6608296891409699000,"string":"6,608,296,891,409,699,000"},"line_mean":{"kind":"number","value":32.4761904762,"string":"32.47619"},"line_max":{"kind":"number","value":81,"string":"81"},"alpha_frac":{"kind":"number","value":0.5139876719,"string":"0.513988"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109416,"cells":{"repo_name":{"kind":"string","value":"bfrascher/passpy"},"path":{"kind":"string","value":"passpy/__main__.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"21211"},"content":{"kind":"string","value":"# coding: utf-8\n\n# passpy -- ZX2C4's pass compatible library and cli\n# Copyright (C) 2016 Benedikt Rascher-Friesenhausen \n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\nimport locale\nimport os\n\nimport click\nimport pyperclip\n\nfrom git import (\n GitCommandError\n)\n\nfrom passpy import (\n Store,\n StoreNotInitialisedError,\n RecursiveCopyMoveError\n)\n\n\n# Message constants\nMSG_STORE_NOT_INITIALISED_ERROR = ('You need to call {0} init first.'\n .format(__name__))\nMSG_PERMISSION_ERROR = 'Nah-ah!'\nMSG_FILE_NOT_FOUND = 'Error: {0} is not in the password store.'\nMSG_RECURSIVE_COPY_MOVE_ERROR = 'Error: Can\\'t {0} a directory into itself.'\n\n# Tree constants\nif locale.getdefaultlocale()[1].startswith('UTF'):\n SPACES = ' '\n BRIDGE = '│ '\n BRANCH = '├── '\n ENDING = '└── '\nelse:\n SPACES = ' '\n BRIDGE = '| '\n BRANCH = '|-- '\n ENDING = '`-- '\n\n\ndef _gen_tree(lines):\n \"\"\"Create hierarchical file tree from key names.\n\n :param list lines: A list of key names from the password store.\n\n :rtype: dict\n :returns: A nested dictionary with directories and key names as\n it's keys.\n\n \"\"\"\n tree = {}\n for line in lines:\n ctree = tree\n for segment in line.split(os.sep):\n if segment not in ctree:\n ctree[segment] = {}\n ctree = ctree[segment]\n\n return tree\n\n\ndef _print_name(name, num_children):\n \"\"\"Print a name with added styles.\n\n If `num_children` is larger than nil, `name` will be printed in\n bold face and in blue, to differentiate it as a directory and not\n a key.\n\n :param str name: The name to be printed.\n\n :param int num_children: The number of children the leaf has.\n\n \"\"\"\n # pass colors folders blue, so we do too.\n if num_children > 0:\n click.secho(name, bold=True, fg='blue')\n else:\n click.echo(name)\n\n\ndef _print_tree(tree, seperators=None):\n \"\"\"Print a depth indented listing.\n\n The algorithm for printing the tree has been taken from `doctree`_\n written by Mihai Ciumeică and licensed under the MIT licence. The\n code has been adapted to fit our needs.\n\n .. _doctree: https://github.com/cmihai/docktree\n\n :param dict tree: A dictionary created by\n :func:`passpy.__main__._gen_tree`.\n\n :param list seperators: (optional) The seperators to print before\n the leaf name. 
Leave empty when calling this function.\n\n \"\"\"\n if seperators is None:\n seperators = []\n\n length = len(tree)\n for i, entry in enumerate(sorted(tree, key=str.lower)):\n num_children = len(tree[entry])\n for seperator in seperators:\n if seperator:\n click.echo(BRIDGE, nl=False)\n else:\n click.echo(SPACES, nl=False)\n if i < length - 1:\n click.echo(BRANCH, nl=False)\n _print_name(entry, num_children)\n _print_tree(tree[entry], seperators + [1])\n else:\n click.echo(ENDING, nl=False)\n _print_name(entry, num_children)\n _print_tree(tree[entry], seperators + [0])\n\n\nclass PassGroup(click.Group):\n \"\"\"Custom group for command name aliases.\n \"\"\"\n def get_command(self, ctx, cmd_name):\n \"\"\"Allow aliases for commands.\n \"\"\"\n if cmd_name == 'list':\n cmd_name = 'ls'\n elif cmd_name == 'search':\n cmd_name = 'find'\n elif cmd_name == 'gen':\n cmd_name = 'generate'\n elif cmd_name == 'add':\n cmd_name = 'insert'\n elif cmd_name in ['remove', 'delete']:\n cmd_name = 'rm'\n elif cmd_name == 'rename':\n cmd_name = 'mv'\n elif cmd_name == 'copy':\n cmd_name = 'cp'\n\n # TODO(benedikt) Figure out how to make 'show' the default\n # command and pass cmd_name as the first argument.\n rv = click.Group.get_command(self, ctx, cmd_name)\n if rv is not None:\n return rv\n\n\n@click.group(cls=PassGroup)\n@click.option('--gpg-bin', envvar='PYPASS_GPG_BIN', default='gpg2',\n help='The path to your gpg2 binary. Only necessary '\n 'if gpg2 is not already in your PATH. Alternatively '\n 'you can set the PYPASS_GPG_BIN environment variable '\n 'with the path.')\n@click.option('--git-bin', envvar='PYPASS_GIT_BIN', default='git',\n help='The path to your git binary. Only necessary '\n 'if git is not already in your PATH. Alternatively '\n 'you can set the PYPASS_GIT_BIN environment variable '\n 'with the path.')\n@click.option('--store-dir', envvar=['PYPASS_STORE_DIR', 'PASSWORD_STORE_DIR'],\n default='~/.password-store',\n help='The path to the directory to use for the '\n 'password store. Alternatively you can set the '\n 'PYPASS_STORE_DIR environment variable with the path.')\n@click.option('--no-agent', envvar='PYPASS_NO_AGENT', is_flag=True,\n help='Pass this along if you don\\'t have an ssh agent '\n 'running. Alternatively you can set the PYPASS_NO_AGENT '\n 'environment variable.', default=False)\n@click.pass_context\ndef cli(ctx, gpg_bin, git_bin, store_dir, no_agent):\n \"\"\"passpy is a password manager compatible with ZX2C4's pass written\n in Python.\n\n \"\"\"\n if no_agent:\n use_agent = False\n else:\n use_agent = True\n ctx.obj = Store(gpg_bin, git_bin, store_dir, use_agent, True, True)\n\n\n@cli.command(options_metavar='[ --path,-p ]')\n@click.option('-p', '--path', type=str,\n help='Only set the gpg-ids for the given subfolder.')\n@click.argument('gpg_ids', nargs=-1, metavar='gpg-id')\n@click.pass_context\ndef init(ctx, gpg_ids, path):\n \"\"\"Initialize new password storage and use `gpg-id` for encryption.\n Mutliple gpg-ids may be specified, in order to encrypt each\n password with multiple ids. This command must be run first before\n a password store can be used. If the specified `gpg-id` is\n different from the ye used in any existing files, these files will\n be reencrypted to use the new id. Note that use of an gpg agent\n is recommended so that the batch decryption does not require as\n much user intervention. 
If `--path` or `-p` is specified, along\n with an argument, a specific gpg-id or a set of gpg-ids is\n assigned for that specific sub folder of the password store. If\n only the gpg-id is given, and it is an empty string then the\n current `.gpg-id` file for the specfified `sub-folder` (or root if\n unspecified) is removed.\n\n \"\"\"\n try:\n ctx.obj.init_store(list(gpg_ids), path=path)\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n\n click.echo('Password store initialised for {0}.'\n .format(','.join(gpg_ids)))\n\n\n@cli.command()\n@click.argument('subfolder', type=str, default='.')\n@click.pass_context\ndef ls(ctx, subfolder, passthrough=False):\n \"\"\"List names of passwords inside the tree at `subfolder`. This\n command is alternatively names `list`.\n\n \"\"\"\n # TODO(benedikt) Generate pretty output\n try:\n keys = list(ctx.obj.iter_dir(subfolder))\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n # If subfolder is actually a key in the password store pass shows\n # the contents of that key.\n except FileNotFoundError:\n if not passthrough:\n return ctx.invoke(show, pass_name=subfolder, clip=False,\n passthrough=True)\n else:\n click.echo(MSG_FILE_NOT_FOUND.format(subfolder))\n return 1\n\n click.echo('Password Store')\n tree = _gen_tree(keys)\n _print_tree(tree)\n\n\n@cli.command()\n@click.argument('search_string', type=str, metavar='search-string')\n@click.pass_context\ndef grep(ctx, search_string):\n \"\"\"Searches inside each decrypted password file for `search-string`,\n and displays line containing matched string along with filename.\n `search-string` can be a regular expression.\n\n \"\"\"\n try:\n results = ctx.obj.search(search_string)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n\n for key in results:\n if os.path.dirname(key) != '':\n click.secho(os.path.dirname(key) + os.sep, fg='blue', nl=False)\n click.secho(os.path.basename(key), fg='blue', bold=True, nl=False)\n click.secho(':')\n for line, match in results[key]:\n start = match.start()\n end = match.end()\n click.echo(line[:start], nl=False)\n click.secho(line[start:end], nl=False, fg='red', bold=True)\n click.echo(line[end:])\n\n\n@cli.command()\n@click.argument('pass_names', type=str, nargs=-1, metavar='pass-name')\n@click.pass_context\ndef find(ctx, pass_names):\n \"\"\"List names of passwords inside the tree that match `pass-names` and\n print them to the command line. This command is alternatively\n named `search`.\n\n \"\"\"\n try:\n keys = ctx.obj.find(list(pass_names))\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n\n click.echo('Search Terms: {0}'.format(','.join(pass_names)))\n tree = _gen_tree(keys)\n _print_tree(tree)\n\n\n@cli.command(options_metavar='[ --clip,-c ]')\n@click.option('-c', '--clip', is_flag=True,\n help='Copy the password to the clipboard instead of '\n 'printing it to the command line.')\n@click.argument('pass_name', type=str, metavar='pass-name', default='.')\n@click.pass_context\ndef show(ctx, pass_name, clip, passthrough=False):\n \"\"\"Decrypt and print a password named `pass-name`. If `--clip` or\n `-c` is specified, do not print the password but instead copy the\n first line to the clipboard using pyperclip. 
On Linux you will\n need to have xclip/xsel and on OSX pbcopy/pbpaste installed.\n\n \"\"\"\n try:\n data = ctx.obj.get_key(pass_name)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n # If pass_name is actually a folder in the password store pass\n # lists the folder instead.\n except FileNotFoundError:\n if not passthrough:\n return ctx.invoke(ls, subfolder=pass_name, passthrough=True)\n else:\n click.echo(MSG_FILE_NOT_FOUND.format(pass_name))\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n\n if clip:\n pyperclip.copy(data.split('\\n')[0])\n click.echo('Copied {0} to the clipboard.'.format(pass_name))\n else:\n # The key data always ends with a newline. So no need to add\n # another one.\n click.echo(data, nl=False)\n\n\n@cli.command(options_metavar='[ --echo,-e | --multiline,-m ] [ --force,-f ]')\n@click.option('-e', '--echo', 'input_method', flag_value='echo',\n help='Don\\'t ask to repeat the password.')\n@click.option('-m', '--multiline', 'input_method', flag_value='multiline',\n help='Allows entering multiple lines of text for the key.')\n@click.option('-f', '--force', is_flag=True,\n help='Any existing key at pass-name will be '\n 'silently overwritten.')\n@click.argument('pass_name', type=str, metavar='pass-name')\n@click.pass_context\ndef insert(ctx, pass_name, input_method, force):\n \"\"\"Insert a new password into the password store called `pass-name`.\n This will read the new password from standard in. If `--echo` or\n `-e` are NOT specified, disable keyboard echo when the password is\n entered and confirm the password by asking for it twice. If\n `--multiline` or `-m` is specified, lines will be read until EOF\n or Ctrl+D is reached. Otherwise, only a single line from standard\n in read. Prompt before overwriting an existing password, unless\n `--force` or `-f` is specified. 
This command is alternatively\n named `add`\n\n \"\"\"\n if input_method is None:\n input_method = 'neither'\n if input_method == 'multiline':\n click.echo('Enter contents of {0} and press Ctrl+D on an empty '\n 'line when finished:'.format(pass_name))\n lines = []\n while True:\n try:\n line = input('> ')\n lines.append(line)\n except EOFError:\n break\n data = '\\n'.join(lines)\n else:\n echo = (input_method != 'echo')\n data = click.prompt('Enter password for {0}'.format(pass_name),\n hide_input=True, confirmation_prompt=echo,\n type=str)\n\n try:\n ctx.obj.set_key(pass_name, data, force=force)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n\n\n@cli.command()\n@click.argument('pass_name', type=str, metavar='pass-name')\n@click.pass_context\ndef edit(ctx, pass_name):\n \"\"\"Insert a new password or edit an existing one using the editor\n specified by either EDITOR or VISUAL or falling back on the\n platform default if both are not set.\n\n \"\"\"\n try:\n data = ctx.obj.get_key(pass_name)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except FileNotFoundError:\n data = ''\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n\n if 'EDITOR' in os.environ:\n data = click.edit(text=data, editor=os.environ['EDITOR'])\n else:\n data = click.edit(text=data)\n\n if data is None:\n click.echo('Password unchanged.')\n return 1\n\n ctx.obj.set_key(pass_name, data, force=True)\n\n\n@cli.command(options_metavar='[ --no-symbols,-n ] [ --clip,-c ] '\n '[ --in-place,-i ] [ --force,-f ]')\n@click.option('-n', '--no-symbols', is_flag=True,\n help='If specified the password will only consist '\n 'of alphanumeric characters.')\n@click.option('-c', '--clip', is_flag=True,\n help='Copy the password to the clipboard instead of '\n 'printing it on the command line.')\n@click.option('-i', '--in-place', is_flag=True,\n help='Replace the first line of an existing key at '\n 'path-name with the newly generated password.')\n@click.option('-f', '--force', is_flag=True,\n help='Overwrite an existing key at pass-name without '\n 'prompting the user first.')\n@click.argument('pass_name', type=str, metavar='pass-name')\n@click.argument('pass_length', type=int, metavar='pass-length')\n@click.pass_context\ndef generate(ctx, pass_name, pass_length, no_symbols, clip, in_place, force):\n \"\"\"Generate a new password of length `pass-length` and insert into\n `pass-name`. If `--no-symbols` or `-n` is specified, do not use\n any non-alphanumeric characters in the generated password. If\n `--clip` or `-c` is specified, do not print the password but\n instead copy it to the clipboard. On Linux you will need to have\n xclip/xsel and on OSX pbcopy/pbpaste installed. Prompt before\n overwriting an existing password, unless `--force` or `-f` is\n specified. 
If `--in-place` or `-i` is specified, do not\n interactively prompt, and only replace the first line of the\n password file with the new generated password, keeping the\n remainder of the file intact.\n\n \"\"\"\n symbols = not no_symbols\n try:\n password = ctx.obj.gen_key(pass_name, pass_length, symbols,\n force, in_place)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n except FileNotFoundError:\n click.echo(MSG_FILE_NOT_FOUND.format(pass_name))\n return 1\n\n if password is None:\n return 1\n\n if clip:\n pyperclip.copy(password)\n click.echo('Copied {0} to the clipboard.'.format(pass_name))\n else:\n click.echo('The generated password for {0} is:'.format(pass_name))\n click.echo(password)\n\n\n@cli.command(options_metavar='[ --recursive,-r ] [ --force,-f ]')\n@click.option('-r', '--recursive', is_flag=True,\n help='If pass-name is a directory, also remove all '\n 'it\\'s contents.')\n@click.option('-f', '--force', is_flag=True, default=False,\n help='Don\\'t prompt for confirmation when removing a key.')\n@click.argument('pass_name', type=str, metavar='pass-name')\n@click.pass_context\ndef rm(ctx, pass_name, recursive, force):\n \"\"\"Remove the password names `pass-name` from the password store.\n This command is alternatively named `remove` or `delete`. If\n `--recursive` or `-r` is specified, delete pass-name recursively\n if it is a directory. If `--force` or `-f` is specified, do not\n interactively prompt before removal.\n\n \"\"\"\n try:\n ctx.obj.remove_path(pass_name, recursive, force)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except FileNotFoundError:\n click.echo('{0} is not in the password store.'.format(pass_name))\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n\n\n@cli.command(options_metavar='[ --force,-f ]')\n@click.option('-f', '--force', is_flag=True,\n help='If specified existing files at `new-path` '\n 'will be silently overwritten.')\n@click.argument('old_path', type=str, metavar='old-path')\n@click.argument('new_path', type=str, metavar='old-path')\n@click.pass_context\ndef mv(ctx, old_path, new_path, force):\n \"\"\"Renames the password or directory named `old-path` to `new-path`.\n This command is alternatively named `rename`. If `--force` or\n `-f` is specified, silently overwrite `new-path` if it exists. If\n `new-path` ends in a trailing '/', it is always treated as a\n directory. 
Passwords are selectively reencrypted to the\n corresponding keys of their new destination.\n\n \"\"\"\n try:\n ctx.obj.move_path(old_path, new_path, force)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except FileNotFoundError:\n click.echo('{0} is not in the password store.'.format(old_path))\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n except RecursiveCopyMoveError:\n click.echo(MSG_RECURSIVE_COPY_MOVE_ERROR.format('move'))\n return 1\n\n\n@cli.command(options_metavar='[ --force,-f ]')\n@click.option('-f', '--force', is_flag=True,\n help='If specified existing files at `new-path` '\n 'will be silently overwritten.')\n@click.argument('old_path', type=str, metavar='old-path')\n@click.argument('new_path', type=str, metavar='new-path')\n@click.pass_context\ndef cp(ctx, old_path, new_path, force):\n \"\"\"Copies the password or directory names `old-path` to `new-path`.\n This command is alternatively named `copy`. If `--force` is\n specified, silently overwrite `new_path` if it exists. If\n `new-path` ends in a trailing `/`, it is always treated as a\n directory. Passwords are selectively reencrypted to the\n corresponding keys of their new destination.\n\n \"\"\"\n try:\n ctx.obj.copy_path(old_path, new_path, force)\n except StoreNotInitialisedError:\n click.echo(MSG_STORE_NOT_INITIALISED_ERROR)\n return 1\n except FileNotFoundError:\n click.echo('{0} is not in the password store.'.format(old_path))\n return 1\n except PermissionError:\n click.echo(MSG_PERMISSION_ERROR)\n return 1\n except RecursiveCopyMoveError:\n click.echo(MSG_RECURSIVE_COPY_MOVE_ERROR.format('copy'))\n return 1\n\n\n@cli.command()\n@click.argument('git_args', type=str, metavar='git-command-args', nargs=-1)\n@click.pass_context\ndef git(ctx, git_args):\n \"\"\"If the password store is a git repository, pass `args` as arguments\n to `git` using the password store as the git repository. 
If\n `args` is `init`, in addition to initializing the git repository,\n add the current contents of the password store to the repository\n in an initial commit.\n\n \"\"\"\n try:\n ctx.obj.git(*list(git_args))\n except GitCommandError as e:\n click.echo(e)\n return 1\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":2915392858262720500,"string":"2,915,392,858,262,720,500"},"line_mean":{"kind":"number","value":34.986417657,"string":"34.986418"},"line_max":{"kind":"number","value":92,"string":"92"},"alpha_frac":{"kind":"number","value":0.6312983582,"string":"0.631298"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109417,"cells":{"repo_name":{"kind":"string","value":"AQORN/thunder-engine"},"path":{"kind":"string","value":"thunder_web/monitor_node.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2757"},"content":{"kind":"string","value":"# @author: Binoy\n# @create_date: 17-Apr-2015\n# @modified by: binoy \n# @modified_date: 17-Apr-2015\n# @linking to other page: \n# @description: manage cron job module\n\n# importing required modules\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"thunder.settings\")\nfrom cloud.models import *\nfrom cloud.views import *\nimport re\nfrom django.db import connections\nfrom deployment.common import *\n\ncursor = connections['zabbix'].cursor()\n\n# Getting all the nodes in the system\nnodes = Nodelist.objects.all()\n\n# Looping through the nodes\nfor node in nodes:\n \n # Creating the sql query to fetch the details from the history table.\n try:\n \n currStatus = isNodeActive(node.host_name)\n preStatus = node.node_up\n \n # check both status are not equal, then add it to alert\n if currStatus != preStatus:\n \n # save current node status\n node.node_up = currStatus\n node.save()\n \n # if node is up\n if currStatus:\n \n # parameters for the zabbix alert\n msgTxt = 'Node '+ node.host_name +' is UP.'\n params = {\n 'alert_type': 'Node', \n 'referece_id': node.id,\n 'alert_content': msgTxt, \n 'alert_status' : 'S'\n }\n thunderAlertAdd(params, True)\n \n # Saving the node details into the node log\n saveJobNodeLog(0, node, msgTxt, msgTxt, 1)\n print 'up........'\n \n else:\n \n # parameters for the zabbix alert\n msgTxt = 'Node '+ node.host_name +' is Down.'\n params = {\n 'alert_type': 'Node', \n 'referece_id': node.id,\n 'alert_content': msgTxt, \n 'alert_status' : 'F'\n }\n thunderAlertAdd(params, True)\n \n # Saving the node details into the node log\n saveJobNodeLog(0, node, msgTxt, msgTxt, 0)\n print 'Down........'\n \n except Exception, e:\n print e\n\n\n'''\n###\ncheck existing services are down like cobbler,chef, zabbix etc\nIf down enable it\n###\n''' \n \n# Getting the services from the tables\ngetServices = MonitorService.objects.filter(status = 1)\n\n# looping through the services available\nfor getService in getServices:\n \n # Get the service status\n outputStr = getServiceDetails(getService.command)\n \n # if error occurred while deployment\n if \"down\" in outputStr:\n executeServiceCommnd(getService.name)\n 
"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-527449063338068400,"string":"-527,449,063,338,068,400"},"line_mean":{"kind":"number","value":28.3404255319,"string":"28.340426"},"line_max":{"kind":"number","value":73,"string":"73"},"alpha_frac":{"kind":"number","value":0.5342763874,"string":"0.534276"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109418,"cells":{"repo_name":{"kind":"string","value":"StefanRijnhart/odoomrp-wip"},"path":{"kind":"string","value":"purchase_secondary_unit/__openerp__.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1517"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see http://www.gnu.org/licenses/.\n#\n##############################################################################\n\n{\n \"name\": \"Unit of Purchase\",\n \"version\": \"1.0\",\n \"summary\": \"Purchase secondary unit\",\n \"depends\": [\n \"product\",\n \"purchase\",\n ],\n \"author\": \"OdooMRP team\",\n \"website\": \"http://www.odoomrp.com\",\n \"contributors\": [\n \"Oihane Crucelaegui \",\n \"Pedro M. 
Baeza \",\n \"Ana Juaristi \"\n ],\n \"category\": \"Purchase Management\",\n \"data\": [\n \"data/product_data.xml\",\n \"views/product_view.xml\",\n \"views/purchase_view.xml\",\n \"views/pricelist_view.xml\",\n ],\n \"installable\": True,\n}\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-7948394462924185000,"string":"-7,948,394,462,924,185,000"},"line_mean":{"kind":"number","value":35.119047619,"string":"35.119048"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.5754779169,"string":"0.575478"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109419,"cells":{"repo_name":{"kind":"string","value":"coolsnow77/bankeasytrader_v1"},"path":{"kind":"string","value":"setup.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2870"},"content":{"kind":"string","value":"from setuptools import setup\r\n\r\nimport easytrader\r\n\r\nlong_desc = \"\"\"\r\neasytrader\r\n===============\r\n\r\n* easy to use to trade in China Stock\r\n\r\nInstallation\r\n--------------\r\n\r\npip install easytrader\r\n\r\nUpgrade\r\n---------------\r\n\r\n pip install easytrader --upgrade\r\n\r\nQuick Start\r\n--------------\r\n\r\n::\r\n\r\n import easytrader\r\n\r\n user = easytrader.use('ht')\r\n\r\n user.prepare('account.json')\r\n\r\n user.balance\r\n\r\nreturn::\r\n\r\n [{ 'asset_balance': '资产总值',\r\n 'current_balance': '当前余额',\r\n 'enable_balance': '可用金额',\r\n 'market_value': '证券市值',\r\n 'money_type': '币种',\r\n 'pre_interest': '预计利息' ]}\r\n\r\n user.position\r\n\r\nreturn::\r\n\r\n [{'cost_price': '摊薄成本价',\r\n 'current_amount': '当前数量',\r\n 'enable_amount': '可卖数量',\r\n 'income_balance': '摊薄浮动盈亏',\r\n 'keep_cost_price': '保本价',\r\n 'last_price': '最新价',\r\n 'market_value': '证券市值',\r\n 'position_str': '定位串',\r\n 'stock_code': '证券代码',\r\n 'stock_name': '证券名称'}]\r\n\r\n user.entrust\r\n\r\nreturn::\r\n\r\n [{'business_amount': '成交数量',\r\n 'business_price': '成交价格',\r\n 'entrust_amount': '委托数量',\r\n 'entrust_bs': '买卖方向',\r\n 'entrust_no': '委托编号',\r\n 'entrust_price': '委托价格',\r\n 'entrust_status': '委托状态', # 废单 / 已报\r\n 'report_time': '申报时间',\r\n 'stock_code': '证券代码',\r\n 'stock_name': '证券名称'}]\r\n\r\n user.buy('162411', price=5.55)\r\n\r\n user.sell('16411', price=5.65)\r\n\r\n\"\"\"\r\n\r\nsetup(\r\n name='easytrader',\r\n version=easytrader.__version__,\r\n description='A utility for China Stock Trade',\r\n long_description = long_desc,\r\n author='shidenggui',\r\n author_email='longlyshidenggui@gmail.com',\r\n license='BSD',\r\n url='https://github.com/shidenggui/easytrader',\r\n keywords='China stock trade',\r\n install_requires=[\r\n 'demjson',\r\n 'requests',\r\n 'logbook',\r\n 'anyjson',\r\n 'six'\r\n ],\r\n classifiers=['Development Status :: 4 - Beta',\r\n 'Programming Language :: Python :: 2.6',\r\n 'Programming Language :: Python :: 2.7',\r\n 'Programming Language :: Python :: 3.2',\r\n 'Programming Language :: Python :: 3.3',\r\n 'Programming Language :: Python :: 3.4',\r\n 'Programming Language :: Python :: 3.5',\r\n 'License :: OSI Approved :: BSD License'],\r\n packages=['easytrader', 'easytrader.config', 'easytrader.thirdlibrary'],\r\n package_data={'': ['*.jar', '*.json'], 'config': ['config/*.json'], 'thirdlibrary': 
['thirdlibrary/*.jar']},\r\n)\r\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":1468537289616456000,"string":"1,468,537,289,616,456,000"},"line_mean":{"kind":"number","value":23.0754716981,"string":"23.075472"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.4969902182,"string":"0.49699"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109420,"cells":{"repo_name":{"kind":"string","value":"narendrameena/featuerSelectionAssignment"},"path":{"kind":"string","value":"gloub.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1478"},"content":{"kind":"string","value":"import numpy as np\nimport csv\nfrom sklearn.datasets import load_svmlight_file\n\n\n\n\ndef golub(data, labels):\n c = []\n for i in range(0,len(data)):\n c.append([labels[i],data[i]]) # append labels with data matrix\n\n negative = []\n positive = []\n\n for i in range(0,len(c)):\n if c[i][0] == [-1]: # for negative condition\n negative.append(c[i][1])\n if c[i][0] == [1]: # for positive condition\n positive.append(c[i][1])\n negMean = np.mean(np.asarray(negative),axis=0).tolist() # calculate mean\n negStd = np.std(np.asarray(negative),axis=0).tolist() # calculate standard deviation\n posMean = np.mean(np.asarray(positive),axis=0).tolist() # calculating mean\n posStd = np.std(np.asarray(positive),axis=0).tolist() # calculating Standard Deviation\n\n score =[]\n for i in range(0,len(negMean)):\n if((posStd[i]+negStd[i]) != 0):\n score.append((posMean[i]-negMean[i])/(posStd[i]+negStd[i]))\n else:\n score.append(0) # zero if denominator is zero\n print(len(score))\n return (score, score) # return score as score , score as asked\n\n\ndef main():\n data = load_svmlight_file(\"leu\")\n X_1 = data[0].todense().tolist()\n y_1 = map(int,data[1])\n\n lables= []\n for i in range(0,len(y_1)):\n lables = lables + [[y_1[i]]]\n\n #print(X_1)\n #print(y_1)\n #print(lables)\n print golub(X_1,lables)\n\nif __name__ == \"__main__\":\n main()"},"license":{"kind":"string","value":"cc0-1.0"},"hash":{"kind":"number","value":-885232497795173100,"string":"-885,232,497,795,173,100"},"line_mean":{"kind":"number","value":28,"string":"28"},"line_max":{"kind":"number","value":93,"string":"93"},"alpha_frac":{"kind":"number","value":0.5744248985,"string":"0.574425"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109421,"cells":{"repo_name":{"kind":"string","value":"mmcauliffe/linguistic-helper-functions"},"path":{"kind":"string","value":"linghelper/distance/dtw.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3026"},"content":{"kind":"string","value":"from numpy import zeros,inf\nfrom scipy.spatial.distance import euclidean\nimport operator\n\ndef dtw_distance(rep_one, rep_two,norm=True):\n \"\"\"Computes the distance between two representations with the same \n number of filters using Dynamic Time Warping.\n \n Parameters\n ----------\n rep_one : 2D array\n First representation to compare. First dimension is time in frames\n or samples and second dimension is the features.\n rep_two : 2D array\n Second representation to compare. 
First dimension is time in frames\n or samples and second dimension is the features.\n \n Returns\n -------\n float\n Distance of dynamically time warping `rep_one` to `rep_two`.\n \n \"\"\"\n \n assert(rep_one.shape[1] == rep_two.shape[1])\n distMat = generate_distance_matrix(rep_one,rep_two)\n return regularDTW(distMat,norm=norm)\n \ndef generate_distance_matrix(source,target):\n \"\"\"Generates a local distance matrix for use in dynamic time warping.\n \n Parameters\n ----------\n source : 2D array\n Source matrix with features in the second dimension.\n target : 2D array\n Target matrix with features in the second dimension.\n \n Returns\n -------\n 2D array\n Local distance matrix.\n \n \"\"\"\n \n sLen = source.shape[0]\n tLen = target.shape[0]\n distMat = zeros((sLen,tLen))\n for i in range(sLen):\n for j in range(tLen):\n distMat[i,j] = euclidean(source[i,:],target[j,:])\n return distMat\n\ndef regularDTW(distMat,norm=True):\n \"\"\"Use a local distance matrix to perform dynamic time warping.\n \n Parameters\n ----------\n distMat : 2D array\n Local distance matrix.\n \n Returns\n -------\n float\n Total unweighted distance of the optimal path through the\n local distance matrix.\n \n \"\"\"\n sLen,tLen = distMat.shape\n totalDistance = zeros((sLen,tLen))\n totalDistance[0:sLen,0:tLen] = distMat\n \n minDirection = zeros((sLen,tLen))\n \n for i in range(1,sLen):\n totalDistance[i,0] = totalDistance[i,0] + totalDistance[i-1,0]\n \n for j in range(1,tLen):\n totalDistance[0,j] = totalDistance[0,j] + totalDistance[0,j-1]\n \n \n \n for i in range(1,sLen):\n for j in range(1,tLen):\n #direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1))\n #totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance\n #minDirection[i,j] = direction\n minDirection[i,j],totalDistance[i,j] = min(enumerate([totalDistance[i-1,j-1] + 2*totalDistance[i,j],\n totalDistance[i-1,j] + totalDistance[i,j],\n totalDistance[i,j-1] + totalDistance[i,j]]), key=operator.itemgetter(1))\n if norm:\n return totalDistance[sLen-1,tLen-1] / (sLen+tLen)\n return totalDistance[sLen-1,tLen-1]\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-5921349442274469000,"string":"-5,921,349,442,274,469,000"},"line_mean":{"kind":"number","value":31.5376344086,"string":"31.537634"},"line_max":{"kind":"number","value":147,"string":"147"},"alpha_frac":{"kind":"number","value":0.6057501652,"string":"0.60575"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109422,"cells":{"repo_name":{"kind":"string","value":"expert360/cfn-params"},"path":{"kind":"string","value":"tests/test_resolution.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2629"},"content":{"kind":"string","value":"import collections\nimport unittest\n\nimport cfnparams.exceptions\nimport cfnparams.resolution\n\n\nBotoCfnStack = collections.namedtuple(\n 'BotoCfnStack',\n ['stack_id', 'stack_name', 'outputs', 'tags']\n)\n\nStackOutput = collections.namedtuple('Output', ['key', 'value'])\n\n\nclass MockStrategy(object):\n def __init__(self, stacks):\n self.stacks = stacks\n\n def __call__(self, cfn, name):\n for stack in self.stacks:\n if stack.stack_name == name:\n yield cfnparams.resolution.Stack(stack)\n\n\nclass ResolverTestCase(unittest.TestCase):\n def setUp(self):\n stacks = [\n BotoCfnStack(\n '1',\n 'example',\n [StackOutput('Foo', 'foo')],\n {}\n ),\n BotoCfnStack(\n '2',\n 'foo',\n 
[StackOutput('Foo', 'staging'), StackOutput('Bar', 'bar')],\n {'Environment': 'staging'}\n ),\n BotoCfnStack(\n '2',\n 'foo',\n [StackOutput('Foo', 'production'), StackOutput('Baz', 'baz')],\n {'Environment': 'production'}\n ),\n ]\n self.strategy = MockStrategy(stacks)\n\n def test_no_filters(self):\n resolver = cfnparams.resolution.Resolver(None, self.strategy, {})\n self.assertEqual(resolver('example', 'Foo'), 'foo')\n self.assertEqual(resolver('foo', 'Bar'), 'bar')\n self.assertEqual(resolver('foo', 'Baz'), 'baz')\n\n # undefined behaviour, but should not raise an exception\n self.assertIsNotNone(resolver('foo', 'Foo'))\n\n with self.assertRaises(cfnparams.exceptions.ResolutionError):\n resolver('example', 'NotPresent')\n\n def test_with_filter(self):\n resolver = cfnparams.resolution.Resolver(\n None,\n self.strategy,\n {'Environment': 'staging'}\n )\n self.assertEqual(resolver('foo', 'Foo'), 'staging')\n self.assertEqual(resolver('foo', 'Bar'), 'bar')\n with self.assertRaises(cfnparams.exceptions.ResolutionError):\n resolver('foo', 'Baz')\n with self.assertRaises(cfnparams.exceptions.ResolutionError):\n resolver('example', 'Foo')\n\n resolver = cfnparams.resolution.Resolver(\n None,\n self.strategy,\n {'Environment': 'production'}\n )\n self.assertEqual(resolver('foo', 'Foo'), 'production')\n self.assertEqual(resolver('foo', 'Baz'), 'baz')\n with self.assertRaises(cfnparams.exceptions.ResolutionError):\n resolver('foo', 'Bar')\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-4560765489366899000,"string":"-4,560,765,489,366,899,000"},"line_mean":{"kind":"number","value":30.6746987952,"string":"30.674699"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.5542031191,"string":"0.554203"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109423,"cells":{"repo_name":{"kind":"string","value":"calpaterson/recall"},"path":{"kind":"string","value":"src/recall/bookmarks.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5227"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n# Recall is a program for storing bookmarks of different things\n# Copyright (C) 2012 Cal Paterson\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n\nfrom urllib.parse import unquote\n\nfrom bottle import abort, request, Bottle, response\n\nfrom recall import convenience as conv\nfrom recall import (\n plugins,\n search,\n data,\n jobs,\n )\n\nfrom bs4 import BeautifulSoup\n\nlogger = conv.logger(\"bookmarks\")\n\napp = Bottle()\napp.install(plugins.ppjson)\napp.install(plugins.auth)\napp.install(plugins.cors)\napp.install(plugins.exceptions)\napp.error_handler = plugins.handler_dict\n\n@app.post(\"//public//\")\ndef add_public(who, when, user):\n if \"~\" not in request.json or \"@\" not in request.json:\n abort(400, \"You must include @ and ~ with all bookmarks\")\n if request.json[\"@\"] != who or who != user[\"email\"]:\n abort(400, \"You may only add bookmarks as yourself\")\n if request.json[\"~\"] != int(when):\n abort(400, \"You must use the same time in the bookmark as you post to\")\n if data.has_problematic_keys(request.json):\n abort(400, \"Bookmarks must not have keys prefixed with $ or £\")\n request.json[\"£created\"] = conv.unixtime()\n conv.db().bookmarks.insert(request.json)\n del request.json[\"_id\"]\n jobs.enqueue(search.IndexRecord(request.json), priority=1)\n response.status = 202\n\n@app.post(\"//private//\")\ndef add_private(who, when, user):\n request.json[\"%private\"] = True\n add_public(who, when, user)\n\n@app.get(\"/public/\")\ndef public_bookmarks():\n query = search.SearchQueryBuilder()\n if \"q\" in request.params:\n query.with_keywords(request.params[\"q\"])\n query.anonymously()\n total, results = search.search(query)\n response.set_header(\"X-Recall-Total\", total)\n if results == []:\n response.status = 404\n data.strip_generated_keys(results)\n return results\n\n@app.get(\"//all/\")\ndef user_all_bookmarks(who, user):\n if who != user[\"email\"]:\n abort(400, \"You may only look at your own bookmarks\")\n query = search.SearchQueryBuilder()\n if \"q\" in request.params:\n query.with_keywords(request.params[\"q\"])\n query.as_user(user)\n total, results = search.search(query)\n if results == []:\n response.status = 404\n data.strip_generated_keys(results)\n return results\n\n@app.route(\"//\", method=\"POST\")\ndef import_(who, user):\n soup = BeautifulSoup(request.body)\n if soup.contents[0] != \"NETSCAPE-Bookmark-file-1\":\n abort(400, \"You must send a bookmark file with the doctype \" +\n \" 'NETSCAPE-Bookmarks-file-1'\")\n anchors = soup.find_all(\"a\")\n bookmarks = []\n add_dates = set()\n for anchor in anchors:\n bookmark = {\n \"~\": int(anchor.attrs.get(\"add_date\", conv.unixtime()))\n }\n while bookmark[\"~\"] in add_dates:\n bookmark[\"~\"] += 1\n add_dates.add(bookmark[\"~\"])\n bookmark[\"hyperlink\"] = anchor.attrs[\"href\"]\n if bookmark[\"hyperlink\"].startswith(\"place\"):\n continue\n bookmark[\"title\"] = anchor.string\n bookmark[\"@\"] = user[\"email\"]\n bookmark[\"%private\"] = True\n bookmark[\"£created\"] = conv.unixtime()\n bookmarks.append(bookmark)\n for each in bookmarks:\n conv.db().eachs.insert(each)\n del each[\"_id\"]\n jobs.enqueue(search.IndexRecord(each), priority=1)\n response.status = 202\n\n@app.get(\"//all/recent/\")\ndef recent(who, user):\n if who != user[\"email\"]:\n abort(400, \"You may only look at your own bookmarks\")\n total, hits = search.search(search.SearchQueryBuilder()\n .sort_by_when()\n .of_size(75)\n .as_user(user)\n .only_user(user))\n response.set_header(\"X-Recall-Total\", total)\n data.strip_generated_keys(hits)\n return hits\n\n@app.get(\"//url/\")\ndef url(who, url_encoded, user):\n if who != user[\"email\"]:\n 
abort(400, \"You may only look at your own bookmarks\")\n url_decoded = unquote(url_encoded)\n query = search.SearchQueryBuilder().of_size(1).as_user(user)\n query.the_url(url_decoded)\n total, hits = search.search(query)\n if total > 0:\n return hits\n else:\n response.status(404)\n\n#### NOT IMPLEMENTED:\n\n@app.get(\"//public/\")\ndef user_public_bookmarks(unused_who):\n abort(501)\n\n# @app.post(\"///edits//=5.1.1',\n 'bitstring>=3.0.2',\n 'python-patterns>=0.0.1',\n ],\n author='RokuSigma Inc.',\n author_email='haiku-lang@monetize.io',\n url='http://www.github.com/monetizeio/haiku-lang/',\n download_url='http://pypi.python.org/packages/source/h/haiku-lang/haiku-lang-%s.tar.gz' % version,\n package_dir={'haiku': 'haiku'},\n packages=packages,\n package_data={'haiku': data_files},\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: Other/Proprietary License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Lisp',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Interpreters',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Utilities',\n ],\n)\n\n# ===----------------------------------------------------------------------===\n# End of File\n# ===----------------------------------------------------------------------===\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":3591831845850132500,"string":"3,591,831,845,850,132,500"},"line_mean":{"kind":"number","value":38.1682242991,"string":"38.168224"},"line_max":{"kind":"number","value":100,"string":"100"},"alpha_frac":{"kind":"number","value":0.6475781436,"string":"0.647578"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109426,"cells":{"repo_name":{"kind":"string","value":"viaict/viaduct"},"path":{"kind":"string","value":"app/repository/custom_form_repository.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3977"},"content":{"kind":"string","value":"from app import db\nfrom app.models.activity import Activity\nfrom app.models.custom_form import CustomFormFollower, CustomForm, \\\n CustomFormResult\n\nfilter_unarchived = db.or_(CustomForm.archived == False,\n CustomForm.archived == None) # noqa\nfilter_active = db.or_(Activity.id == None,\n db.and_(Activity.id != None,\n db.func.now() < Activity.end_time)) # noqa\n\nfilter_archived_inactive = db.or_(CustomForm.archived == True,\n db.and_(Activity.id != None,\n db.func.now() >= Activity.end_time)) # noqa\n\n\ndef get_active_followed_forms_by_user(user_id, group_ids):\n \"\"\"\n Get all forms followed by the user.\n\n Filter archived forms and forms with activities in the past.\n \"\"\"\n q = db.session.query(CustomForm) \\\n .outerjoin(Activity, CustomForm.id == Activity.form_id) \\\n .filter(CustomFormFollower.query\n .filter(CustomForm.id == CustomFormFollower.form_id,\n CustomFormFollower.owner_id == user_id)\n .exists(),\n filter_unarchived, filter_active)\n\n if group_ids is not None:\n q = q.filter(CustomForm.group_id.in_(group_ids))\n\n return q.order_by(CustomForm.id.desc()) \\\n .all()\n\n\ndef get_active_unfollowed_by_user(user_id, group_ids):\n \"\"\"\n Get all active forms not followed by the user.\n\n Filter archived forms and forms with activities in the past.\n \"\"\"\n q = db.session.query(CustomForm) \\\n .outerjoin(Activity, CustomForm.id == Activity.form_id) \\\n .filter(db.not_(CustomFormFollower.query\n .filter(CustomForm.id == 
CustomFormFollower.form_id,\n CustomFormFollower.owner_id == user_id)\n .exists()),\n filter_unarchived, filter_active)\n\n if group_ids is not None:\n q = q.filter(CustomForm.group_id.in_(group_ids))\n\n return q.order_by(CustomForm.id.desc()) \\\n .all()\n\n\ndef get_inactive_forms(group_ids):\n \"\"\"Get all inactive or ssarchived forms.\"\"\"\n q = db.session.query(CustomForm) \\\n .outerjoin(Activity, CustomForm.id == Activity.form_id) \\\n .filter(filter_archived_inactive)\n\n if group_ids is not None:\n q = q.filter(CustomForm.group_id.in_(group_ids))\n\n return q.order_by(CustomForm.id.desc()) \\\n .all()\n\n\ndef get_form_entries_by_form_id(form_id):\n return db.session.query(CustomFormResult) \\\n .filter(CustomFormResult.form_id == form_id) \\\n .order_by(CustomFormResult.created) \\\n .all()\n\n\ndef get_form_by_form_id(form_id):\n return db.session.query(CustomForm) \\\n .filter(CustomForm.id == form_id) \\\n .one_or_none()\n\n\ndef get_form_submission_by_id(form_id, submit_id):\n return db.session.query(CustomFormResult) \\\n .filter(CustomFormResult.id == submit_id,\n CustomFormResult.form_id == form_id) \\\n .one_or_none()\n\n\ndef get_form_submission_by_user_id(form_id, user_id):\n return db.session.query(CustomFormResult) \\\n .filter(CustomFormResult.owner_id == user_id,\n CustomFormResult.form_id == form_id) \\\n .one_or_none()\n\n\ndef get_form_following_by_user_id(form, user_id):\n return db.session.query(CustomFormFollower) \\\n .filter(CustomFormFollower.form == form,\n CustomFormFollower.owner_id == user_id) \\\n .one_or_none()\n\n\ndef delete_form_follow(follower):\n print(follower)\n db.session.delete(follower)\n db.session.commit()\n\n\ndef follow_form(form, user_id):\n cf = CustomFormFollower(form=form, owner_id=user_id)\n db.session.add(cf)\n db.session.commit()\n\n\ndef form_set_archive_status(form, archived):\n form.archived = archived\n db.session.commit()\n\n\ndef form_set_paid_status(submission, paid):\n submission.has_paid = not submission.has_paid\n db.session.commit()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":7212193541876776000,"string":"7,212,193,541,876,776,000"},"line_mean":{"kind":"number","value":30.816,"string":"30.816"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.6110133266,"string":"0.611013"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109427,"cells":{"repo_name":{"kind":"string","value":"ashtonmv/twod_materials"},"path":{"kind":"string","value":"examples/pbe_bandstructure.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1676"},"content":{"kind":"string","value":"\"\"\"\nRelaxes 2D materials in all subdirectories of the current working\ndirectory, along with their most stable competing species. At a\nspecified INTERVAL, checks if all relaxations have converged. 
Once all\nare converged, calculates and plots the formation energies of all 2D\nmaterials as stability_plot.pdf.\n\"\"\"\nfrom __future__ import print_function, division, unicode_literals\n\nimport os\n\nimport time\n\nfrom twod_materials.utils import is_converged\nfrom twod_materials.electronic_structure.startup import (\n run_linemode_calculation\n )\nfrom twod_materials.electronic_structure.analysis import (\n plot_normal_band_structure\n )\n\nINTERVAL = 360 # Seconds between convergence checks\n\ndirectories = [dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir)\n and dir not in ['all_competitors']]\n\nif __name__ == '__main__':\n\n for directory in directories:\n os.chdir(directory)\n run_linemode_calculation()\n os.chdir('../')\n\n loop = True\n while loop:\n print('>> Checking convergence')\n finished = []\n\n for directory in directories:\n if is_converged('{}/pbe_bands'.format(directory)):\n finished.append(directory)\n\n if len(finished) == len(directories):\n print('>> Plotting band structures')\n for directory in finished:\n os.chdir('{}/pbe_bands'.format(directory))\n plot_normal_band_structure()\n os.chdir('../../')\n loop = False\n else:\n print('>> Not all directories converged ({}/{})'.format(\n len(finished), len(directories)))\n\n time.sleep(INTERVAL)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":513743030525387700,"string":"513,743,030,525,387,700"},"line_mean":{"kind":"number","value":30.037037037,"string":"30.037037"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.6366348449,"string":"0.636635"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109428,"cells":{"repo_name":{"kind":"string","value":"RedhawkSDR/framework-codegen"},"path":{"kind":"string","value":"redhawk/codegen/jinja/cpp/component/pull/mapping.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2959"},"content":{"kind":"string","value":"#\n# This file is protected by Copyright. Please refer to the COPYRIGHT file\n# distributed with this source distribution.\n#\n# This file is part of REDHAWK core.\n#\n# REDHAWK core is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation, either version 3 of the License, or (at your option) any\n# later version.\n#\n# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. 
If not, see http://www.gnu.org/licenses/.\n#\n\nfrom redhawk.codegen.model.softwarecomponent import ComponentTypes\nfrom redhawk.codegen.lang.idl import IDLInterface\n\nfrom redhawk.codegen.jinja.cpp.component.base import BaseComponentMapper\n\nclass PullComponentMapper(BaseComponentMapper):\n def _mapComponent(self, softpkg):\n cppcomp = {}\n cppcomp['baseclass'] = self.baseClass(softpkg)\n cppcomp['userclass'] = self.userClass(softpkg)\n cppcomp['superclasses'] = self.superClasses(softpkg)\n cppcomp['interfacedeps'] = tuple(self.getInterfaceDependencies(softpkg))\n cppcomp['hasmultioutport'] = self.hasMultioutPort(softpkg)\n return cppcomp\n\n @staticmethod\n def userClass(softpkg):\n return {'name' : softpkg.basename()+'_i',\n 'header': softpkg.basename()+'.h',\n 'file' : softpkg.basename()+'.cpp'}\n\n @staticmethod\n def baseClass(softpkg):\n baseclass = softpkg.basename() + '_base'\n return {'name' : baseclass,\n 'header': baseclass+'.h',\n 'file' : baseclass+'.cpp'}\n\n @staticmethod\n def superClasses(softpkg):\n if softpkg.type() == ComponentTypes.RESOURCE:\n name = 'Component'\n elif softpkg.type() == ComponentTypes.DEVICE:\n name = 'Device_impl'\n aggregate = 'virtual POA_CF::AggregatePlainDevice'\n elif softpkg.type() == ComponentTypes.LOADABLEDEVICE:\n name = 'LoadableDevice_impl'\n aggregate = 'virtual POA_CF::AggregateLoadableDevice'\n elif softpkg.type() == ComponentTypes.EXECUTABLEDEVICE:\n name = 'ExecutableDevice_impl'\n aggregate = 'virtual POA_CF::AggregateExecutableDevice'\n else:\n raise ValueError, 'Unsupported software component type', softpkg.type()\n classes = [{'name': name, 'header': ''}]\n if softpkg.descriptor().supports('IDL:CF/AggregateDevice:1.0'):\n classes.append({'name': aggregate, 'header': ''})\n classes.append({'name': 'AggregateDevice_impl', 'header': ''})\n return classes\n"},"license":{"kind":"string","value":"lgpl-3.0"},"hash":{"kind":"number","value":454499221827718600,"string":"454,499,221,827,718,600"},"line_mean":{"kind":"number","value":42.5147058824,"string":"42.514706"},"line_max":{"kind":"number","value":104,"string":"104"},"alpha_frac":{"kind":"number","value":0.6708347415,"string":"0.670835"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109429,"cells":{"repo_name":{"kind":"string","value":"ohld/miptnews"},"path":{"kind":"string","value":"objs/rssparser.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1446"},"content":{"kind":"string","value":"import ssl\nimport time\nimport binascii\nimport feedparser\nfrom tqdm import tqdm\n\nfrom objs.news import News\n\n\ndef conv_to_rss(link):\n if \"vk.com\" in link:\n group = link[link.find(\"vk.com\") + 7:]\n return \"http://feed.exileed.com/vk/feed/%s\" % (group + \"?count=5\")\n return link\n\n\nclass RssParser(object):\n \"\"\"\n Класс для парсинга RSS-канала.\n Выделяет из общей информации только интереующие нас поля: Заголовок, ссылку, дату публикации.\n \"\"\"\n\n def __init__(self, config_links):\n self.links = [conv_to_rss(config_links[i]) for i in config_links]\n self.news = []\n\n def refresh(self):\n self.news = []\n for link in tqdm(self.links, desc=\"Getting news\"):\n data = 0\n if hasattr(ssl, '_create_unverified_context'):\n ssl._create_default_https_context = ssl._create_unverified_context\n data = feedparser.parse(link)\n self.news += [News(binascii.b2a_base64(data['feed']['title'].replace(' VK feed', '').encode()).decode(),\n binascii.b2a_base64(entry['link'].encode()).decode(),\n 
int(time.mktime(entry['published_parsed']))) for entry in data['entries']]\n time.sleep(1)\n\n def __repr__(self):\n return \"\" % (self.link, len(self.news))\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-8413308440841147000,"string":"-8,413,308,440,841,147,000"},"line_mean":{"kind":"number","value":32.65,"string":"32.65"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.5787518574,"string":"0.578752"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109430,"cells":{"repo_name":{"kind":"string","value":"tudelft3d/val3dity"},"path":{"kind":"string","value":"tools/python/gml2poly/geomtools.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10595"},"content":{"kind":"string","value":"\n# val3dity - Copyright (c) 2011-2016, Hugo Ledoux. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the authors nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL HUGO LEDOUX BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n\n\nimport math\nimport random\n\nTOLERANCE = 1e-3\n\nclass Point:\n def __init__(self, cx=0.0, cy=0, cz=0, cid=0):\n self.x = float(cx)\n self.y = float(cy)\n self.z = float(cz)\n self.id = int(cid)\n def __repr__(self):\n return str(self.x) + \" \" + str(self.y) + \" \" + str(self.z)\n def str_poly(self):\n return str(self.id) + \" \" + \"{:f}\".format(self.x) + \" \" + \"{:f}\".format(self.y) + \" \" + \"{:f}\".format(self.z)\n def str_off(self):\n return \"{:f}\".format(self.x) + \" \" + \"{:f}\".format(self.y) + \" \" + \"{:f}\".format(self.z)\n def __str__(self):\n return \"{:f}\".format(self.x) + \",\" + \"{:f}\".format(self.y) + \",\" + \"{:f}\".format(self.z)\n def __getitem__(self, index):\n if index < 0 or index > 2:\n raise Exception(\"out of bound for Point access.\")\n if index == 0:\n return self.x\n if index == 1:\n return self.y\n if index == 2:\n return self.z\n def __eq__(self, other):\n if (cmp_doubles(self.x, other.x) == 0 and \n cmp_doubles(self.y, other.y) == 0 and\n cmp_doubles(self.z, other.z) == 0 ):\n return True\n else:\n return False\n def __ne__(self, other):\n return not self.__eq__(other)\n def __neg__(self):\n self.x = -(self.x)\n self.y = -(self.y)\n self.z = -(self.z)\n return self\n def cmp_one_dim(self, other, dim):\n if dim == 'x':\n return cmp_doubles(self.x, other.x)\n if dim == 'y':\n return cmp_doubles(self.y, other.y)\n if dim == 'z':\n return cmp_doubles(self.z, other.z)\n def distance_to(self, other):\n return math.sqrt( pow(other.x - self.x, 2) +\n pow(other.y - self.y, 2) + \n pow(other.z - self.z, 2) ) \n def distance_to_proj(self, other, m, n):\n return math.sqrt( pow(other[m] - self[m], 2) +\n pow(other[n] - self[n], 2) )\n def translate_random(self, dist):\n a = random.uniform(-dist, dist)\n self.x += a\n self.y += a\n self.z += a\n def copy(self):\n return Point(self.x, self.y, self.z)\n\n \nclass Vector(Point):\n def __str__(self):\n return \"VECTOR(\" + str(self.x) + \" \" + str(self.y) + \" \" + str(self.z) + \")\"\n def __add__(self, o):\n r = Vector()\n r.x = self.x + o.x\n r.y = self.y + o.y\n r.z = self.z + o.z\n return r\n def set_vector(self, a, b):\n self.x = b.x - a.x\n self.y = b.y - a.y\n self.z = b.z - a.z \n def length(self):\n return math.sqrt((self.x * self.x) +\n (self.y * self.y) +\n (self.z * self.z))\n def cross_product(self, o):\n r = Vector()\n r.x = (self.y * o.z) - (self.z * o.y)\n r.y = -( (self.x * o.z) - (self.z * o.x) )\n r.z = (self.x * o.y) - (self.y * o.x)\n return r\n def dot_product(self, o):\n return (self.x * o.x) + (self.y * o.y) + (self.z * o.z) \n def normalise(self):\n length = self.length()\n #-- raise an if the lenght of the vector is 0 (or near 0)\n if abs(length) < TOLERANCE:\n 1.0 / 0 #-- will raise a ZeroDivisionError :)\n else:\n self.x = self.x / length\n self.y = self.y / length\n self.z = self.z / length\n return self\n \n \ndef get_projection_plane(p1, p2, p3):\n normal = geomtools.get_normal_rhr(p1, p2, p3)\n #-- check if the plane if vertical, and assign the projection plane\n m = 0 #-- xy plane\n n = 1\n if geomtools.cmp_doubles(normal.z, 0.0) == 
0:\n if geomtools.cmp_doubles(normal.y, 0.0) == 00:\n m = 1 #-- yz plane\n n = 2\n else:\n m = 0 #-- xz plane\n n = 2 \n return m, n\n \n\ndef cmp_doubles(a, b):\n if abs(a-b) <= TOLERANCE:\n return 0\n else:\n if a - b > 0:\n return 1\n else:\n return -1\n \n \ndef orient2D(a, b, p):\n \"\"\" \n Determine if a Point_25 pt is above or below the plane defined by a-b-c (anti-clockwise order)\n \n Input: a,b,c : the Point_25 in anti-clockwise order\n p : the point to test\n\n Output: 1 -> pt is BELOW of the plane (OK for left-hand rule)\n 0 -> 4 points are coplanar\n -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule)\n\n Note: \"above and below\" means when looking from above;\n or when using the left-hand rule\n \"\"\"\n re = det3x3t(a, b, p)\n if abs(re) < TOLERANCE:\n return 0\n elif re > 0:\n return 1\n else:\n return -1\n\n \ndef orient2D_proj(a, b, p, m, n):\n \"\"\" \n Determine if a Point_25 pt is above or below the plane defined by a-b-c (anti-clockwise order)\n \n Input: a,b,c : the Point_25 in anti-clockwise order\n p : the point to test\n\n Output: 1 -> pt is BELOW of the plane (OK for left-hand rule)\n 0 -> 4 points are coplanar\n -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule)\n\n Note: \"above and below\" means when looking from above;\n or when using the left-hand rule\n \"\"\"\n re = det3x3t_expand(a[m], a[n], b[m], b[n], p[m], p[n])\n if abs(re) < TOLERANCE:\n return 0\n elif re > 0:\n return 1\n else:\n return -1 \n \n \ndef det3x3t(a, b, c):\n at = Point(a.x - c.x, a.y - c.y)\n bt = Point(b.x - c.x, b.y - c.y)\n return (at.x * bt.y) - (at.y * bt.x)\n\n\ndef det3x3t_expand(ax, ay, bx, by, cx, cy):\n at = Point(ax - cx, ay - cy)\n bt = Point(bx - cx, by - cy)\n return (at.x * bt.y) - (at.y * bt.x)\n\n\ndef det3x3_point(a, b, c):\n return det3x3(a.x, a.y, a.z, \n b.x, b.y, b.z, \n c.x, c.y, c.z)\n\ndef det3x3(ax, ay, az, bx, by, bz, cx, cy, cz):\n temp1 = ax * (by * cz - bz * cy)\n temp2 = ay * (bx * cz - bz * cx)\n temp3 = az * (bx * cy - by * cx)\n return temp1 - temp2 + temp3\n\n\ndef det4x4t(a, b, c, d):\n return det3x3(a.x-d.x, a.y-d.y, a.z-d.z, \n b.x-d.x, b.y-d.y, b.z-d.z,\n c.x-d.x, c.y-d.y, c.z-d.z) \n\n \ndef orient3D(a, b, c, p):\n \"\"\" \n Determine if a Point p is above or below the plane defined by the Points\n abc (in anti-clockwise order looking from above)\n \n Input: a,b,c : the Points in anti-clockwise order looking from above\n p : the Point to test\n\n Output: 1 -> pt is BELOW of the plane (OK for left-hand rule)\n 0 -> 4 points are coplanar\n -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule)\n\n Note: \"above and below\" means when looking from above;\n or when using the left-hand rule\n \"\"\"\n re = det4x4t(a, b, c, p)\n if abs(re) < TOLERANCE:\n return 0\n elif re > 0:\n return 1\n else:\n return -1\n\n\n#def get_area_triangle(a, b, c):\n# \"\"\"Area of triangle (projected on the 2D plane.)\n# \n# Input: a,b,c: the Points\n# Ouput: value of area\n# \n# \"\"\"\n# return abs(det3x3t(a, b, c) / 2)\n\n\ndef get_volume_tetra(a, b, c, d):\n \"\"\"Volume of a tetrahedron.\"\"\"\n return abs(det4x4t(a, b, c, d) / 6)\n\n\ndef get_normal_rhr(a, b, c):\n \"\"\" \n Return the normal Vector to the 3 Points a-b-c, acc. 
to a right-hand rule.\n If a-b-c are CCW viewed from above, then the Vector points above\n \n Input: a,b,c : the Points in anti-clockwise order looking from above\n\n Output: the Vector\n \n \"\"\"\n v1 = Vector(b.x - a.x, b.y - a.y, b.z - a.z)\n v2 = Vector(c.x - a.x, c.y - a.y, c.z - a.z)\n return v1.cross_product(v2)\n\n\ndef point_in_triangle_3D(a, b, c, p, m, n):\n re = True\n oTr = orient2D_proj(a, b, c, m, n)\n if ((orient2D_proj(a, b, p, m, n) != oTr) or\n (orient2D_proj(b, c, p, m, n) != oTr) or\n (orient2D_proj(c, a, p, m, n) != oTr)): \n re = False\n return re\n \n\ndef get_midpoint_of_segment(a, b):\n mid = Point()\n mid.x = (a.x + b.x) / 2.0\n mid.y = (a.y + b.y) / 2.0\n mid.z = (a.z + b.z) / 2.0\n return mid\n\n\ndef point_in_tetra(a, b, c, d, p):\n otetra = orient3D(a, b, c, d)\n assert(otetra != 0)\n if ( (orient3D(a, b, c, p) == otetra) and\n (orient3D(b, d, c, p) == otetra) and\n (orient3D(c, d, a, p) == otetra) and\n (orient3D(d, b, a, p) == otetra) ):\n return True\n else:\n return False\n \n \n oTr = orient2D_proj(a, b, c, m, n)\n if ((orient2D_proj(a, b, p, m, n) != oTr) or\n (orient2D_proj(b, c, p, m, n) != oTr) or\n (orient2D_proj(c, a, p, m, n) != oTr)): \n re = False\n return re\n\ndef intersection_plane_segment(ring, a, b):\n pass \n \n \ndef get_circumsphere(a, b, c, d):\n pass\n \n \n \n \n \n\nif __name__ == \"__main__\":\n p1 = Point(3, 0, 1)\n print p1[0], p1.x\n print p1[-1]\n \n# p1 = Point(1.0, 2.00000000001, 3)\n# p2 = Point(1, 2, 3)\n# print p1 == p2\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":7664310428974760000,"string":"7,664,310,428,974,760,000"},"line_mean":{"kind":"number","value":30.2536873156,"string":"30.253687"},"line_max":{"kind":"number","value":117,"string":"117"},"alpha_frac":{"kind":"number","value":0.5220386975,"string":"0.522039"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109431,"cells":{"repo_name":{"kind":"string","value":"opendatateam/udata"},"path":{"kind":"string","value":"udata/tests/frontend/test_topic_frontend.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2471"},"content":{"kind":"string","value":"from udata import search\nfrom udata.tests import SearchTestMixin, TestCase\nfrom udata.core.dataset.factories import VisibleDatasetFactory\nfrom udata.core.dataset.search import DatasetSearch\nfrom udata.core.topic.factories import TopicFactory\nfrom udata.core.topic.search import topic_search_for\n\n\nclass TopicSearchTest(SearchTestMixin, TestCase):\n def test_empty_search_no_match(self):\n '''Should return no result if no data match the tags'''\n with self.autoindex():\n VisibleDatasetFactory.create_batch(2, tags=['whatever'])\n topic = TopicFactory(tags=['no-match'])\n\n query = topic_search_for(topic, DatasetSearch)\n result = search.query(query)\n\n self.assertEqual(len(result), 0)\n\n def test_empty_search_with_match(self):\n '''Should only return data with at least one tag'''\n with self.autoindex():\n included = VisibleDatasetFactory.create_batch(2, tags=['in'])\n excluded = VisibleDatasetFactory.create_batch(2, tags=['out'])\n topic = TopicFactory(tags=['in', 'no-match'])\n\n query = topic_search_for(topic, DatasetSearch)\n result = search.query(query)\n\n found = [d.id for d in result]\n\n self.assertEqual(len(found), 2)\n\n for dataset in included:\n self.assertIn(dataset.id, found)\n for dataset in excluded:\n self.assertNotIn(dataset.id, found)\n\n def test_empty_search_with_filter_and_match(self):\n '''Should match both 
the topic criteria and the query'''\n with self.autoindex():\n # Match both the topic condition but the queried tag\n match = VisibleDatasetFactory.create_batch(2, tags=[\n 'in', 'filtered'\n ])\n # Match the topic condition but not the queried tag\n no_match = VisibleDatasetFactory.create_batch(2, tags=['in'])\n # Excluded because not matching one of the topic tag\n excluded = VisibleDatasetFactory.create_batch(2, tags=[\n 'out', 'filtered'\n ])\n topic = TopicFactory(tags=['in', 'no-match'])\n\n query = topic_search_for(topic, DatasetSearch, tag='filtered')\n result = search.query(query)\n\n found = [d.id for d in result]\n\n self.assertEqual(len(found), 2)\n\n for dataset in match:\n self.assertIn(dataset.id, found)\n for dataset in no_match + excluded:\n self.assertNotIn(dataset.id, found)\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":911704196157743100,"string":"911,704,196,157,743,100"},"line_mean":{"kind":"number","value":37.0153846154,"string":"37.015385"},"line_max":{"kind":"number","value":74,"string":"74"},"alpha_frac":{"kind":"number","value":0.634965601,"string":"0.634966"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109432,"cells":{"repo_name":{"kind":"string","value":"mysql/mysql-utilities"},"path":{"kind":"string","value":"mysql-test/t/export_exclude.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8611"},"content":{"kind":"string","value":"#\n# Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n\n\"\"\"\nexport_exclude test.\n\"\"\"\n\nimport os\n\nimport export_parameters_def\n\nfrom mysql.utilities.exception import MUTLibError\n\n\nclass test(export_parameters_def.test):\n \"\"\"check exclude parameter for export utility\n This test executes a series of export database operations on a single\n server using a variety of exclude options. 
It uses the\n export_parameters_def test as a parent for setup and teardown methods.\n \"\"\"\n\n def check_prerequisites(self):\n return export_parameters_def.test.check_prerequisites(self)\n\n def setup(self, spawn_servers=True):\n return export_parameters_def.test.setup(self)\n\n def run(self):\n self.res_fname = \"result.txt\"\n\n from_conn = \"--server={0}\".format(\n self.build_connection_string(self.server1))\n\n cmd_str = (\"mysqldbexport.py --skip=events,grants --no-headers {0} \"\n \"--format=CSV util_test --skip-gtid\".format(from_conn))\n\n test_num = 1\n comment = \"Test case {0} - exclude by name.\".format(test_num)\n cmd_opts = (\"{0} --exclude=util_test.v1 \"\n \"--exclude=util_test.t4\".format(cmd_str))\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude by name using \"\n \"backticks.\".format(test_num))\n if os.name == 'posix':\n cmd_opts = (\"{0} --exclude='`util_test`.`v1`' \"\n \"--exclude='`util_test`.`t4`'\".format(cmd_str))\n else:\n cmd_opts = ('{0} --exclude=\"`util_test`.`v1`\" '\n '--exclude=\"`util_test`.`t4`\"'.format(cmd_str))\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude using SQL LIKE \"\n \"pattern #1.\".format(test_num))\n cmd_opts = \"{0} -x f% -x _4\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude using SQL LIKE \"\n \"pattern #2.\".format(test_num))\n cmd_opts = \"{0} -x util_test.t%\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude using SQL LIKE \"\n \"pattern #3.\".format(test_num))\n cmd_opts = \"{0} -x %\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude using REGEXP \"\n \"pattern.\".format(test_num))\n cmd_opts = \"{0} -x ^f -x 4$ --regexp\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude by name and SQL LIKE \"\n \"pattern.\".format(test_num))\n cmd_opts = (\"{0} --exclude=f% --exclude=_4 -x p% --exclude=v1 \"\n \"--exclude=util_test.trg\".format(cmd_str))\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude by name and REGEXP \"\n \"pattern.\".format(test_num))\n cmd_opts = (\"{0} --exclude=^f --exclude=4$ -x ^p --exclude=v1 \"\n \"--exclude=util_test.trg --regexp\".format(cmd_str))\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude everything using SQL LIKE \"\n \"pattern.\".format(test_num))\n cmd_opts = \"{0} -x % \".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - exclude everything using REGEXP \"\n \"pattern.\".format(test_num))\n if os.name == 'posix':\n cmd_opts = \"{0} 
-x '.*' --regexp\".format(cmd_str)\n else:\n cmd_opts = '{0} -x \".*\" --regexp'.format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n # Note: Unlike SQL LIKE pattern that matches the entire value, with a\n # SQL REGEXP pattern match succeeds if the pattern matches anywhere in\n # the value being tested.\n # See: http://dev.mysql.com/doc/en/pattern-matching.html\n test_num += 1\n comment = (\"Test case {0}a - SQL LIKE VS REGEXP pattern (match entire \"\n \"value VS match anywhere in value).\".format(test_num))\n cmd_opts = \"{0} -x 1 -x t\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n comment = (\"Test case {0}b - SQL LIKE VS REGEXP pattern (match entire \"\n \"value VS match anywhere in value).\".format(test_num))\n cmd_opts = \"{0} -x 1 -x t --regexp\".format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - try exclude everything without using \"\n \"pattern.\".format(test_num))\n if os.name == 'posix':\n cmd_opts = \"{0} -x 'u*' \".format(cmd_str)\n else:\n cmd_opts = '{0} -x \"u*\" '.format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n test_num += 1\n comment = (\"Test case {0} - try exclude everything with using \"\n \"pattern and regexp.\".format(test_num))\n if os.name == 'posix':\n cmd_opts = \"{0} -x 'u*' --regexp\".format(cmd_str)\n else:\n cmd_opts = '{0} -x \"u*\" --regexp'.format(cmd_str)\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n export_parameters_def.test._mask_csv(self)\n\n test_num += 1\n comment = (\"Test case {0} - exclude by name using --export=both.\"\n \"\".format(test_num))\n cmd_opts = (\"{0} --export=both \"\n \"--exclude=util_test.t1 --exclude=util_test.t2 \"\n \"--exclude=util_test.t3 --exclude=util_test.t4 \"\n \"\".format(cmd_str))\n res = self.run_test_case(0, cmd_opts, comment)\n if not res:\n raise MUTLibError(\"{0}: failed\".format(comment))\n\n # Mask known source.\n self.replace_result(\"# Source on localhost: ... connected.\",\n \"# Source on XXXX-XXXX: ... connected.\\n\")\n self.replace_result(\"# Source on [::1]: ... connected.\",\n \"# Source on XXXX-XXXX: ... 
connected.\\n\")\n # Mask GTID warning when servers with GTID enabled are used\n self.remove_result(\"# WARNING: The server supports GTIDs but you\")\n\n return True\n\n def get_result(self):\n return self.compare(__name__, self.results)\n\n def record(self):\n return self.save_result_file(__name__, self.results)\n\n def cleanup(self):\n return export_parameters_def.test.cleanup(self)\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":4174144891480443000,"string":"4,174,144,891,480,443,000"},"line_mean":{"kind":"number","value":39.6179245283,"string":"39.617925"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5605620718,"string":"0.560562"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109433,"cells":{"repo_name":{"kind":"string","value":"scottidler/gimport"},"path":{"kind":"string","value":"gimport.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6213"},"content":{"kind":"string","value":"#!/usr/bin/env python2.7\n\n# to use gimport: use wget or curl to download the gimport.py file locally\n#ie. os.system('wget -q https://github.com/scottidler/gimport/raw/master/gimport.py -O gimport.py')\n\nimport os\nimport re\nimport imp\nimport sys\nimport contextlib\nfrom subprocess import Popen, PIPE\n\nsys.dont_write_bytecode = True\n\nclass RepospecDecompositionError(Exception):\n '''\n exception when repospec can't be decomposed\n '''\n pass\n\n@contextlib.contextmanager\ndef cd(*args, **kwargs):\n '''\n helper change dir function to be used with 'with' expressions\n '''\n mkdir = kwargs.pop('mkdir', True)\n verbose = kwargs.pop('verbose', False)\n path = os.path.sep.join(args)\n path = os.path.normpath(path)\n path = os.path.expanduser(path)\n prev = os.getcwd()\n if path != prev:\n if mkdir:\n run('mkdir -p %(path)s' % locals(), verbose=verbose)\n os.chdir(path)\n curr = os.getcwd()\n sys.path.append(curr)\n if verbose:\n print 'cd %s' % curr\n try:\n yield\n finally:\n if path != prev:\n sys.path.remove(curr)\n os.chdir(prev)\n if verbose:\n print 'cd %s' % prev\n\ndef run(*args, **kwargs):\n '''\n thin wrapper around Popen; returns exitcode, stdout and stderr\n '''\n nerf = kwargs.pop('nerf', False)\n shell = kwargs.pop('shell', True)\n verbose = kwargs.pop('verbose', False)\n if (verbose or nerf) and args[0]:\n print args[0]\n if nerf:\n return (None, 'nerfed', 'nerfed')\n process = Popen(shell=shell, *args, **kwargs)\n stdout, stderr = process.communicate()\n exitcode = process.poll()\n if verbose and stdout:\n print stdout\n return exitcode, stdout, stderr\n\ndef expand(path):\n '''\n converts ~ -> /home/%{USER}\n '''\n if path:\n return os.path.expanduser(path)\n\ndef decompose(repospec, giturl=None):\n '''\n decompoes repospec into giturl, sep, reponame and revision\n '''\n pattern = r'(((((ssh|https)://)?([a-zA-Z0-9_.\\-]+@)?)([a-zA-Z0-9_.\\-]+))([:/]{1,2}))?([a-zA-Z0-9_.\\-\\/]+)@?([a-zA-Z0-9_.\\-\\/]+)?'\n match = re.search(pattern, repospec)\n if match:\n return match.group(2) or giturl, match.group(8), match.group(9), match.group(10) or 'HEAD'\n raise RepospecDecompositionError(repospec)\n\ndef divine(giturl, sep, reponame, revision):\n '''\n divines refname and commit from supplied args\n '''\n r2c = {} # revisions to commits\n c2r = {} # commits to revisions\n result = run('git ls-remote %(giturl)s%(sep)s%(reponame)s' % locals(), stdout=PIPE)[1].strip()\n for line in result.split('\\n'):\n commit, refname = line.split('\\t')\n r2c[refname] = 
commit\n c2r[commit] = refname\n\n refnames = [\n 'refs/heads/' + revision,\n 'refs/tags/' + revision,\n revision\n ]\n\n commit = None\n for refname in refnames:\n commit = r2c.get(refname, None)\n if commit:\n break\n\n if not commit:\n commit = revision\n\n return c2r.get(commit, None), commit\n\ndef clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning):\n '''\n wraps clone command with mirroring and caching\n '''\n mirror = ''\n if mirrorpath:\n mirror = '--reference %(mirrorpath)s/%(reponame)s.git' % locals()\n path = os.path.join(cachepath, reponame)\n repopath = reponame\n if versioning:\n repopath = os.path.join(repopath, commit)\n with cd(cachepath, mkdir=True):\n if not os.path.isdir(commit):\n run('git clone %(mirror)s %(giturl)s%(sep)s%(reponame)s %(repopath)s' % locals(), stdout=PIPE, stderr=PIPE)\n with cd(repopath):\n run('git clean -x -f -d', stdout=PIPE, stderr=PIPE)\n run('git checkout %(commit)s' % locals(), stdout=PIPE, stderr=PIPE)\n return os.path.join(cachepath, repopath)\n\ndef rmtree(path, empties=False):\n '''\n removes a folder path\n '''\n try:\n if empties:\n run('rmdir ' + path)\n else:\n run('rm -rf ' + path)\n dpath = os.path.dirname(path)\n if dpath:\n return rmtree(dpath)\n return path\n except:\n return path\n\ndef gimport(repospec, filepath, giturl=None, imports=None, cachepath='.gimport', mirrorpath=None, versioning=True, persist=False):\n '''\n main function alows user to import code from a git url\n '''\n cachepath = expand(cachepath)\n mirrorpath = expand(mirrorpath)\n giturl, sep, reponame, revision = decompose(repospec, giturl)\n _, commit = divine(giturl, sep, reponame, revision)\n path = clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning)\n with cd(path):\n modname = os.path.splitext(os.path.basename(filepath))[0]\n module = imp.load_source(modname, filepath)\n if not persist:\n rmtree(path)\n\n if imports:\n return [module[import_] for import_ in imports]\n return module\n\ndef main():\n '''\n only provided as an easy way to test module; usually used via import\n '''\n try:\n import argparse\n except:\n print 'missing argparse; gimport.py can be used as a library without argparse installed'\n sys.exit(-1)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--cachepath',\n default='.gimport',\n help='path to store all gimport cached files')\n parser.add_argument(\n '--mirrorpath',\n help='path to cached repos to support fast cloning')\n parser.add_argument(\n '--imports',\n nargs='+',\n help='list of imports')\n parser.add_argument(\n '--giturl',\n help='the giturl to be used with git clone')\n parser.add_argument(\n '--no-versioning',\n action='store_false',\n dest='versioning',\n help='turn versioning off; checkout in reponame rather than reponame/commit')\n parser.add_argument(\n 'repospec',\n help='repospec schema is giturl?reponame@revision?')\n parser.add_argument(\n 'filepath',\n help='the filepath inside the git repo')\n ns = parser.parse_args()\n print gimport(**ns.__dict__)\n\n sys.exit(0)\n\nif __name__ == '__main__':\n 
main()\n\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8092490262048937000,"string":"8,092,490,262,048,937,000"},"line_mean":{"kind":"number","value":28.5857142857,"string":"28.585714"},"line_max":{"kind":"number","value":133,"string":"133"},"alpha_frac":{"kind":"number","value":0.5992274264,"string":"0.599227"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109434,"cells":{"repo_name":{"kind":"string","value":"pybel/pybel"},"path":{"kind":"string","value":"src/pybel/struct/query/query.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6479"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"Query builder.\"\"\"\n\nimport json\nimport logging\nfrom typing import Dict, Iterable, List, Mapping, Optional, Set, TextIO, Union\n\nfrom .exc import QueryMissingNetworksError\nfrom .seeding import Seeding\nfrom ..pipeline import Pipeline\nfrom ...dsl import BaseEntity\n\n__all__ = [\n 'Query',\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass Query:\n \"\"\"Represents a query over a network store.\"\"\"\n\n def __init__(\n self,\n network_ids: Union[None, int, Iterable[int]] = None,\n seeding: Optional[Seeding] = None,\n pipeline: Optional[Pipeline] = None,\n ) -> None:\n \"\"\"Build a query.\n\n :param network_ids: Database network identifiers identifiers\n \"\"\"\n if not network_ids:\n self.network_ids = []\n elif isinstance(network_ids, int):\n self.network_ids = [network_ids]\n elif isinstance(network_ids, Iterable):\n network_ids = list(network_ids)\n\n for network_id in network_ids:\n if not isinstance(network_id, int):\n raise TypeError(network_ids)\n\n self.network_ids = network_ids\n else:\n raise TypeError(network_ids)\n\n if seeding is not None and not isinstance(seeding, Seeding):\n raise TypeError('Not a Seeding: {}'.format(seeding))\n self.seeding = seeding or Seeding()\n\n if pipeline is not None and not isinstance(pipeline, Pipeline):\n raise TypeError('Not a pipeline: {}'.format(pipeline))\n self.pipeline = pipeline or Pipeline()\n\n def append_network(self, network_id: int) -> 'Query':\n \"\"\"Add a network to this query.\n\n :param network_id: The database identifier of the network\n :returns: self for fluid API\n \"\"\"\n self.network_ids.append(network_id)\n return self\n\n def append_seeding_induction(self, nodes: Union[BaseEntity, List[BaseEntity], List[Dict]]) -> Seeding:\n \"\"\"Add a seed induction method.\n\n :returns: seeding container for fluid API\n \"\"\"\n return self.seeding.append_induction(nodes)\n\n def append_seeding_neighbors(self, nodes: Union[BaseEntity, List[BaseEntity], List[Dict]]) -> Seeding:\n \"\"\"Add a seed by neighbors.\n\n :returns: seeding container for fluid API\n \"\"\"\n return self.seeding.append_neighbors(nodes)\n\n def append_seeding_annotation(self, annotation: str, values: Set[str]) -> Seeding:\n \"\"\"Add a seed induction method for single annotation's values.\n\n :param annotation: The annotation to filter by\n :param values: The values of the annotation to keep\n \"\"\"\n return self.seeding.append_annotation(annotation, values)\n\n def append_seeding_sample(self, **kwargs) -> Seeding:\n \"\"\"Add seed induction methods.\n\n Kwargs can have ``number_edges`` or ``number_seed_nodes``.\n \"\"\"\n return self.seeding.append_sample(**kwargs)\n\n def append_pipeline(self, name, *args, **kwargs) -> Pipeline:\n \"\"\"Add an entry to the pipeline. 
Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.\n\n :param name: The name of the function\n :type name: str or types.FunctionType\n :return: This pipeline for fluid query building\n \"\"\"\n return self.pipeline.append(name, *args, **kwargs)\n\n def __call__(self, manager):\n \"\"\"Run this query and returns the resulting BEL graph with :meth:`Query.run`.\n\n :param pybel.manager.Manager manager: A cache manager\n :rtype: Optional[pybel.BELGraph]\n \"\"\"\n return self.run(manager)\n\n def run(self, manager):\n \"\"\"Run this query and returns the resulting BEL graph.\n\n :param manager: A cache manager\n :rtype: Optional[pybel.BELGraph]\n \"\"\"\n universe = self._get_universe(manager)\n graph = self.seeding.run(universe)\n return self.pipeline.run(graph, universe=universe)\n\n def _get_universe(self, manager):\n if not self.network_ids:\n raise QueryMissingNetworksError('can not run query without network identifiers')\n\n logger.debug('query universe consists of networks: %s', self.network_ids)\n\n universe = manager.get_graph_by_ids(self.network_ids)\n logger.debug('query universe has %d nodes/%d edges', universe.number_of_nodes(), universe.number_of_edges())\n\n return universe\n\n def to_json(self) -> Dict:\n \"\"\"Return this query as a JSON object.\"\"\"\n rv = {\n 'network_ids': self.network_ids,\n }\n\n if self.seeding:\n rv['seeding'] = self.seeding.to_json()\n\n if self.pipeline:\n rv['pipeline'] = self.pipeline.to_json()\n\n return rv\n\n def dump(self, file: TextIO, **kwargs) -> None:\n \"\"\"Dump this query to a file as JSON.\"\"\"\n json.dump(self.to_json(), file, **kwargs)\n\n def dumps(self, **kwargs) -> str:\n \"\"\"Dump this query to a string as JSON.\"\"\"\n return json.dumps(self.to_json(), **kwargs)\n\n @staticmethod\n def from_json(data: Mapping) -> 'Query':\n \"\"\"Load a query from a JSON dictionary.\n\n :param data: A JSON dictionary\n :raises: QueryMissingNetworksError\n \"\"\"\n network_ids = data.get('network_ids')\n if network_ids is None:\n raise QueryMissingNetworksError('query JSON did not have key \"network_ids\"')\n\n seeding_data = data.get('seeding')\n seeding = (\n Seeding.from_json(seeding_data)\n if seeding_data is not None else\n None\n )\n\n pipeline_data = data.get('pipeline')\n pipeline = (\n Pipeline.from_json(pipeline_data)\n if pipeline_data is not None else\n None\n )\n\n return Query(\n network_ids=network_ids,\n seeding=seeding,\n pipeline=pipeline,\n )\n\n @staticmethod\n def load(file: TextIO) -> 'Query':\n \"\"\"Load a query from a JSON file.\n\n :raises: QueryMissingNetworksError\n \"\"\"\n return Query.from_json(json.load(file))\n\n @staticmethod\n def loads(s: str) -> 'Query':\n \"\"\"Load a query from a JSON string.\n\n :param s: A stringified JSON query\n :raises: QueryMissingNetworksError\n \"\"\"\n return Query.from_json(json.loads(s))\n\n def __str__(self):\n return 'Query(networks={}, seeding={}, pipeline={})'.format(self.network_ids, self.seeding, 
self.pipeline)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-7717907304301928000,"string":"-7,717,907,304,301,928,000"},"line_mean":{"kind":"number","value":30.7598039216,"string":"30.759804"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.6045686063,"string":"0.604569"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109435,"cells":{"repo_name":{"kind":"string","value":"rsennrich/nematus"},"path":{"kind":"string","value":"nematus/translate.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2461"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\n\"\"\"Translates a source file using a translation model (or ensemble).\"\"\"\n\nimport argparse\nimport logging\n\nimport tensorflow as tf\n\nfrom config import load_config_from_json_file\nimport inference\nimport model_loader\nimport rnn_model\nfrom settings import TranslationSettings\nfrom transformer import Transformer as TransformerModel\n\n\ndef main(settings):\n \"\"\"\n Translates a source language file (or STDIN) into a target language file\n (or STDOUT).\n \"\"\"\n # Start logging.\n level = logging.DEBUG if settings.verbose else logging.INFO\n logging.basicConfig(level=level, format='%(levelname)s: %(message)s')\n\n # Create the TensorFlow session.\n tf_config = tf.ConfigProto()\n tf_config.allow_soft_placement = True\n session = tf.Session(config=tf_config)\n\n # Load config file for each model.\n configs = []\n for model in settings.models:\n config = load_config_from_json_file(model)\n setattr(config, 'reload', model)\n configs.append(config)\n\n # Create the model graphs and restore their variables.\n logging.debug(\"Loading models\\n\")\n models = []\n for i, config in enumerate(configs):\n with tf.variable_scope(\"model%d\" % i) as scope:\n if config.model_type == \"transformer\":\n model = TransformerModel(config)\n else:\n model = rnn_model.RNNModel(config)\n saver = model_loader.init_or_restore_variables(config, session,\n ensemble_scope=scope)\n models.append(model)\n\n # TODO Ensembling is currently only supported for RNNs, so if\n # TODO len(models) > 1 then check models are all rnn\n\n # Translate the source file.\n inference.translate_file(input_file=settings.input,\n output_file=settings.output,\n session=session,\n models=models,\n configs=configs,\n beam_size=settings.beam_size,\n nbest=settings.n_best,\n minibatch_size=settings.minibatch_size,\n maxibatch_size=settings.maxibatch_size,\n normalization_alpha=settings.normalization_alpha)\n\n\nif __name__ == \"__main__\":\n # Parse console arguments.\n settings = TranslationSettings(from_console_arguments=True)\n main(settings)\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-8753197665726532000,"string":"-8,753,197,665,726,532,000"},"line_mean":{"kind":"number","value":33.661971831,"string":"33.661972"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.6078829744,"string":"0.607883"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109436,"cells":{"repo_name":{"kind":"string","value":"cgvarela/fileserver"},"path":{"kind":"string","value":"kontalk/fileserver/auth.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4332"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"Authentication utilities.\"\"\"\n\"\"\"\n Kontalk Fileserver\n Copyright (C) 2015 Kontalk Devteam \n\n This 
program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n\"\"\"\n\n\nfrom zope.interface import implements\n\nfrom twisted.web import iweb\nfrom twisted.cred import credentials, checkers, error, portal\nfrom twisted.python import failure\nfrom twisted.internet import defer\nfrom twisted.words.protocols.jabber import jid, sasl\n\nfrom gnutls.crypto import OpenPGPCertificate\nfrom OpenSSL.crypto import X509\n\nimport log\nimport keyring\n\n\nclass IKontalkCertificate(credentials.ICredentials):\n\n def check(fingerprint, kr, verify_cb=None):\n pass\n\n\nclass KontalkCertificate(object):\n implements(IKontalkCertificate)\n\n def __init__(self, cert):\n self.cert = cert\n\n def check(self, fingerprint, kr, verify_cb=None):\n _jid = None\n fpr = None\n\n if isinstance(self.cert, OpenPGPCertificate):\n uid = self.cert.uid(0)\n _jid = jid.JID(uid.email)\n fpr = self.cert.fingerprint\n\n elif isinstance(self.cert, X509):\n fpr = keyring.verify_certificate(self.cert, kr)\n if fpr:\n pkey = kr.get_key(fpr)\n uid = pkey.uids[0]\n if uid:\n _jid = jid.JID(uid.email)\n fpr = kr.check_user_key(pkey, _jid.user)\n if not fpr:\n _jid = None\n\n if _jid:\n def _continue(userjid):\n return userjid\n def _error(reason):\n return None\n\n # deferred to check fingerprint against JID cache data\n if verify_cb:\n d = verify_cb(_jid, fpr)\n d.addCallback(_continue)\n d.addErrback(_error)\n return d\n else:\n return _jid\n\n return None\n\n\nclass IKontalkToken(credentials.ICredentials):\n\n def check(fingerprint, kr, verify_cb):\n pass\n\n\nclass KontalkToken(object):\n implements(IKontalkToken)\n\n def __init__(self, token, decode_b64=False):\n self.token = token\n self.decode_b64 = decode_b64\n\n def check(self, fingerprint, kr, verify_cb):\n try:\n if self.decode_b64:\n data = sasl.fromBase64(self.token)\n else:\n data = self.token\n\n return kr.check_token(data)\n except:\n # TODO logging or throw exception back\n import traceback\n traceback.print_exc()\n log.debug(\"token verification failed!\")\n\n\nclass AuthKontalkChecker(object):\n implements(checkers.ICredentialsChecker)\n\n credentialInterfaces = IKontalkToken, IKontalkCertificate\n\n def __init__(self, fingerprint, kr, verify_cb=None):\n self.fingerprint = str(fingerprint)\n self.keyring = kr\n self.verify_cb = verify_cb\n\n def _cbTokenValid(self, userid):\n if userid:\n return userid\n else:\n return failure.Failure(error.UnauthorizedLogin())\n\n def requestAvatarId(self, credentials):\n return defer.maybeDeferred(\n credentials.check, self.fingerprint, self.keyring, self.verify_cb).addCallback(\n self._cbTokenValid)\n\n\nclass AuthKontalkTokenFactory(object):\n implements(iweb.ICredentialFactory)\n\n scheme = 'kontalktoken'\n\n def __init__(self, fingerprint, kr):\n self.fingerprint = fingerprint\n self.keyring = kr\n\n def getChallenge(self, request):\n return {}\n\n def decode(self, response, request):\n key, token = response.split('=', 1)\n if key == 'auth':\n return KontalkToken(token, True)\n\n raise 
error.LoginFailed('Invalid token')\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":7967290889403946000,"string":"7,967,290,889,403,946,000"},"line_mean":{"kind":"number","value":27.1298701299,"string":"27.12987"},"line_max":{"kind":"number","value":91,"string":"91"},"alpha_frac":{"kind":"number","value":0.622345337,"string":"0.622345"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109437,"cells":{"repo_name":{"kind":"string","value":"killthekitten/kaggle-carvana-2017"},"path":{"kind":"string","value":"find_bounding_boxes.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2471"},"content":{"kind":"string","value":"import numpy as np\nimport pandas as pd\nfrom scipy import ndimage\nimport os\nfrom params import args\n\nMARGIN = 64\n\n\ndef find_slices(mask_img):\n mask = mask_img > 100\n label_im, nb_labels = ndimage.label(mask)\n # Find the largest connect component\n sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))\n mask_size = sizes < 50000\n remove_pixel = mask_size[label_im]\n label_im[remove_pixel] = 0\n labels = np.unique(label_im)\n label_im = np.searchsorted(labels, label_im)\n # Now that we have only one connect component, extract it's bounding box\n slice_y, slice_x = ndimage.find_objects(label_im == 1)[0]\n return slice_x, slice_y\n\n\ndef find_bounding_boxes():\n img_width = args.img_width\n img_height = args.img_height\n masks_dir = args.pred_mask_dir\n boxes = process_images(img_height, img_width, masks_dir)\n df = pd.DataFrame(boxes)\n df.to_csv(\"boxes.csv\", header=['filename', 'y_start', 'y_end', 'x_start', 'x_end'], index=False)\n\n\ndef process_images(img_height, img_width, masks_dir):\n boxes = []\n for i, filename in enumerate(sorted(os.listdir(masks_dir))):\n mask_img = ndimage.imread(os.path.join(masks_dir, filename), mode='L')\n expanded = np.zeros((1280, 1920), dtype=mask_img.dtype)\n expanded[:, 1:-1] = mask_img\n mask_img = expanded\n slice_x, slice_y = find_slices(mask_img)\n # we should expand by at least 32px + ceil to closest divisible 32\n x_start = max(slice_x.start - MARGIN, 0)\n x_end = min(slice_x.stop + MARGIN, img_width)\n y_start = max(slice_y.start - MARGIN, 0)\n y_end = min(slice_y.stop + MARGIN, img_height)\n\n bb_height = y_end - y_start\n bb_width = x_end - x_start\n\n if bb_width % MARGIN != 0:\n bb_width_expand = (bb_width // MARGIN + 1) * MARGIN\n x_start = min(x_start, max(0, x_start - MARGIN))\n x_end = x_start + bb_width_expand\n\n if bb_height % MARGIN != 0:\n bb_height_expand = (bb_height // MARGIN + 1) * MARGIN\n y_start = min(y_start, max(0, y_start - MARGIN))\n y_end = y_start + bb_height_expand\n assert (x_end - x_start) % MARGIN == 0\n assert (y_end - y_start) % MARGIN == 0\n\n boxes.append((filename[:-4] + \".jpg\", y_start, y_end, x_start, x_end))\n if i % 100 == 0:\n print(\"processed {} images\".format(i))\n return boxes\n\n\nif __name__ == '__main__':\n 
find_bounding_boxes()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":3856632177011683000,"string":"3,856,632,177,011,683,000"},"line_mean":{"kind":"number","value":34.3,"string":"34.3"},"line_max":{"kind":"number","value":100,"string":"100"},"alpha_frac":{"kind":"number","value":0.5985431,"string":"0.598543"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109438,"cells":{"repo_name":{"kind":"string","value":"saullocastro/pyNastran"},"path":{"kind":"string","value":"pyNastran/converters/tecplot/test_tecplot.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2061"},"content":{"kind":"string","value":"import os\n\nfrom pyNastran.converters.tecplot.tecplot import read_tecplot\nfrom pyNastran.converters.tecplot.tecplot_to_nastran import tecplot_to_nastran_filename\nfrom pyNastran.converters.nastran.nastran_to_tecplot import nastran_to_tecplot, nastran_to_tecplot_filename\nimport pyNastran\n\npkg_path = pyNastran.__path__[0]\nmodel_path = os.path.join(pkg_path, 'converters', 'tecplot', 'models')\nnastran_path = os.path.join(pkg_path, '..', 'models')\n\nimport unittest\n\nclass TestTecplot(unittest.TestCase):\n\n def test_tecplot_01(self):\n tecplot_filename1 = os.path.join(model_path, 'ascii', 'point_fetri_2d_02.dat')\n tecplot_filename2 = os.path.join(model_path, 'ascii', 'point_fetri_2d_02.dat_out')\n\n tecplot = read_tecplot(tecplot_filename1)\n #tecplot.write_tecplot(tecplot_filename2, res_types=None,\n #is_points=True, adjust_nids=True)\n #os.remove(tecplot_filename2)\n\n def test_tecplot_02(self):\n nastran_filename1 = os.path.join(nastran_path, 'solid_bending', 'solid_bending.bdf')\n nastran_filename2 = os.path.join(nastran_path, 'solid_bending', 'solid_bending2.bdf')\n tecplot_filename = os.path.join(nastran_path, 'solid_bending', 'solid_bending.plt')\n tecplot = nastran_to_tecplot_filename(nastran_filename1, tecplot_filename)\n #tecplot.write_tecplot(tecplot_filename)\n #tecplot_to_nastran_filename(tecplot_filename, nastran_filename2)\n #os.remove(nastran_filename2)\n #os.remove(tecplot_filename)\n\n def _test_tecplot_02(self):\n nastran_filename1 = os.path.join(nastran_path, 'solid_bending', 'solid_bending.bdf')\n nastran_filename2 = os.path.join(nastran_path, 'solid_bending', 'solid_bending2.bdf')\n tecplot_filename = os.path.join(nastran_path, 'solid_bending', 'solid_bending.plt')\n tecplot = nastran_to_tecplot_filename(nastran_filename1, tecplot_filename)\n tecplot_to_nastran_filename(tecplot_filename, nastran_filename2)\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n\n"},"license":{"kind":"string","value":"lgpl-3.0"},"hash":{"kind":"number","value":-7380779099647959000,"string":"-7,380,779,099,647,959,000"},"line_mean":{"kind":"number","value":44.8,"string":"44.8"},"line_max":{"kind":"number","value":107,"string":"107"},"alpha_frac":{"kind":"number","value":0.6972343523,"string":"0.697234"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109439,"cells":{"repo_name":{"kind":"string","value":"Balannen/LSMASOMM"},"path":{"kind":"string","value":"atom3/Kernel/UserInterface/popupMenuCreator.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"13756"},"content":{"kind":"string","value":"\"\"\"\r\npopupMenuCreator.py\r\n\r\nThis constructs context senstive menus that present only relevent information\r\nto the user depending on the state of the canvas.\r\nIn order to make this fast & intuitive, most of the actual implementations of 
\r\nthe menu elements have been pushed into another file. \r\n\r\nCreated June 17, 2004 by Denis Dube\r\n\"\"\"\r\n\r\nfrom Tkinter import Menu, IntVar\r\nimport time\r\n\r\nfrom popupMenuElements import *\r\nfrom OptionDialog import OptionDialog\r\nfrom Embedded_Images import Embedded_Images\r\n\r\nclass PopupMenuCreator:\r\n \r\n def __init__(self, atom3i ):\r\n self.master = atom3i.parent\r\n self.atom3i = atom3i\r\n self.cb = atom3i.cb\r\n self.optionsDatabase = atom3i.optionsDatabase\r\n self.popupLogoPhotoimage = Embedded_Images().getPopupLogo()\r\n\r\n self.popupMenu = None\r\n self.event = None\r\n \r\n \r\n # --------------------------- Popup Utilities -------------------------------\r\n \r\n def initilizePopupMenu( self, event ):\r\n \"\"\" Create a new popup menu \"\"\"\r\n if( self.popupMenu ):\r\n self.popupMenu.unpost()\r\n self.popupMenu = Menu(self.master , tearoff=0, bg = \"white\")\r\n self.event = event\r\n \r\n def showPopupMenu( self ):\r\n \"\"\" Display the popup menu \"\"\"\r\n if( self.popupMenu ):\r\n self.popupMenu.post(self.event.x_root, self.event.y_root)\r\n \r\n def swapMenu(self, menu ):\r\n \"\"\" \r\n This is a fix for a problem that no longer exists :p\r\n It essentially takes one menu and slaps another one in its place.\r\n \"\"\"\r\n raise Exception, \"No one uses this method! But if you see this, maybe not so...\" \r\n self.popupMenu.unpost()\r\n self.popupMenu = menu\r\n self.showPopupMenu()\r\n \r\n def popupRemover(self):\r\n \"\"\" Goodbye popup! \"\"\"\r\n if( self.popupMenu ):\r\n self.popupMenu.unpost()\r\n self.popupMenu = None\r\n \r\n \r\n # ---------------------- Context Sensitive Menus -------------------------- \r\n \r\n def NoCursorNoSelectPopup( self,event ):\r\n \"\"\" Popup menu to show when no items under the mouse, and no items selected \"\"\"\r\n\r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addModelAction( self )\r\n addSelectAll( self ) \r\n addPaste( self )\r\n addUndo( self )\r\n addRedo( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addFileMenu( self )\r\n addModelMenu( self )\r\n addTransformationMenu( self )\r\n addLayoutMenu( self ) \r\n addExportMenu( self )\r\n #.........................\r\n addSeperator( self )\r\n #......................... \r\n addOpenLastModel( self )\r\n addOpenLastMetaModel(self)\r\n addSourcePath( self )\r\n #.........................\r\n addSeperator( self )\r\n #......................... 
\r\n addToggleSmoothMode( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addExit( self )\r\n \r\n self.showPopupMenu()\r\n \r\n def NoCursorMultiSelectPopup(self,event):\r\n \"\"\" Popup menu to show when no items under the mouse, and multiple items selected \"\"\"\r\n \r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addLayoutMenu( self )\r\n addResizeEntity( self )\r\n addNodeLabelDragToggle( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSelectAll( self )\r\n addDeselectAll( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCut( self )\r\n addCopy( self )\r\n addPaste( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addUndo( self )\r\n addRedo( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addClear( self )\r\n \r\n self.showPopupMenu()\r\n \r\n def EntityAtCursorMultiSelectPopup(self,event):\r\n \"\"\" \r\n A graphical entity is under the mouse cursor, along with multiple\r\n selected items\r\n \"\"\"\r\n \r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addLayoutMenu( self )\r\n addEditEntity( self )\r\n addDragOverlap( self )\r\n addDrawArrow( self ) \r\n addResizeEntity( self ) \r\n addNodeLabelDragToggle( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSelectAll( self )\r\n addDeselectAll( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCut( self )\r\n addCopy( self )\r\n addPaste( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCopyAttributes( self )\r\n addPasteAttributes( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addUndo( self )\r\n addRedo( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addClear( self )\r\n \r\n \r\n self.showPopupMenu()\r\n\r\n def EntityAtCursorNoSelectPopup(self,event):\r\n \"\"\" A graphical entity is under the mouse cursor, but no selected items \"\"\"\r\n \r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addEditEntity( self )\r\n addDragOverlap( self )\r\n addDrawArrow( self )\r\n addResizeEntity( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSelectAll( self )\r\n addPaste( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCopyAttributes( self )\r\n addPasteAttributes( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addUndo( self )\r\n addRedo( self ) \r\n \r\n self.showPopupMenu()\r\n \r\n \r\n def LinkAtCursorMultiSelectPopup(self,event):\r\n \"\"\" \r\n A graphical link/connection is under the mouse cursor, along with multiple\r\n selected items\r\n \"\"\"\r\n \r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addLayoutMenu( self )\r\n addEditEntity( self )\r\n 
addDragOverlap( self )\r\n addArrowEditor( self )\r\n addResizeEntity( self ) \r\n addNodeLabelDragToggle( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSmoothSelected( self )\r\n addToggleSmoothMode( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSelectAll( self )\r\n addDeselectAll( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCut( self )\r\n addCopy( self )\r\n addPaste( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCopyAttributes( self )\r\n addPasteAttributes( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addUndo( self )\r\n addRedo( self ) \r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addClear( self )\r\n \r\n \r\n self.showPopupMenu()\r\n \r\n def LinkAtCursorNoSelectPopup(self,event):\r\n \"\"\" \r\n A graphical link/connection is under the mouse cursor, but there are no\r\n selected items\r\n \"\"\"\r\n \r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addEditEntity( self )\r\n addDragOverlap( self )\r\n addArrowEditor( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addSelectAll( self )\r\n addToggleSmoothMode( self )\r\n addPaste( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addCopyAttributes( self )\r\n addPasteAttributes( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addUndo( self )\r\n addRedo( self ) \r\n \r\n self.showPopupMenu()\r\n \r\n def ArrowEditorPopup(self,event):\r\n \"\"\" Menu for the arrow editor \"\"\"\r\n self.initilizePopupMenu( event )\r\n \r\n addLogo( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addEditEntity( self ) \r\n addInsertPoint( self )\r\n addDeletePoint( self )\r\n addSmoothSelected( self )\r\n addNodeLabelMoveToggle( self )\r\n #.........................\r\n addSeperator( self )\r\n #.........................\r\n addArrowEditorExit( self )\r\n \r\n self.showPopupMenu()\r\n \r\n # ----------------------- Popup a specific submenu -------------------------\r\n \r\n def LayoutPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n self.popupMenu = self.atom3i.layoutMenu\r\n self.showPopupMenu()\r\n \r\n def ExportPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n self.popupMenu = self.atom3i.exportMenu\r\n self.showPopupMenu()\r\n \r\n def ModelPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n self.popupMenu = self.atom3i.modelMenu\r\n self.showPopupMenu()\r\n \r\n def TransformationPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n self.popupMenu = self.atom3i.transMenu\r\n self.showPopupMenu()\r\n \r\n def FilePopup(self,event):\r\n self.initilizePopupMenu( event )\r\n self.popupMenu = self.atom3i.filemenu\r\n self.showPopupMenu()\r\n \r\n def LastModelPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n addOpenLastModelSubroutine( self, self.popupMenu )\r\n self.showPopupMenu()\r\n \r\n def LastMetaModelPopup(self,event):\r\n self.initilizePopupMenu( event )\r\n addOpenLastMetaModelSubroutine( self, self.popupMenu )\r\n self.showPopupMenu()\r\n \r\n def SourcePathPopup(self,event):\r\n 
self.initilizePopupMenu( event )\r\n addSourcePathSubroutine( self, self.popupMenu )\r\n self.showPopupMenu()\r\n \r\n \r\n # ------------------------ String List to PopupMenu --------------------------------- \r\n \r\n def listChoicePopup(self, title, stringList, unused = None ):\r\n \"\"\" \r\n Creates a popup menu with radiobuttons labeled from the stringList.\r\n Returns the index of the label that was chosen.\r\n NOTE: choosing outside the popup implicitly chooses index 0\r\n \"\"\"\r\n \r\n # Remove any existing popups first\r\n self.popupRemover()\r\n \r\n self.popupMenu = Menu(self.master , tearoff=0)\r\n integerVar = IntVar()\r\n \r\n self.popupMenu.add_command( label=title, command=self.popupRemover )\r\n self.popupMenu.add_separator() \r\n \r\n i = 1\r\n for label in stringList:\r\n self.popupMenu.add_radiobutton( label=label, variable=integerVar,\r\n value=i,indicatoron=False ) \r\n i += 1\r\n \r\n # This gets the last known co-ordinates of the mouse :D\r\n # NOTE: We get co-ordinates in terms of canvas space, convert back into\r\n # screenspace first before using them...\r\n x,y = self.atom3i.cb.getLastClickCoord()\r\n dc = self.atom3i.cb.getCanvas()\r\n x,y = [x-dc.canvasx(0),y-dc.canvasy(0)]\r\n\r\n # These offsets place the menu just where I like it...\r\n x = int(x) +40 #+ 100\r\n y = int(y) +40 #+ 20\r\n \r\n # Posts the menu, and blocks program execution here on win32 only\r\n self.popupMenu.post( x,y )\r\n \r\n # Blocks program execution (all platforms) & waits for integerVar to be updated\r\n # Not ideal: If we close the popup without selecting anything this will\r\n # wait forever and execution will never get anywhere beyond this point!!!\r\n # Moreover: AToM3 will not shutdown properly!\r\n #self.master.wait_variable( integerVar )\r\n \r\n # THEORY: This will work whether or not the post() blocks or not\r\n # Practice: Works great on WinXP with Python 2.3\r\n # Linux?\r\n while( 1 ):\r\n self.master.update()\r\n value = integerVar.get() \r\n \r\n # Hapiness, we got the value we wanted\r\n if( value > 0 ): return value\r\n \r\n # The user killed the popup! O_O\r\n elif( self.popupMenu == None ): return 0\r\n \r\n # Unhapiness, the user avoided selecting anything\r\n elif( value == 0 ): \r\n self.popupMenu.unpost()\r\n self.popupMenu.post( x,y )\r\n self.master.update()\r\n \r\n time.sleep( 0.4 )\r\n \r\n return 0 # We won't get here, but just in case...\r\n \r\n def listChoicePopupAlternative(self, title, stringList, actionLabel ):\r\n \"\"\" OBSOLETE --- Delete this \"\"\"\r\n \r\n raise Exception, \"No one uses this method! 
But if you see this, maybe not so...\"\r\n \r\n \"\"\"\r\n optionList = [OptionDialog.BOOL_BUTTON_ENTRY,actionLabel]\r\n \r\n options = dict()\r\n optionOrder = list()\r\n for i in range(0,len(stringList)):\r\n options[i] = [False,optionList,stringList[i],'']\r\n optionOrder.append(i)\r\n i+=1\r\n \r\n dialog = OptionDialog(self.master, title, options,optionOrder, grab = False,\r\n position = self.atom3i.cb.getLastClickCoordInRootCoords() )\r\n \r\n if( dialog.isCanceled() ):\r\n return 0\r\n \r\n options = dialog.getOptionsDatabase()\r\n i = 1\r\n for option in optionOrder:\r\n if( options[option][0] ):\r\n return i\r\n i += 1 \r\n return 0\r\n \"\"\"\r\n \r\n \r\n "},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":2528474062664432600,"string":"2,528,474,062,664,432,600"},"line_mean":{"kind":"number","value":27.5870967742,"string":"27.587097"},"line_max":{"kind":"number","value":112,"string":"112"},"alpha_frac":{"kind":"number","value":0.5099592905,"string":"0.509959"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109440,"cells":{"repo_name":{"kind":"string","value":"luksan/kodos"},"path":{"kind":"string","value":"scripts/pyuicfix.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1631"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*-\n# vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent:\n# :mode=python:indentSize=4:tabSize=4:noTabs=true:\n\n\"\"\"\nthis should be invoked by a pyuic wrapper\nit looks for the arg after the -o cmd line flag\nwhich is used as the source AND destination file.\n\"\"\"\n\n#-----------------------------------------------------------------------------#\n# Built-in modules\n\nimport sys\nimport re\n\n#-----------------------------------------------------------------------------#\n\nfilename = None\nargs = sys.argv[1:]\n\nfor i in range(len(args)):\n arg = args[i]\n if arg == '-o':\n filename = args[i+1]\n break\n\nif not filename:\n print(\"Error: could not extract filename from: {0}\".format(args))\n sys.exit(0)\n\nfp = open(filename, \"r\")\npycode = fp.read()\nfp.close()\n\n\n# regex from Kodos (of course!)\nrx = re.compile(r\"\"\"self\\.clearWState\\(Qt\\.WState_Polished\\)\"\"\")\nrepl = \"\"\"try:\n self.clearWState(Qt.WState_Polished)\n except AttributeError:\n pass\n\"\"\"\npycode = rx.sub(repl, pycode)\n\n\nrx = re.compile(r\"\"\"\\.setAccel\\((?P.*)\"\"\")\npos = 0\nwhile 1:\n m = rx.search(pycode, pos)\n if not m: break\n pos = m.end()\n tr = m.group(1)\n pycode = pycode[:m.start()] + \\\n \".setAccel(QKeySequence(\" + \\\n tr + \\\n \")\" + \\\n pycode[m.end():]\n\n\nfp = open(filename, 
\"w\")\nfp.write(pycode)\nfp.close()\n\n#-----------------------------------------------------------------------------#\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-8801084592959030000,"string":"-8,801,084,592,959,030,000"},"line_mean":{"kind":"number","value":23.7121212121,"string":"23.712121"},"line_max":{"kind":"number","value":112,"string":"112"},"alpha_frac":{"kind":"number","value":0.5144083384,"string":"0.514408"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109441,"cells":{"repo_name":{"kind":"string","value":"vkoukis/pymatryoshka"},"path":{"kind":"string","value":"matryoshka/server.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"31315"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# PyMatryoshka: A VXLAN-over-UDP agent\n#\n# Copyright (c) 2012 Vangelis Koukis .\n#\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\n\n\"\"\"Matryoshka: A VXLAN-over-UDP agent\"\"\"\n\nimport daemon\nimport daemon.pidlockfile\nimport errno\nimport fcntl\nimport logging\nimport logging.handlers\nimport os\nimport pyinotify\nimport select\nimport socket\nimport struct\nimport sys\nimport time\n\nfrom IPy import IP\n#from scapy.layers.l2 import Ether\nfrom signal import signal, siginterrupt, SIGTERM, SIGUSR1, SIGUSR2\n\nfrom vxlan import VXLAN\nfrom tuntap import VirtualTap\n\nDEFAULT_MCASTIF = \"\"\nDEFAULT_BINDADDR = \"\"\nDEFAULT_BINDPORT = 3601\nDEFAULT_STATEDIR = \"/var/lib/matryoshka\"\nDEFAULT_LOGDIR = \"/var/log/matryoshka\"\nDEFAULT_PIDFILE = \"/var/run/matryoshka/matryoshka.pid\"\n\nLOG_FILENAME = \"matryoshka.log\"\nLOG_FORMAT = \"%(asctime)-15s %(levelname)-6s %(message)s\"\n\nDEFAULT_MAC_TABLE_SIZE = 10000\n\n\nclass FileHandler(pyinotify.ProcessEvent):\n \"\"\"Handle pyinotify events from watching the state directory.\"\"\"\n def __init__(self, server):\n pyinotify.ProcessEvent.__init__(self)\n self.server = server\n\n def process_IN_DELETE(self, event):\n \"\"\"Handle deletion of file in the state directory.\n\n Whnever a file is deleted from the state directory,\n the server detaches itself from the associated virtual network.\n\n \"\"\"\n\n logging.debug(\"File %s deleted, detaching from virtual network\",\n event.name)\n self.server.detach_from_network((os.path.join(event.path, event.name)))\n return\n\n def process_IN_CLOSE_WRITE(self, event):\n \"\"\"Handle addition of file in the state directory.\n\n Whenever a file is added to the state directory,\n the server attaches itself to the associated virtual network.\n\n \"\"\"\n logging.debug(\"File %s added, attaching to virtual network\",\n event.name)\n self.server.attach_to_network((os.path.join(event.path, event.name)))\n return\n\n\nclass VirtualNetwork(object):\n \"\"\"A virtual network with MAC-to-VTEP learning functionality\"\"\"\n def __init__(self, vni, 
macttl, mactablesize=DEFAULT_MAC_TABLE_SIZE):\n self._macs = {}\n self.socket = None\n self.targetips = []\n\n self.vni = vni\n self.macttl = macttl\n self.mactablesize = mactablesize\n\n if not vni or not macttl:\n raise ValueError(\"vni and macttl arguments are mandatory\")\n\n def __repr__(self):\n return \"\" % (self.vni, self.macttl)\n\n def learn(self, mac, vtep):\n \"\"\"Learn a new mac address on endpoint vtep.\n\n Learn a new mac address on endpoint vtep, return True\n if the mac address is a new entry, False if the mac\n address was already known, so the existing entry gets\n a refreshed ttl.\n\n \"\"\"\n now = time.time()\n existing = mac in self._macs\n if not existing and len(self._macs) >= self.mactablesize:\n # Trigger cleaning of stale entries\n self.gc()\n if len(self._macs) >= self.mactablesize:\n raise MemoryError(\"Mac table size limit of %d reached for %r\" %\n (self.mactablesize, self))\n\n self._macs[mac] = (vtep, now + self.macttl)\n return not existing\n\n def lookup(self, mac):\n \"\"\"Lookup a MAC address, return VTEP if found, None otherwise\"\"\"\n now = time.time()\n entry = self._macs.get(mac, None)\n if not entry:\n return None\n if now > entry[1]:\n del self._macs[mac] # Remove stale entry\n return None\n return entry[0]\n\n def gc(self):\n \"\"\"Do garbage collection, flush all expired entries in MAC table\"\"\"\n now = time.time()\n for m in self._macs.keys():\n if now > self._macs[m][1]:\n del self._macs[m]\n\n\ndef _parse_network_file(path, family):\n \"\"\"Read virtual network information from file\"\"\"\n try:\n ifile = open(path, \"r\")\n except IOError as ioe:\n logging.error(\"Unable to open network file %s: %s\", path, ioe)\n return None\n\n try:\n vals = {}\n lcnt = 0\n for line in ifile:\n lcnt += 1\n # Lines are of the form \"key = val\", keys are converted\n # to all lowercase, lines starting with '#' are ignored.\n if not line.strip() or line.strip().startswith(\"#\"):\n continue\n (key, val) = [s.strip() for s in line.strip().split(\"=\", 1)]\n vals[key.lower()] = val\n except ValueError as ve:\n logging.error(\"Cannot parse line %d in %s using 'key=val' format: %s\",\n lcnt, path, ve)\n return None\n\n # Report on missing and unknown keys\n keys = [\"tapname\", \"vni\", \"macttl\", \"targetip\", \"targetport\"]\n unknown_keys = set(vals.keys()) - set(keys)\n missing_keys = set(keys) - set(vals.keys())\n if unknown_keys:\n logging.error(\"Unknown keys specified in network file %s: %s\",\n path, \", \".join(unknown_keys))\n return None\n if missing_keys:\n logging.error(\"Required keys missing from network file %s: %s\",\n path, \", \".join(missing_keys))\n return None\n\n try:\n vals[\"vni\"] = int(vals[\"vni\"])\n vals[\"macttl\"] = float(vals[\"macttl\"])\n targetip = IP(vals[\"targetip\"])\n if (targetip.version() == 4 and family != socket.AF_INET or\n targetip.version() == 6 and family != socket.AF_INET6):\n msg = (\"Cannot specify IPv%d IP in TARGETIP when\"\n \" using %s\") % (targetip.version(), _family_name(family))\n raise ValueError(msg)\n vals[\"targetip\"] = str(targetip)\n vals[\"targetport\"] = int(vals[\"targetport\"])\n except ValueError as ve:\n logging.error(\"Validation failed for fields in %s: %s\",\n path, ve)\n return None\n\n if \"tapname\" in vals and vals[\"tapname\"] != os.path.basename(path):\n logging.error(\"Network file %s refers to tap interface %s\",\n path, vals[\"tapname\"])\n return None\n\n return vals\n\n\ndef _mac_is_multicast(mac):\n return int(mac.split(\":\")[0], 16) & 1 == 1\n\n\ndef 
_ip_is_multicast(ip):\n ip = IP(ip)\n if ip.version() == 4:\n return ip in IP(\"224.0.0.0/4\")\n else:\n return ip in IP(\"ff00::/8\")\n\n\ndef _family_name(family):\n d = {socket.AF_INET: \"IPv4\", socket.AF_INET6: \"IPv6\"}\n return d[family]\n\n\ndef _join_mcast_group(s, addr, ifname):\n logging.debug(\"Socket %s joining multicast group %s on ifname '%s'\",\n s.getsockname(), addr, ifname)\n\n # Set the TTL for outgoing IP multicast packets\n # A value of '1' means same subnet, see\n # http://tldp.org/HOWTO/Multicast-HOWTO-2.html.\n TTL = 1\n optval = struct.pack(\"@B\", TTL)\n if s.family == socket.AF_INET:\n s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, optval)\n else:\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, optval)\n\n # Disable looping of locally originating packets\n LOOP = 0\n optval = struct.pack(\"@B\", LOOP)\n if s.family == socket.AF_INET:\n s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, optval)\n else:\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, optval)\n\n # Subscribe the socket to the IP multicast group on interface ifname\n mcast_packed = socket.inet_pton(s.family, addr)\n if s.family == socket.AF_INET:\n optval = mcast_packed + struct.pack(\"!II\", socket.INADDR_ANY,\n _if_nametoindex(ifname))\n s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, optval)\n else:\n optval = mcast_packed + struct.pack(\"!I\", _if_nametoindex(ifname))\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, optval)\n\n logging.debug(\"Socket %s joined multicast group %s on ifname '%s'\",\n s.getsockname(), addr, ifname)\n\n\ndef _leave_mcast_group(s, addr, ifname):\n logging.debug(\"Socket %s leaving multicast group %s on ifname '%s'\",\n s.getsockname(), addr, ifname)\n\n # Unsubscribe socket from the IP multicast group\n mcast_packed = socket.inet_pton(s.family, addr)\n if s.family == socket.AF_INET:\n optval = mcast_packed + struct.pack(\"!II\", socket.INADDR_ANY,\n _if_nametoindex(ifname))\n s.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, optval)\n else:\n optval = mcast_packed + struct.pack(\"!I\", _if_nametoindex(ifname))\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, optval)\n\n logging.debug(\"Socket %s left multicast group %s on ifname '%s'\",\n s.getsockname(), addr, ifname)\n\n\n# Use ctypes to access libC's if_nametoindex().\n# We need if_nametoindex() to get the network interface index\n# to pass to IP_MULTICAST_IF/IPV6_MULTICAST_IF socket options.\nfrom ctypes import CDLL\n_libc = CDLL(\"libc.so.6\")\n\n\ndef _if_nametoindex(ifname):\n if not ifname:\n return 0\n i = _libc.if_nametoindex(ifname)\n if not i:\n raise ValueError(\"Invalid network interface name %s\" % ifname)\n return i\n\n\ndef _get_bound_udp_socket(family, addr, port, mcastif):\n \"\"\"Get a UDP socket of the requested family.\n\n The socket is IPv4/IPv6 based on the value of family,\n bound to addr:port. If addr=None, the socket is bound to 0.0.0.0,\n or ::, for IPv4 and IPv6 respectively.\n\n If mcastif is set, outgoing multicast traffic is sent over the network\n interface with name mcastif on the local host, e.g. 
eth0.\n\n The socket is also set to allow IP broadcasting.\n\n \"\"\"\n if not addr:\n addr = \"0.0.0.0\" if family == socket.AF_INET else \"::\"\n\n try:\n ip = IP(addr)\n except ValueError:\n logging.error(\"Not a valid IPv4 or IPv6 address: %s\", addr)\n return None\n if (ip.version() == 4 and family != socket.AF_INET or\n ip.version() == 6 and family != socket.AF_INET6):\n logging.error(\"Cannot bind to an IPv%d address when using %s\",\n ip.version(), _family_name(family))\n return None\n\n try:\n s = socket.socket(family, socket.SOCK_DGRAM, 0)\n if family == socket.AF_INET6:\n # Only bind for IPv6 traffic when using an IPv6 socket\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)\n s.bind((addr, port))\n except socket.error as msg:\n logging.error(\"Could not bind %s UDP socket on %s, port %d: %s\",\n _family_name(family), addr, port, msg)\n s.close()\n return None\n\n # Allow sending UDP datagrams to broadcast addresses\n try:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n except Exception as msg:\n logging.error(\"Could not set the SO_BROADCAST flag on socket: %s\",\n msg)\n s.close()\n return None\n\n # Set the outgoing interface for multicast traffic\n try:\n ifindex = _if_nametoindex(mcastif)\n if family == socket.AF_INET6:\n s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,\n struct.pack(\"!I\", ifindex))\n else:\n s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,\n struct.pack(\"!III\", 0, 0, ifindex))\n except Exception as msg:\n logging.error(\"Failed to set multicast interface to '%s': %s\",\n mcastif, msg)\n s.close()\n return None\n\n # Set the socket in non-blocking mode\n fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)\n\n return s\n\n\ndef sigterm_handler(signum, stack_frame):\n assert signum == SIGTERM\n logging.info(\"Caught SIGTERM, terminating...\")\n raise SystemExit\n\n\nsigusr1_proxy = None\ntracing = None\n\n\ndef sigusr12_handler(signum, stack_frame):\n global tracing\n assert signum == SIGUSR1 or signum == SIGUSR2\n if signum == SIGUSR1:\n logging.info(\"Caught SIGUSR1. 
Showing current proxy state:\")\n sigusr1_proxy.log_state()\n return\n if signum == SIGUSR2:\n tracing = not tracing\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG if tracing else logging.INFO)\n logging.info(\"Caught SIGUSR2, %s tracing\" %\n (\"enabling\" if tracing else \"disabling\"))\n return\n\n\nclass VXLANProxy(object):\n \"\"\"The main class implementing the Matryoshka VXLAN proxy.\"\"\"\n def _create_vnet(self, vni, macttl):\n vn = VirtualNetwork(vni=vni, macttl=macttl)\n if vni in self.vnet_vni_map:\n raise ValueError(\"VNI %s already in use for vnet %r\" %\n (vni, self.vnet_vni_map[vni]))\n self.vnet_vni_map[vni] = vn\n return vn\n\n def _vnet_from_vni(self, vni):\n return self.vnet_vni_map[vni]\n\n def _remove_vnet(self, vnet):\n msg = \"Removed vnet %r\" % vnet\n del self.vnet_vni_map[vnet.vni]\n del vnet\n logging.info(msg)\n\n def _attach_to_network(self, tapname, vni, macttl, targetip, targetport):\n if tapname in self.vnet_tapname_map:\n vnet = self.vnet_tapname_map[tapname]\n msg = (\"Ignoring network addition request for tapname %s,\"\n \" already in use for vnet %r\" % (tapname, vnet))\n raise ValueError(msg)\n tap = VirtualTap(name=tapname)\n tap.open()\n self.taps.append(tap)\n # Set tap in non-blocking mode\n fcntl.fcntl(tap, fcntl.F_SETFL, os.O_NONBLOCK)\n\n vn = self._create_vnet(vni=vni, macttl=macttl)\n vn.targets = [(targetip, int(targetport))]\n vn.socket = self.socket\n vn.tap = tap\n tap.vnet = vn\n self.vnet_tapname_map[tapname] = vn\n\n for t in vn.targets:\n if _ip_is_multicast(t[0]):\n _join_mcast_group(vn.socket, t[0], self.mcastif)\n\n logging.info(\"Joined new network, vnet %r over tap %r\",\n tap.vnet, tap)\n\n def _detach_from_network(self, tapname):\n try:\n vnet = self.vnet_tapname_map[tapname]\n except KeyError:\n logging.error(\"Ignoring request to detach from unknown tap %s\",\n tapname)\n return\n\n for t in vnet.targets:\n if _ip_is_multicast(t[0]):\n _leave_mcast_group(vnet.socket, t[0], self.mcastif)\n\n del self.vnet_tapname_map[tapname]\n self._close_tap(vnet.tap)\n self._remove_vnet(vnet)\n\n def _close_tap(self, tap):\n logging.debug(\"Closing tap %r\", tap)\n tap.close()\n self.taps.remove(tap)\n del tap\n\n def _handle_incoming_frame(self, tap):\n \"\"\"Handle reception of incoming Ethernet frame on tap iface.\"\"\"\n vnet = tap.vnet\n logging.debug(\"Incoming frame on tap %r, vnet %r\", tap, vnet)\n frame = os.read(tap.fileno(), 10000)\n if not frame:\n logging.error(\"EOF on read, removing tap %r\", tap)\n self._close_tap(tap)\n return\n\n # TODO: Learn source mac. 
If it's a new MAC,\n # broadcast the packet to all VTEPs, to force MAC table\n # update on migrations.\n\n # build VXLAN-encapsulated packet\n #ether = Ether(frame)\n #packet = VXLAN(VNI=vnet.vni) / ether\n vx = VXLAN(frame=frame, vni=vnet.vni)\n\n # lookup vtep address for target dst MAC,\n # broadcast to all known targets if it's a multicast MAC.\n targets = [None]\n if not _mac_is_multicast(vx.dst_mac):\n targets = [vnet.lookup(vx.dst_mac)]\n if targets[0] is None:\n targets = vnet.targets\n\n # send it over UDP\n # TODO: Hash ether's headers to get source UDP address\n s = vnet.socket\n for t in targets:\n buf = str(vx)\n logging.debug(\"Sending VXLAN packet of %d bytes to peer %s\",\n len(buf), t)\n # TODO: Set O_NONBLOCK everywhere, report EAGAIN errors\n s.sendto(buf, t)\n\n def _handle_incoming_packet(self, s):\n \"\"\"Handle reception of encapsulated Ethernet frame on UDP socket.\"\"\"\n logging.debug(\"Incoming packet on socket %s\", s.getsockname())\n (packet, srcvtep) = s.recvfrom(10000)\n if not packet:\n logging.error(\"Received zero-length packet from %s?!\", srcvtep)\n return\n logging.debug(\"Incoming packet of length %d from %s\",\n len(packet), srcvtep)\n try:\n # vxlan = VXLAN(packet)\n # vni = vxlan.VNI\n vx = VXLAN(packet=packet)\n vni = vx.vni\n except Exception as e:\n logging.error(\"Dropping malformed non-VXLAN packet: %s\", e)\n return\n try:\n vnet = self._vnet_from_vni(vni)\n except KeyError:\n logging.error(\"Dropping packet with unknown VNI = %d\", vni)\n return\n\n logging.debug(\"Incoming packet from %s, len = %d for vnet = %r\",\n srcvtep, len(packet), vnet)\n\n # ether = vxlan.getlayer(Ether)\n #logging.debug(\"Ether MACs: dst = %s, src = %s\", ether.dst, ether.src)\n logging.debug(\"Ether MACs: dst = %s, src = %s\", vx.dst_mac, vx.src_mac)\n if _mac_is_multicast(vx.src_mac):\n # Drop frames with multicast address as Ethernet source MAC.\n #\n # IEEE 802.3-2002, Section 3.2.3(b) says I/G (multicast) bit is\n # reserved for Ethernet src MACs, see\n # http://standards.ieee.org/getieee802/download/802.3-2002.pdf\n #\n # Also useful:\n # RFC 1812, Section 3.3.2 says a router MUST not believe any ARP\n # reply that claims that the Link Layer address of another host or\n # router is a broadcast or multicast address, but the MS load\n # balancer violates this rule.\n logging.warning(\"Dropping inner Ethernet frame with multicast src\")\n return\n else:\n logging.debug(\"About to learn source MAC %s, endpoint %s\",\n vx.src_mac, srcvtep)\n try:\n wasnew = vnet.learn(vx.src_mac, srcvtep)\n logging.debug(\"MAC was %s for vnet %r\",\n 'new' if wasnew else 'known', vnet)\n except MemoryError:\n logging.debug(\"Could not learn MAC, table for %r full\",\n vnet)\n try:\n logging.debug(\"Writing Ethernet frame of length %d to fd %d\",\n len(vx.frame), vnet.tap.fileno())\n # TODO: Set O_NONBLOCK everywhere\n n = os.write(vnet.tap.fileno(), vx.frame)\n if n != len(vx.frame):\n logging.warning(\"Short write: %d != %d to tap %r for vnet %r\",\n n, len(vx.frame), vnet.tap, vnet)\n except Exception as e:\n logging.error(\"Error writing frame to tap %r for vnet %r: %s\",\n vnet.tap, vnet, e)\n\n def __init__(self, family=socket.AF_INET,\n bindaddr=DEFAULT_BINDADDR,\n bindport=DEFAULT_BINDPORT,\n mcastif=DEFAULT_MCASTIF,\n statedir=DEFAULT_STATEDIR):\n self.taps = []\n self.sockets = []\n self.vnet_vni_map = {}\n self.vnet_tapname_map = {}\n\n self.family = family\n self.bindaddr = bindaddr\n self.bindport = bindport\n self.mcastif = mcastif\n self.statedir = statedir\n\n 
self.wm = pyinotify.WatchManager()\n mask = pyinotify.EventsCodes.ALL_FLAGS[\"IN_DELETE\"]\n mask |= pyinotify.EventsCodes.ALL_FLAGS[\"IN_CLOSE_WRITE\"]\n self.notifier = pyinotify.Notifier(self.wm, FileHandler(self))\n wdd = self.wm.add_watch(self.statedir, mask, rec=True)\n if wdd[self.statedir] < 0:\n raise Exception(\"Could not watch state directory %s\" %\n self.statedir)\n\n # Allocate a single listening UDP socket.\n self.socket = _get_bound_udp_socket(self.family,\n self.bindaddr, self.bindport,\n self.mcastif)\n if not self.socket:\n raise Exception(\"Could not get bound UDP socket\")\n self.sockets.append(self.socket)\n\n def attach_to_network(self, path):\n \"\"\"Attach to a new virtual network, get parameters from path.\n\n The basename of the path is used as the name of the tap interface\n used to attach to the virtual network on the local host.\n\n \"\"\"\n logging.info(\"Attaching to network for file %s\", path)\n tapname = os.path.basename(path)\n info = _parse_network_file(path, self.family)\n if not info:\n logging.error(\"Ignoring network file %s due to errors\", path)\n return\n if tapname != info[\"tapname\"]:\n raise ValueError(\"filename of %s does not match TAPNAME=%s\" %\n (tapname, info[\"tapname\"]))\n self._attach_to_network(**info)\n\n def detach_from_network(self, path):\n \"\"\"Detach from a virtual network.\n\n The basename of the path is used as the name of the tap interface\n used to determine which network to detach from.\n\n \"\"\"\n logging.info(\"Detaching from network for file %s\", path)\n tapname = os.path.basename(path)\n self._detach_from_network(tapname)\n\n def log_state(self):\n s = [\"%s\" % str(sock.getsockname()) for sock in self.sockets]\n t = [\"%r %r\" % (tap, tap.vnet) for tap in self.taps]\n logging.info(\"Current set of open sockets, %d entries: %s\",\n len(s), \", \".join(s))\n logging.info((\"Current set of tap interfaces, and associated virtual\"\n \" networks, %d entries: %s\"), len(t),\n \", \".join(t))\n logging.info(\"Current mapping of VNIs to virtual networks: %s\",\n repr(self.vnet_vni_map))\n logging.info((\"Current mapping of tap interface names to virtual\"\n \" networks: %s\"), repr(self.vnet_tapname_map))\n logging.info(\"MAC tables per virtual network:\")\n for v in self.vnet_vni_map.keys():\n vnet = self.vnet_vni_map[v]\n logging.info(\"vnet %r: %r\", vnet, vnet._macs)\n\n def serve(self):\n # Cheat: get pyinotify Watch Manager's fd directly\n wmfd = self.wm._fd\n while True:\n # Before blocking on select(), process any pyinotify\n # events which may have been queued up by previous\n # invocations of serve(), but may have been left\n # unprocessed due to premature termination of this method,\n # if exceptions were thrown.\n logging.debug(\"processing any left-over pyinotify events\")\n self.notifier.process_events()\n\n logging.debug(\"Waiting for input from %d sockets, %d taps\",\n len(self.sockets), len(self.taps))\n try:\n rdset = self.sockets + self.taps + [wmfd]\n rfds, wfds, excfds = select.select(rdset, [], [])\n except select.error as e:\n if e[0] == errno.EINTR:\n continue\n\n logging.debug(\"Woke up after select, r = (%s, %s, %s)\",\n rfds, wfds, excfds)\n\n for fd in rfds:\n assert fd in rdset\n assert not wfds\n assert not excfds\n\n for fd in rfds:\n if fd in self.sockets:\n logging.debug(\"Socket fd %d ready after select\",\n fd.fileno())\n self._handle_incoming_packet(fd)\n if fd in self.taps:\n logging.debug(\"Tap fd %d ready after select\",\n fd.fileno())\n self._handle_incoming_frame(fd)\n if fd == 
wmfd:\n self.notifier.read_events()\n self.notifier.process_events()\n\n\ndef parse_arguments(args):\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n\n description = \\\n (\"Matryoshka is a VXLAN encapsulation agent, and implements a VXLAN\\n\"\n \"Virtual Tunnel Endpoint (VTEP). It performs two main functions:\\n\"\n \"a) it receives Ethernet frames from local tap ifaces, encapsulates\\n\"\n \" them in VXLAN packets with a proper Virtual Network ID (VNI), \\n\"\n \" and forwards them to the right VTEP based on destination MAC,\\n\"\n \"b) it listens to a UDP port, receiving VXLAN-encapsulated Ethernet\\n\"\n \" frames, which it then forwards to the proper local tap device\\n\"\n \" based on the VNI of the incoming packet.\\n\\n\"\n \"Matryoshka watches a state directory for requests\\n\"\n \"to attach and detach from virtual networks dynamically.\")\n\n parser = ArgumentParser(description=description,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument(\"-p\", \"--port\", action=\"store\", dest=\"bindport\",\n default=DEFAULT_BINDPORT, metavar=\"PORT\",\n help=(\"Bind to UDP port PORT, default is %d\" %\n DEFAULT_BINDPORT))\n parser.add_argument(\"-6\", \"--ipv6\", action=\"store_const\", dest=\"ipfamily\",\n default=socket.AF_INET, const=socket.AF_INET6,\n help=\"Run over IPv6, default is to run over IPv4\")\n parser.add_argument(\"-i\", \"--mcastif\", action=\"store\", dest=\"mcastif\",\n default=DEFAULT_MCASTIF, metavar=\"IFNAME\",\n help=(\"Send outgoing multicast datagrams, and join\"\n \" multicast groups over the interface with name\"\n \" IFNAME (e.g., eth0) on the local host. If not\"\n \" specified, multicast traffic goes over the\"\n \" default interface for the system.\"))\n parser.add_argument(\"--bindaddr\", action=\"store\", dest=\"bindaddr\",\n default=DEFAULT_BINDADDR, metavar=\"ADDRESS\",\n help=(\"Bind to host interface with address ADDRESS,\"\n \" default is to bind to 0.0.0.0 or to ::, for\"\n \" IPv4/IPv6 respectively. 
Warning: Do not bind\"\n \" if you will be using broadcast or multicast\"\n \" target addresses.\"))\n parser.add_argument(\"-s\", \"--statedir\", action=\"store\", dest=\"statedir\",\n default=DEFAULT_STATEDIR, metavar=\"DIRECTORY\",\n help=(\"Watch DIRECTORY for virtual network bindings,\"\n \" default is %s\" % DEFAULT_STATEDIR))\n parser.add_argument(\"--pidfile\", action=\"store\", dest=\"pidfile\",\n default=DEFAULT_PIDFILE, metavar=\"PIDFILE\",\n help=(\"Write the PID to PIDFILE if daemonizing,\"\n \" default is %s\" % DEFAULT_PIDFILE)),\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", dest=\"debug\",\n default=False, help=\"Turn on debugging messages\")\n parser.add_argument(\"-l\", \"--logging-dir\", action=\"store\", dest=\"logdir\",\n default=DEFAULT_LOGDIR, metavar=\"DIRECTORY\",\n help=(\"Store logfile %s in DIRECTORY, default is %s\" %\n (LOG_FILENAME, DEFAULT_LOGDIR)))\n parser.add_argument(\"-f\", \"--foreground\", action=\"store_false\",\n dest=\"daemonize\", default=True,\n help=\"Stay in the foreground and do not daemonize\")\n return parser.parse_args(args)\n\n\ndef main():\n global tracing\n opts = parse_arguments(sys.argv[1:])\n\n tracing = opts.debug\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG if opts.debug else logging.INFO)\n if opts.daemonize:\n logfile = os.path.join(opts.logdir, LOG_FILENAME)\n handler = logging.handlers.RotatingFileHandler(logfile,\n maxBytes=1048576)\n else:\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(LOG_FORMAT))\n logger.addHandler(handler)\n\n if opts.daemonize:\n pidfile = daemon.pidlockfile.TimeoutPIDLockFile(opts.pidfile, 10)\n d = daemon.DaemonContext(pidfile=pidfile,\n stdout=handler.stream, stderr=handler.stream,\n files_preserve=[handler.stream])\n d.umask = 0022\n d.open()\n\n logging.info(\"Starting matryoshka...\")\n proxy = VXLANProxy(family=opts.ipfamily, bindaddr=opts.bindaddr,\n bindport=opts.bindport, statedir=opts.statedir)\n\n # Touch every single file in state dir, to trigger additions\n logging.info(\"Touching all files under %s, to trigger network additions\",\n opts.statedir)\n try:\n for dirpath, dirnames, filenames in os.walk(opts.statedir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n open(path, 'a').close()\n except Exception as msg:\n logging.error(\"Caught exception while touching files in %s: %s\",\n opts.statedir, msg)\n\n logging.info(\"Dropping privileges, setting capabilities, switching uid\")\n # TODO: Drop all privileges\n\n # Handle SIGTERM, SIGUSR1, do not interrupt system calls\n global sigusr1_proxy\n sigusr1_proxy = proxy\n signal(SIGTERM, sigterm_handler)\n siginterrupt(SIGTERM, False)\n signal(SIGUSR1, sigusr12_handler)\n siginterrupt(SIGUSR1, False)\n signal(SIGUSR2, sigusr12_handler)\n siginterrupt(SIGUSR2, False)\n\n logging.info(\"Watching state directory %s\", opts.statedir)\n\n while True:\n try:\n logging.info(\"Entering proxy request servicing loop\")\n proxy.serve()\n except ValueError as ve:\n logging.error(\"Caught exception: Invalid input values: %s\", ve)\n logging.info(\"Resuming main request loop\")\n except Exception:\n logging.exception(\"Caught unexpected exception, text follows\")\n logging.info(\"Resuming main request loop in 1s\")\n time.sleep(1)\n\n logging.info(\"Exiting matryoshka...\")\n return 0\n\n\nif __name__ == \"__main__\":\n 
sys.exit(main())\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-2801750069125500000,"string":"-2,801,750,069,125,500,000"},"line_mean":{"kind":"number","value":37.5178351784,"string":"37.517835"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5804566502,"string":"0.580457"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109442,"cells":{"repo_name":{"kind":"string","value":"skitazaki/python-clitool"},"path":{"kind":"string","value":"clitool/__init__.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2822"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\"\n===================================\nCommand Line Tool Utilities\n===================================\n\n* One line argument parsing function and decorator\n* Simple configuration loader\n* Stream utility with some logging\n* CSV reader/writer unicode support for Python 2.x (in official document)\n* Apache accesslog parser\n\nRequirements\n============\n\n* Python 2.7 or 3.x\n\nPython 2.4, 2.5, 2.6 are not supported.\n\nInstall\n=======\n\nUse ``pip`` via PyPI.\n\n::\n\n pip install clitool\n\nBootstrap\n=========\n\nAt first, create your script file using module script, ``clitool.cli``.\n\n::\n\n $ python -m clitool.cli -o your-script.py\n\nThis file can parse basic command line options and arguments.\n\n::\n\n $ ./your-script.py --help\n usage: your-script.py [-h] [-c FILE] [-o FILE] [--basedir BASEDIR]\n [--input-encoding INPUT_ENCODING]\n [--output-encoding OUTPUT_ENCODING]\n [--processes PROCESSES] [--chunksize CHUNKSIZE]\n [-v | -q]\n [FILE [FILE ...]]\n\n positional arguments:\n FILE\n\n optional arguments:\n -h, --help show this help message and exit\n -c FILE, --config FILE\n configuration file\n -o FILE, --output FILE\n output file\n --basedir BASEDIR base directory\n --input-encoding INPUT_ENCODING\n encoding of input source\n --output-encoding OUTPUT_ENCODING\n encoding of output distination\n --processes PROCESSES\n count of processes\n --chunksize CHUNKSIZE\n a number of chunks submitted to the process pool\n -v, --verbose set logging to verbose mode\n -q, --quiet set logging to quiet mode\n\nEdit this script on your own :D\n\nExamples\n========\n\nExample scripts exist in git repository.\n\n* csv2db.py: read csv data and import database via 'SQLAlchemy'.\n* csv2gexf.py: read csv data and dump them by GEXF format via 'NetworkX'.\n* csv2json.py: read csv data and dump them by JSON format.\n* csv2kml.py: read csv data and dump them by KML format via 'simplekml'.\n* logfile.py: parse Apache access log and create report.\n* logparams.py: parse Apache access log and analyze query parameters.\n\"\"\"\n\n__title__ = 'clitool'\n__version__ = '0.4.1'\n__author__ = 'KITAZAKI Shigeru'\n\n# Constant values.\n\nRUNNING_MODE_ENVKEY = 'PYTHON_CLITOOL_ENV'\n\nDEFAULT_ENCODING = 'utf-8'\nDEFAULT_RUNNING_MODE = 'development'\n\nPROCESSING_REPORTING_INTERVAL = 10000\nPROCESSING_SUCCESS = 'success'\nPROCESSING_SKIPPED = 'skipped'\nPROCESSING_ERROR = 'error'\nPROCESSING_TOTAL = 'total'\nPROCESSING_TIME = 'time'\n\n# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 
:\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":1118717363739956000,"string":"1,118,717,363,739,956,000"},"line_mean":{"kind":"number","value":25.8761904762,"string":"25.87619"},"line_max":{"kind":"number","value":76,"string":"76"},"alpha_frac":{"kind":"number","value":0.597448618,"string":"0.597449"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109443,"cells":{"repo_name":{"kind":"string","value":"cjaymes/pyscap"},"path":{"kind":"string","value":"src/scap/model/ocil_2_0/ArtifactResultType.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1633"},"content":{"kind":"string","value":"# Copyright 2016 Casey Jaymes\n\n# This file is part of PySCAP.\n#\n# PySCAP is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# PySCAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PySCAP. If not, see .\n\nfrom scap.Model import Model\nimport logging\n\nlogger = logging.getLogger(__name__)\nclass ArtifactResultType(Model):\n MODEL_MAP = {\n 'elements': [\n # children of artifact_value tag\n # TODO: at least one of *_artifact_value\n {'tag_name': 'text_artifact_value', 'class': 'TextArtifactValueElement', 'min': 0, 'max': 1},\n {'tag_name': 'binary_artifact_value', 'class': 'BinaryArtifactValueElement', 'min': 0, 'max': 1},\n {'tag_name': 'reference_artifact_value', 'class': 'ReferenceArtifactValueElement', 'min': 0, 'max': 1},\n {'tag_name': 'provider', 'type': 'ProviderValuePattern', 'min': 1, 'max': 1},\n {'tag_name': 'submitter', 'class': 'UserType', 'min': 1, 'max': 1},\n ],\n 'attributes': {\n 'artifact_ref': {'type': 'ArtifactIDPattern', 'required': True},\n 'timestamp': {'type': 'DateTimeType', 'required': True},\n }\n }\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-1167776288768710700,"string":"-1,167,776,288,768,710,700"},"line_mean":{"kind":"number","value":43.1351351351,"string":"43.135135"},"line_max":{"kind":"number","value":115,"string":"115"},"alpha_frac":{"kind":"number","value":0.6436007348,"string":"0.643601"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109444,"cells":{"repo_name":{"kind":"string","value":"Alwnikrotikz/open-hea"},"path":{"kind":"string","value":"src/openhea/importdata.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"9848"},"content":{"kind":"string","value":"import os, sys\nfrom datetime import date\n\nimport xlrd\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nfrom pprint import PrettyPrinter\n\n\nfrom model.config_parser import OpenHEAConfig\nfrom model.mapper import LivelihoodZone\n\npp = PrettyPrinter()\n\ndef transpose(grid):\n return zip(*grid)\n\ndef removeBlankRows(grid):\n return [list(row) for row in grid if any(row)]\n\ndef removeBlankRowsAndColumns(grid):\n return removeBlankRows(transpose(removeBlankRows(transpose(grid))))\n\ndef sheetToGrid(sheet):\n grid = []\n for rownum in range(sheet.nrows):\n grid.append(sheet.row_values(rownum))\n return grid\n\ndef sheetToGridNoBlank(sheet):\n return 
removeBlankRowsAndColumns(sheetToGrid(sheet))\n\ndef testPrint(obj):\n if test_print:\n pp.pprint(obj)\n\nclass DataImporter:\n def __init__(self, spreadsheet):\n # database session\n config_file = os.path.join(os.path.dirname(__file__), 'openhea.cfg')\n self.config = OpenHEAConfig()\n read = self.config.read(config_file)\n if len(read) != 1:\n print 'Need openhea.cfg setup with database parameters'\n sys.exit(1)\n cs = self.config.sqlalchemy_connection_string()\n engine = create_engine(cs, echo=True)\n Session = sessionmaker(bind=engine)\n self.session = Session()\n # open workbook\n self.workbook = xlrd.open_workbook(spreadsheet)\n\n def saveSiteData(self):\n \"\"\"Expecting site_data to look like:\n [\n ['One sample point', '']\n ['Country name', 'Namibia']\n ['LZ name', 'Caprivi Lowland Maize and Cattle Zone']\n ['Name of village or settlement', 'avillage']\n ['Interview date', '40321.0']\n ['Interviewer name', 'James Acidri']\n ['Interview number', '1']\n ['Start of reference/ consumption year', 'March']\n ]\n\n Note that the date being a number is an Excel thing - see code for how we deal with it\n \"\"\"\n site_data = sheetToGridNoBlank(self.workbook.sheet_by_index(0))\n testPrint(site_data)\n DATACOL = 1\n project = site_data[1][DATACOL]\n livelihoodzone = site_data[2][DATACOL]\n # TODO: not currently in database schema\n #village_name = site_data[3][DATACOL]\n date_tuple = xlrd.xldate_as_tuple(site_data[4][DATACOL],\n self.workbook.datemode)\n datecreated = date(date_tuple[0], date_tuple[1], date_tuple[2])\n createdby = site_data[5][DATACOL]\n # TODO: not currently in database schema\n #interview_number = site_data[6][DATACOL]\n consumptionyearbegins = site_data[7][DATACOL]\n lz = Livelihoodzone(\n livelihoodzone=livelihoodzone,\n createdby=createdby,\n datecreated=datecreated,\n consumptionyearbegins=consumptionyearbegins)\n self.session.add(lz)\n self.session.commit()\n\n def saveExpenditureData(self):\n \"\"\"Expecting expenditure_data to look like:\n [\n ['', '', '', '', '', u'HOUSEHOLD EXPENDITURE', '', '', '', '', '', '', '', '', ''],\n ['', '', '', u'WG1 Lower', u'WG1 upper', u'WG2 Lower', u'WG2 Upper', u'WG3 Lower', u'WG3 Upper', u'WG4 Lower', u'WG4 Upper', u'WG1', u'WG2', u'WG3', u'WG4'],\n [u'Category', u'Food type', u'Unit', '', '', '', u'No. 
Units purchased', '', '', '', '', '', u'Price per unit', '', ''],\n [u'Staple food', u'Maize meal', u'Kg', 390.0, 390.0, 208.0, 208.0, 182.0, 182.0, 97.5, 97.5, 4.0, 4.0, 4.0, 4.0],\n [u'Non-staple food', u'Sugar', u'Kg', 11.0, 11.0, 22.0, 22.0, 36.0, 36.0, 18.0, 18.0, 7.5, 11.0, 6.0, 8.5],\n ['', '', '', '', '', '', u'Annual expenditure', '', '', '', '', '', '', '', ''],\n [u'Household items', u'Candles', u'N$', 92.0, 92.0, 95.0, 95.0, 164.0, 164.0, 235.0, 235.0, '', '', '', ''],\n ['', u'Soap/Vaseline', u'N$', 258.0, 258.0, 480.0, 480.0, 326.0, 326.0, 597.0, 597.0, '', '', '', ''],\n ['', u'Kerosine', u'N$', 0.0, 0.0, 0.0, 0.0, 140.0, 140.0, 360.0, 360.0, '', '', '', ''],\n [u'Essential inputs', u'Tools', '', 200.0, 200.0, 200.0, 200.0, 300.0, 300.0, 300.0, 300.0, '', '', '', '']\n ]\n \"\"\"\n expenditure_data = sheetToGridNoBlank(self.workbook.sheet_by_index(3))\n testPrint(expenditure_data)\n # check the first row is what we expect\n title_row = expenditure_data.pop(0)\n stripped_title_row = [x for x in title_row if x != '']\n assert len(stripped_title_row) == 1\n wealth_groups = {}\n expenditure = []\n standard_of_living = []\n wg_row = expenditure_data.pop(0)\n for index, col in enumerate(wg_row):\n if col.lower().endswith('lower') and wg_row[index+1].lower().endswith('upper'):\n wealth_group_name = col.split()[0]\n assert wealth_group_name.lower() == wg_row[index+1].lower().split()[0]\n wealth_groups[wealth_group_name] = {\n 'lower_col': index,\n 'upper_col': index+1,\n }\n expenditure_data.pop(0)\n data = expenditure\n category = ''\n for row in expenditure_data:\n strip_row = [x for x in row if x != '']\n # change data dictionary after \"Annual Expenditure\"\n if len(strip_row) == 1 and strip_row[0].lower().startswith('annual'):\n data = standard_of_living\n continue\n # cache category - it is often not repeated, so we will keep using the same\n # value until it changes\n datadict = {}\n if row[0]:\n category = row[0]\n datadict['category'] = category\n datadict['type'] = row[1]\n datadict['unit'] = row[2]\n for wg in wealth_groups.keys():\n datadict[wg] = {\n 'lower': row[wealth_groups[wg]['lower_col']],\n 'upper': row[wealth_groups[wg]['upper_col']],\n }\n data.append(datadict)\n\n\n def saveWealthGroupAssetsData(self):\n \"\"\"Expecting wgassets_data to look like:\n [['', u'Wealth group characteristics', '', '', '', '', '', '', '', '', '', '', '', ''],\n ['', '', '', u'WG1', u'WG2', u'WG3', u'WG4', u'WG5 etc', '', '', '', '', '', ''],\n ['', u'Wealth group name', '', u'very poor', u'poor ', u'middle ', u'better off', '', '', '', '', '', '', ''],\n ['', u'Percent in wealth group', '', 0.31, 0.39, 0.22, 0.08, '', '', '', '', '', '', ''],\n ['', u'Number of people in household', '', 7.0, 7.0, 6.0, 5.0, '', '', '', '', '', '', ''],\n ['', u'Wealth group rank 1= poorest', '', 1.0, 2.0, 3.0, 4.0, '', '', '', '', '', '', ''],\n [u'ASSETS', '', '', '', '', u'Asset holdings', '', '', '', '', '', '', u'Asset price', ''],\n ['', '', '', u'WG1', u'WG1', u'WG2', u'WG2', u'WG3', u'WG3', u'WG4 ', u'WG4', u'WG5 etc', '', ''],\n ['', '', '', u'Lower', u'Upper', u'Lower', u'Upper', u'Lower', u'Upper', u'Lower', u'Upper', '', u'Lower', u'Upper'],\n [u'Category', u'Asset Type', u'Unit', '', '', '', '', '', '', '', '', '', '', ''],\n [u'Land', u'Upland', u'Acre', 1.25, 2.0, 2.5, 3.0, 20.0, 25.0, 22.5, 30.0, '', '', ''],\n ['', u'Owned Irrigated', u'Acre', 0.5, 1.0, 1.0, 2.0, 1.0, 2.0, '', '', '', '', ''],\n [u'Trees', u'Mango', u'Item', 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 5.0, '', '', ''],\n 
[u'Other tradeable goods', u'Cell phone', u'Item', 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, '', '', ''],\n ['', u'Ox plough', u'Item', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, '', '', ''],\n ['', u'Livestock assets setup missing', '', '', '', '', '', '', '', '', '', '', '', ''],\n [u'Livestock', u'Cattle', u'Item', 0.0, 5.0, 3.0, 6.0, 7.0, 14.0, 35.0, 87.0, '', 300.0, 300.0],\n ['', u'Goats', u'Item', 0.0, 6.0, 7.0, 11.0, 12.0, 23.0, 20.0, 22.0, '', 50.0, 50.0],\n ['', u'?cash?foodstocks', '', '', '', '', '', '', '', '', '', '', '', '']]\n\n \"\"\"\n wgassets_data = sheetToGridNoBlank(self.workbook.sheet_by_index(1))\n # check the first row has one cell (the title)\n title_row = wgassets_data[0]\n stripped_title_row = [x for x in title_row if x != '']\n assert len(stripped_title_row) == 1\n wealth_groups = {}\n category = ''\n data = []\n wealth_groups_list = set(wgassets_data[7])\n wealth_groups_list.remove('')\n wealth_groups = {}\n for wg in wealth_groups_list:\n wealth_groups[wg] = {\n 'upper':'',\n 'lower':'',\n }\n for row in wgassets_data[10:]:\n this_wealth_groups = wealth_groups.copy()\n # cache category - it is often not repeated, so we will keep using the same\n # value until it changes\n if row[0]:\n category = row[0]\n datadict = {}\n datadict['category'] = category\n datadict['type'] = row[1]\n datadict['unit'] = row[2]\n for index,val in enumerate(row[3:]):\n if val != '' and wgassets_data[7][index+3] != '':\n this_wealth_groups[wgassets_data[7][index+3]][wgassets_data[8][index+3].lower()] = val\n\n datadict['data'] = this_wealth_groups\n data.append(datadict)\n\n\n\ndef main(doc):\n di = DataImporter(doc)\n # TODO: reinstate commented out methods once everything is working\n di.saveSiteData()\n di.saveExpenditureData()\n di.saveWealthGroupAssetsData()\n\nif __name__ == '__main__':\n test_print = True\n sample_doc = os.path.join(os.path.dirname(__file__), '..', '..', 'EXAMPLEHEADATA.xls')\n sys.exit(main(sample_doc))\n\n"},"license":{"kind":"string","value":"lgpl-2.1"},"hash":{"kind":"number","value":4157351473706986500,"string":"4,157,351,473,706,986,500"},"line_mean":{"kind":"number","value":44.3824884793,"string":"44.382488"},"line_max":{"kind":"number","value":165,"string":"165"},"alpha_frac":{"kind":"number","value":0.5087327376,"string":"0.508733"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109445,"cells":{"repo_name":{"kind":"string","value":"whatevsz/rbackupd"},"path":{"kind":"string","value":"rbackupd/config/configmanager.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2444"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n# Copyright (c) 2013 Hannes Körber \n\nimport logging\nimport configobj\nimport validate\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConfigManager(configobj.ConfigObj):\n \"\"\"\n This class is derived from ConfigObj and validates the configuration file\n automatically.\n\n :param path: The path to the configuration file.\n :type path: str\n\n :param configspec: The path to the configspec describing the structure of\n the configuration file. 
Consult the configobj\n documentation for details.\n :type path: str\n\n :raise ValidationError: if the validation fails\n :raise IOError: if the configuration file or the configspec is not found\n \"\"\"\n\n def __init__(self, path, configspec):\n configobj.ConfigObj.__init__(\n self,\n infile=path,\n list_values=True,\n create_empty=False,\n file_error=True,\n interpolation=False,\n raise_errors=True,\n configspec=configspec,\n write_empty_values=True)\n\n logger.debug(\"Validating configuration file.\")\n validator = validate.Validator()\n try:\n result = self.validate(validator, preserve_errors=True)\n except IOError:\n raise\n if result is not True:\n message = \"\"\n for entry in configobj.flatten_errors(self, result):\n (sections, key, error) = entry\n expanded_section = \".\".join(sections)\n if error is False:\n message += (\"In section \\\"%s\\\": key \\\"%s\\\" not found\\n\" %\n (expanded_section, key))\n else:\n message += (\"In section \\\"%s\\\": failed valiation for key \"\n \"\\\"%s\\\"\\n\" %\n (expanded_section, key))\n message = message.rstrip(\"\\n\")\n raise ValidationError(message)\n\n\nclass ValidationError(Exception):\n \"\"\"\n This exception is raised when the validation of the configuration file\n fails.\n\n :param message: A message with details about how the validation failed.\n :type message: str\n \"\"\"\n\n def __init__(self, message):\n Exception.__init__(self)\n self.message = message\n\n def __str__(self):\n return self.message\n\nConfigError = configobj.ConfigObjError\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":6444203800207971000,"string":"6,444,203,800,207,971,000"},"line_mean":{"kind":"number","value":30.3205128205,"string":"30.320513"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.5738845682,"string":"0.573885"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109446,"cells":{"repo_name":{"kind":"string","value":"ArchiFleKs/magnum"},"path":{"kind":"string","value":"magnum/drivers/swarm_fedora_atomic_v1/template_def.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1075"},"content":{"kind":"string","value":"# Copyright 2016 Rackspace Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport os\n\nfrom magnum.drivers.heat import swarm_fedora_template_def as sftd\n\n\nclass AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition):\n \"\"\"Docker swarm template for a Fedora Atomic VM.\"\"\"\n\n @property\n def driver_module_path(self):\n return __name__[:__name__.rindex('.')]\n\n @property\n def template_path(self):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'templates/cluster.yaml')\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":2792705736945375700,"string":"2,792,705,736,945,375,700"},"line_mean":{"kind":"number","value":36.0689655172,"string":"36.068966"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.7162790698,"string":"0.716279"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109447,"cells":{"repo_name":{"kind":"string","value":"Ramblurr/Far-Horizons"},"path":{"kind":"string","value":"tools/game_packet.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1525"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\n This script will create a zip file containing a first turn packet for a player.\n\"\"\"\nimport fhutils\nimport os, sys, tempfile, subprocess\nimport getopt\n\ndef main(argv):\n config_file = None\n discard = False\n try: \n opts, args = getopt.getopt(argv, \"hc:\", [\"help\", \"config=\"])\n except getopt.GetoptError: \n print(__doc__) \n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"): \n print(__doc__) \n sys.exit(0)\n elif opt in (\"-c\", \"--config\"):\n config_file = arg\n\n if config_file:\n config = fhutils.GameConfig(config_file)\n else:\n config = fhutils.GameConfig()\n game = config.gameslist[0] # for now we only support a single game\n game_name = game['name']\n data_dir = game['datadir']\n bin_dir = config.bindir\n \n \n os.chdir(data_dir)\n \n # prepare galaxy list\n output = fhutils.run(bin_dir, \"ListGalaxy\", [\"-p\"])\n with open(\"galaxy.list.txt\", \"w\") as f:\n f.write(output)\n \n players = fhutils.Game().players\n for p in players:\n try:\n subprocess.check_call([\"zip\", \"sp%s.zip\" % (p['num']), \"sp%s.rpt.t1\" % (p['num']), \"galaxy.map.pdf\", \"galaxy.map.txt\", \"game_policies.pdf\", \"galaxy.list.txt\"])\n except CalledProcessError:\n print(\"ERROR making zip: sp%s.zip\" % (p['num']))\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":6489482347662260000,"string":"6,489,482,347,662,260,000"},"line_mean":{"kind":"number","value":30.1428571429,"string":"30.142857"},"line_max":{"kind":"number","value":172,"string":"172"},"alpha_frac":{"kind":"number","value":0.5291803279,"string":"0.52918"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109448,"cells":{"repo_name":{"kind":"string","value":"ITLabProject2016/internet_technology_lab_project"},"path":{"kind":"string","value":"prepare_images.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1075"},"content":{"kind":"string","value":"import os\nimport Image\nimport glob\n\n#gets images from a dir recursively, resizes them to optimal width and copies them to a new dir\n\n##################################\n#change me to your pictures dir!!!!\nDIR = \"/home/kostis/Desktop/img/\"\n\n#story icons are saved at \"./populate_img/stories\" with width 
100\n#story point icons are saved at \"./populate_img/points\" with width 350\n\nOUT_DIR = \"./populate_img/points\"\nopt_width = 300;\n##################################\n\nG = glob.glob(DIR+\"*.jpg\")\nG1 = glob.glob(DIR+\"*.png\")\nG2 = glob.glob(DIR+\"*.JPEG\")\nG3 = glob.glob(DIR+\"*.JPG\")\nG = G + G1 + G2 + G3\n\nfor filePath in G:\n\n file = open(filePath)\n img = Image.open(file)\n\n width = img.size[0]\n height = img.size[1]\n\n ratio = opt_width / float(width)\n\n width = int(width * ratio)\n height = int(height * ratio)\n\n img = img.resize([width,height])\n img.save(os.path.join(os.path.join(OUT_DIR, os.path.splitext(os.path.basename(filePath))[0]))+\".jpg\")\n print os.path.join(os.path.join(OUT_DIR, os.path.splitext(os.path.basename(filePath))[0]))+\".jpg\"\n\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":522612940822323900,"string":"522,612,940,822,323,900"},"line_mean":{"kind":"number","value":25.875,"string":"25.875"},"line_max":{"kind":"number","value":105,"string":"105"},"alpha_frac":{"kind":"number","value":0.6176744186,"string":"0.617674"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109449,"cells":{"repo_name":{"kind":"string","value":"avaitla/Haskell-to-C---Bridge"},"path":{"kind":"string","value":"pygccxml-1.0.0/unittests/filters_tester.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3555"},"content":{"kind":"string","value":"# Copyright 2004-2008 Roman Yakovenko.\r\n# Distributed under the Boost Software License, Version 1.0. (See\r\n# accompanying file LICENSE_1_0.txt or copy at\r\n# http://www.boost.org/LICENSE_1_0.txt)\r\n\r\nimport os\r\nimport unittest\r\nimport autoconfig\r\nimport parser_test_case\r\n\r\nfrom pygccxml import utils\r\nfrom pygccxml import parser\r\nfrom pygccxml import declarations\r\n\r\nclass tester_t( parser_test_case.parser_test_case_t ):\r\n global_ns = None\r\n COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE \r\n def __init__(self, *args ):\r\n parser_test_case.parser_test_case_t.__init__( self, *args )\r\n self.header = 'declarations_calldef.hpp'\r\n self.global_ns = None\r\n \r\n def setUp(self):\r\n if not tester_t.global_ns:\r\n decls = parser.parse( [self.header], self.config )\r\n tester_t.global_ns = declarations.get_global_namespace( decls )\r\n tester_t.global_ns.init_optimizer()\r\n self.global_ns = tester_t.global_ns\r\n \r\n \r\n def test_regex( self ): \r\n criteria = declarations.regex_matcher_t( 'oper.*'\r\n , lambda decl: decl.name )\r\n operators = declarations.matcher.find( criteria, self.global_ns )\r\n operators = filter( lambda d: not d.is_artificial, operators )\r\n self.failUnless( 6 == len(operators) )\r\n\r\n def test_access_type( self ): \r\n criteria = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC )\r\n public_members = declarations.matcher.find( criteria, self.global_ns )\r\n if '0.9' in public_members[0].compiler:\r\n public_members = filter( lambda d: not d.is_artificial, public_members )\r\n self.failUnless( 16 == len( public_members ) ) \r\n else:\r\n self.failUnless( 20 == len( public_members ) )\r\n \r\n def test_or_matcher( self ):\r\n criteria1 = declarations.regex_matcher_t( 'oper.*'\r\n , lambda decl: decl.name )\r\n criteria2 = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC )\r\n found = declarations.matcher.find( criteria1 | criteria2, self.global_ns )\r\n\r\n if '0.9' in found[0].compiler:\r\n found = filter( lambda d: not d.is_artificial, found 
)\r\n self.failUnless( 15 <= len( found ) <= 21) \r\n else:\r\n self.failUnless( 19 <= len( found ) <= 25)\r\n\r\n def test_and_matcher( self ):\r\n criteria1 = declarations.regex_matcher_t( 'oper.*'\r\n , lambda decl: decl.name )\r\n criteria2 = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC )\r\n found = declarations.matcher.find( criteria1 & criteria2, self.global_ns )\r\n found = filter( lambda d: not d.is_artificial, found )\r\n self.failUnless( len( found ) <= 6 )\r\n\r\n def test_not_matcher( self ):\r\n criteria1 = declarations.regex_matcher_t( 'oper.*'\r\n , lambda decl: decl.name )\r\n found = declarations.matcher.find( ~( ~criteria1 ), self.global_ns )\r\n found = filter( lambda d: not d.is_artificial, found )\r\n self.failUnless( len( found ) == 6 )\r\n\r\ndef create_suite():\r\n suite = unittest.TestSuite() \r\n suite.addTest( unittest.makeSuite(tester_t))\r\n return suite\r\n\r\ndef run_suite():\r\n unittest.TextTestRunner(verbosity=2).run( create_suite() )\r\n\r\nif __name__ == \"__main__\":\r\n run_suite()\r\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-6151372971956007000,"string":"-6,151,372,971,956,007,000"},"line_mean":{"kind":"number","value":40.8313253012,"string":"40.831325"},"line_max":{"kind":"number","value":90,"string":"90"},"alpha_frac":{"kind":"number","value":0.5836849508,"string":"0.583685"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109450,"cells":{"repo_name":{"kind":"string","value":"googleapis/python-compute"},"path":{"kind":"string","value":"google/cloud/compute_v1/services/routers/transports/rest.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"33916"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport warnings\nfrom typing import Callable, Dict, Optional, Sequence, Tuple\n\nfrom google.api_core import gapic_v1 # type: ignore\nfrom google.api_core import exceptions as core_exceptions # type: ignore\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\n\nimport grpc # type: ignore\n\nfrom google.auth.transport.requests import AuthorizedSession\n\nfrom google.cloud.compute_v1.types import compute\n\nfrom .base import RoutersTransport, DEFAULT_CLIENT_INFO\n\n\nclass RoutersRestTransport(RoutersTransport):\n \"\"\"REST backend transport for Routers.\n\n The Routers API.\n\n This class defines the same methods as the primary client, so the\n primary client can load the underlying transport implementation\n and call it.\n\n It sends JSON representations of protocol buffers over HTTP/1.1\n \"\"\"\n\n def __init__(\n self,\n *,\n host: str = \"compute.googleapis.com\",\n credentials: ga_credentials.Credentials = None,\n credentials_file: str = None,\n scopes: Sequence[str] = None,\n client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,\n 
quota_project_id: Optional[str] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]):\n The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n\n credentials_file (Optional[str]): A file with credentials that can\n be loaded with :func:`google.auth.load_credentials_from_file`.\n This argument is ignored if ``channel`` is provided.\n scopes (Optional(Sequence[str])): A list of scopes. This argument is\n ignored if ``channel`` is provided.\n client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client\n certificate to configure mutual TLS HTTP channel. It is ignored\n if ``channel`` is provided.\n quota_project_id (Optional[str]): An optional project to use for billing\n and quota.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n \"\"\"\n # Run the base constructor\n # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.\n # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the\n # credentials object\n super().__init__(\n host=host, credentials=credentials, client_info=client_info,\n )\n self._session = AuthorizedSession(\n self._credentials, default_host=self.DEFAULT_HOST\n )\n if client_cert_source_for_mtls:\n self._session.configure_mtls_channel(client_cert_source_for_mtls)\n self._prep_wrapped_messages(client_info)\n\n def aggregated_list(\n self,\n request: compute.AggregatedListRoutersRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.RouterAggregatedList:\n r\"\"\"Call the aggregated list method over HTTP.\n\n Args:\n request (~.compute.AggregatedListRoutersRequest):\n The request object. A request message for\n Routers.AggregatedList. 
See the method\n description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.RouterAggregatedList:\n Contains a list of routers.\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/aggregated/routers\".format(\n host=self._host, project=request.project,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.AggregatedListRoutersRequest.filter in request:\n query_params[\"filter\"] = request.filter\n if compute.AggregatedListRoutersRequest.include_all_scopes in request:\n query_params[\"includeAllScopes\"] = request.include_all_scopes\n if compute.AggregatedListRoutersRequest.max_results in request:\n query_params[\"maxResults\"] = request.max_results\n if compute.AggregatedListRoutersRequest.order_by in request:\n query_params[\"orderBy\"] = request.order_by\n if compute.AggregatedListRoutersRequest.page_token in request:\n query_params[\"pageToken\"] = request.page_token\n if compute.AggregatedListRoutersRequest.return_partial_success in request:\n query_params[\"returnPartialSuccess\"] = request.return_partial_success\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.RouterAggregatedList.from_json(\n response.content, ignore_unknown_fields=True\n )\n\n def delete(\n self,\n request: compute.DeleteRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.Operation:\n r\"\"\"Call the delete method over HTTP.\n\n Args:\n request (~.compute.DeleteRouterRequest):\n The request object. A request message for Routers.Delete.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.Operation:\n Represents an Operation resource.\n\n Google Compute Engine has three Operation resources:\n\n - `Global `__\n \\*\n `Regional `__\n \\*\n `Zonal `__\n\n You can use an operation resource to manage asynchronous\n API requests. For more information, read Handling API\n responses.\n\n Operations can be global, regional or zonal.\n\n - For global operations, use the ``globalOperations``\n resource.\n - For regional operations, use the ``regionOperations``\n resource.\n - For zonal operations, use the ``zonalOperations``\n resource.\n\n For more information, read Global, Regional, and Zonal\n Resources. 
(== resource_for\n {$api_version}.globalOperations ==) (== resource_for\n {$api_version}.regionOperations ==) (== resource_for\n {$api_version}.zoneOperations ==)\n\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.DeleteRouterRequest.request_id in request:\n query_params[\"requestId\"] = request.request_id\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.delete(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.Operation.from_json(response.content, ignore_unknown_fields=True)\n\n def get(\n self,\n request: compute.GetRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.Router:\n r\"\"\"Call the get method over HTTP.\n\n Args:\n request (~.compute.GetRouterRequest):\n The request object. 
A request message for Routers.Get.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.Router:\n Represents a Cloud Router resource.\n For more information about Cloud Router,\n read the Cloud Router overview.\n\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.Router.from_json(response.content, ignore_unknown_fields=True)\n\n def get_nat_mapping_info(\n self,\n request: compute.GetNatMappingInfoRoutersRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.VmEndpointNatMappingsList:\n r\"\"\"Call the get nat mapping info method over HTTP.\n\n Args:\n request (~.compute.GetNatMappingInfoRoutersRequest):\n The request object. A request message for\n Routers.GetNatMappingInfo. 
See the\n method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.VmEndpointNatMappingsList:\n Contains a list of\n VmEndpointNatMappings.\n\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.GetNatMappingInfoRoutersRequest.filter in request:\n query_params[\"filter\"] = request.filter\n if compute.GetNatMappingInfoRoutersRequest.max_results in request:\n query_params[\"maxResults\"] = request.max_results\n if compute.GetNatMappingInfoRoutersRequest.order_by in request:\n query_params[\"orderBy\"] = request.order_by\n if compute.GetNatMappingInfoRoutersRequest.page_token in request:\n query_params[\"pageToken\"] = request.page_token\n if compute.GetNatMappingInfoRoutersRequest.return_partial_success in request:\n query_params[\"returnPartialSuccess\"] = request.return_partial_success\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.VmEndpointNatMappingsList.from_json(\n response.content, ignore_unknown_fields=True\n )\n\n def get_router_status(\n self,\n request: compute.GetRouterStatusRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.RouterStatusResponse:\n r\"\"\"Call the get router status method over HTTP.\n\n Args:\n request (~.compute.GetRouterStatusRouterRequest):\n The request object. A request message for\n Routers.GetRouterStatus. 
See the method\n description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.RouterStatusResponse:\n\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/getRouterStatus\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.RouterStatusResponse.from_json(\n response.content, ignore_unknown_fields=True\n )\n\n def insert(\n self,\n request: compute.InsertRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.Operation:\n r\"\"\"Call the insert method over HTTP.\n\n Args:\n request (~.compute.InsertRouterRequest):\n The request object. A request message for Routers.Insert.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.Operation:\n Represents an Operation resource.\n\n Google Compute Engine has three Operation resources:\n\n - `Global `__\n \\*\n `Regional `__\n \\*\n `Zonal `__\n\n You can use an operation resource to manage asynchronous\n API requests. For more information, read Handling API\n responses.\n\n Operations can be global, regional or zonal.\n\n - For global operations, use the ``globalOperations``\n resource.\n - For regional operations, use the ``regionOperations``\n resource.\n - For zonal operations, use the ``zonalOperations``\n resource.\n\n For more information, read Global, Regional, and Zonal\n Resources. 
(== resource_for\n {$api_version}.globalOperations ==) (== resource_for\n {$api_version}.regionOperations ==) (== resource_for\n {$api_version}.zoneOperations ==)\n\n \"\"\"\n\n # Jsonify the request body\n body = compute.Router.to_json(\n request.router_resource,\n including_default_value_fields=False,\n use_integers_for_enums=False,\n )\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers\".format(\n host=self._host, project=request.project, region=request.region,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.InsertRouterRequest.request_id in request:\n query_params[\"requestId\"] = request.request_id\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.post(url, headers=headers, data=body,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.Operation.from_json(response.content, ignore_unknown_fields=True)\n\n def list(\n self,\n request: compute.ListRoutersRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.RouterList:\n r\"\"\"Call the list method over HTTP.\n\n Args:\n request (~.compute.ListRoutersRequest):\n The request object. 
A request message for Routers.List.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.RouterList:\n Contains a list of Router resources.\n \"\"\"\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers\".format(\n host=self._host, project=request.project, region=request.region,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.ListRoutersRequest.filter in request:\n query_params[\"filter\"] = request.filter\n if compute.ListRoutersRequest.max_results in request:\n query_params[\"maxResults\"] = request.max_results\n if compute.ListRoutersRequest.order_by in request:\n query_params[\"orderBy\"] = request.order_by\n if compute.ListRoutersRequest.page_token in request:\n query_params[\"pageToken\"] = request.page_token\n if compute.ListRoutersRequest.return_partial_success in request:\n query_params[\"returnPartialSuccess\"] = request.return_partial_success\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.get(url, headers=headers,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.RouterList.from_json(\n response.content, ignore_unknown_fields=True\n )\n\n def patch(\n self,\n request: compute.PatchRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.Operation:\n r\"\"\"Call the patch method over HTTP.\n\n Args:\n request (~.compute.PatchRouterRequest):\n The request object. A request message for Routers.Patch.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.Operation:\n Represents an Operation resource.\n\n Google Compute Engine has three Operation resources:\n\n - `Global `__\n \\*\n `Regional `__\n \\*\n `Zonal `__\n\n You can use an operation resource to manage asynchronous\n API requests. For more information, read Handling API\n responses.\n\n Operations can be global, regional or zonal.\n\n - For global operations, use the ``globalOperations``\n resource.\n - For regional operations, use the ``regionOperations``\n resource.\n - For zonal operations, use the ``zonalOperations``\n resource.\n\n For more information, read Global, Regional, and Zonal\n Resources. 
(== resource_for\n {$api_version}.globalOperations ==) (== resource_for\n {$api_version}.regionOperations ==) (== resource_for\n {$api_version}.zoneOperations ==)\n\n \"\"\"\n\n # Jsonify the request body\n body = compute.Router.to_json(\n request.router_resource,\n including_default_value_fields=False,\n use_integers_for_enums=False,\n )\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.PatchRouterRequest.request_id in request:\n query_params[\"requestId\"] = request.request_id\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.patch(url, headers=headers, data=body,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.Operation.from_json(response.content, ignore_unknown_fields=True)\n\n def preview(\n self,\n request: compute.PreviewRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.RoutersPreviewResponse:\n r\"\"\"Call the preview method over HTTP.\n\n Args:\n request (~.compute.PreviewRouterRequest):\n The request object. A request message for\n Routers.Preview. 
See the method\n description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.RoutersPreviewResponse:\n\n \"\"\"\n\n # Jsonify the request body\n body = compute.Router.to_json(\n request.router_resource,\n including_default_value_fields=False,\n use_integers_for_enums=False,\n )\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/preview\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.post(url, headers=headers, data=body,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.RoutersPreviewResponse.from_json(\n response.content, ignore_unknown_fields=True\n )\n\n def update(\n self,\n request: compute.UpdateRouterRequest,\n *,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> compute.Operation:\n r\"\"\"Call the update method over HTTP.\n\n Args:\n request (~.compute.UpdateRouterRequest):\n The request object. A request message for Routers.Update.\n See the method description for details.\n\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n ~.compute.Operation:\n Represents an Operation resource.\n\n Google Compute Engine has three Operation resources:\n\n - `Global `__\n \\*\n `Regional `__\n \\*\n `Zonal `__\n\n You can use an operation resource to manage asynchronous\n API requests. For more information, read Handling API\n responses.\n\n Operations can be global, regional or zonal.\n\n - For global operations, use the ``globalOperations``\n resource.\n - For regional operations, use the ``regionOperations``\n resource.\n - For zonal operations, use the ``zonalOperations``\n resource.\n\n For more information, read Global, Regional, and Zonal\n Resources. 
(== resource_for\n {$api_version}.globalOperations ==) (== resource_for\n {$api_version}.regionOperations ==) (== resource_for\n {$api_version}.zoneOperations ==)\n\n \"\"\"\n\n # Jsonify the request body\n body = compute.Router.to_json(\n request.router_resource,\n including_default_value_fields=False,\n use_integers_for_enums=False,\n )\n\n # TODO(yon-mg): need to handle grpc transcoding and parse url correctly\n # current impl assumes basic case of grpc transcoding\n url = \"https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}\".format(\n host=self._host,\n project=request.project,\n region=request.region,\n router=request.router,\n )\n\n # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields\n # not required for GCE\n query_params = {}\n if compute.UpdateRouterRequest.request_id in request:\n query_params[\"requestId\"] = request.request_id\n\n # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here\n # discards default values\n # TODO(yon-mg): add test for proper url encoded strings\n query_params = [\"{k}={v}\".format(k=k, v=v) for k, v in query_params.items()]\n url += \"?{}\".format(\"&\".join(query_params)).replace(\" \", \"+\")\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = self._session.put(url, headers=headers, data=body,)\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n return compute.Operation.from_json(response.content, ignore_unknown_fields=True)\n\n\n__all__ = (\"RoutersRestTransport\",)\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":2138113358878847700,"string":"2,138,113,358,878,847,700"},"line_mean":{"kind":"number","value":40.4621026895,"string":"40.462103"},"line_max":{"kind":"number","value":120,"string":"120"},"alpha_frac":{"kind":"number","value":0.6021052011,"string":"0.602105"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109451,"cells":{"repo_name":{"kind":"string","value":"SKA-ScienceDataProcessor/algorithm-reference-library"},"path":{"kind":"string","value":"processing_components/griddata/convolution_functions.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8955"},"content":{"kind":"string","value":"#\n\"\"\"\nFunctions that define and manipulate ConvolutionFunctions.\n\nThe griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. The\norder in the WCS is reversed so the grid_WCS describes UU, VV, DUU, DVV, WW, STOKES, FREQ axes.\n\nGridData can be used to hold the Fourier transform of an Image or gridded visibilities. 
In addition, the convolution\nfunction can be stored in a GridData, most probably with finer spatial sampling.\n\n\n\"\"\"\nimport copy\nimport logging\n\nimport numpy\nfrom astropy.wcs import WCS\n\nfrom data_models.memory_data_models import GridData, ConvolutionFunction\nfrom data_models.memory_data_models import QA\nfrom data_models.polarisation import PolarisationFrame\nfrom processing_library.image.operations import create_image_from_array\n\nlog = logging.getLogger(__name__)\n\n\ndef convolutionfunction_sizeof(cf: ConvolutionFunction):\n \"\"\" Return size in GB\n \"\"\"\n return cf.size()\n\n\ndef create_convolutionfunction_from_array(data: numpy.array, grid_wcs: WCS, projection_wcs: WCS,\n polarisation_frame: PolarisationFrame) -> ConvolutionFunction:\n \"\"\" Create a convolution function from an array and wcs's\n \n The griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. The\n order in the WCS is reversed so the grid_WCS describes UU, VV, WW, STOKES, FREQ axes\n \n The axes UU,VV have the same physical stride as the image, The axes DUU, DVV are subsampled.\n \n Convolution function holds the original sky plane projection in the projection_wcs.\n\n :param data: Numpy.array\n :param grid_wcs: Grid world coordinate system\n :param projection_wcs: Projection world coordinate system\n :param polarisation_frame: Polarisation Frame\n :return: GridData\n \n \"\"\"\n fconvfunc = ConvolutionFunction()\n fconvfunc.polarisation_frame = polarisation_frame\n \n fconvfunc.data = data\n fconvfunc.grid_wcs = grid_wcs.deepcopy()\n fconvfunc.projection_wcs = projection_wcs.deepcopy()\n \n assert isinstance(fconvfunc, ConvolutionFunction), \"Type is %s\" % type(fconvfunc)\n return fconvfunc\n\n\ndef create_convolutionfunction_from_image(im: numpy.array, nz=1, zstep=1e15, ztype='WW', oversampling=8, support=16):\n \"\"\" Create a convolution function from an image\n\n The griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. 
The\n order in the WCS is reversed so the grid_WCS describes UU, VV, WW, STOKES, FREQ axes\n\n The axes UU,VV have the same physical stride as the image, The axes DUU, DVV are subsampled.\n\n Convolution function holds the original sky plane projection in the projection_wcs.\n\n :param im: Template Image\n :param nz: Number of z axes, usually z is W\n :param zstep: Step in z, usually z is W\n :param ztype: Type of z, usually 'WW'\n :param oversampling: Oversampling (size of dy, dx axes)\n :param support: Support of final convolution function (size of y, x axes)\n :return: Convolution Function\n\n \"\"\"\n assert len(im.shape) == 4\n assert im.wcs.wcs.ctype[0] == 'RA---SIN'\n assert im.wcs.wcs.ctype[1] == 'DEC--SIN'\n \n d2r = numpy.pi / 180.0\n \n # WCS Coords are [x, y, dy, dx, z, pol, chan] where x, y, z are spatial axes in real space or Fourier space\n # Array Coords are [chan, pol, z, dy, dx, y, x] where x, y, z are spatial axes in real space or Fourier space\n cf_wcs = WCS(naxis=7)\n \n cf_wcs.wcs.ctype[0] = 'UU'\n cf_wcs.wcs.ctype[1] = 'VV'\n cf_wcs.wcs.ctype[2] = 'DUU'\n cf_wcs.wcs.ctype[3] = 'DVV'\n cf_wcs.wcs.ctype[4] = ztype\n cf_wcs.wcs.ctype[5] = im.wcs.wcs.ctype[2]\n cf_wcs.wcs.ctype[6] = im.wcs.wcs.ctype[3]\n \n cf_wcs.wcs.axis_types[0] = 0\n cf_wcs.wcs.axis_types[1] = 0\n cf_wcs.wcs.axis_types[2] = 0\n cf_wcs.wcs.axis_types[3] = 0\n cf_wcs.wcs.axis_types[4] = 0\n cf_wcs.wcs.axis_types[5] = im.wcs.wcs.axis_types[2]\n cf_wcs.wcs.axis_types[6] = im.wcs.wcs.axis_types[3]\n \n cf_wcs.wcs.crval[0] = 0.0\n cf_wcs.wcs.crval[1] = 0.0\n cf_wcs.wcs.crval[2] = 0.0\n cf_wcs.wcs.crval[3] = 0.0\n cf_wcs.wcs.crval[4] = 0.0\n cf_wcs.wcs.crval[5] = im.wcs.wcs.crval[2]\n cf_wcs.wcs.crval[6] = im.wcs.wcs.crval[3]\n \n cf_wcs.wcs.crpix[0] = float(support // 2) + 1.0\n cf_wcs.wcs.crpix[1] = float(support // 2) + 1.0\n cf_wcs.wcs.crpix[2] = float(oversampling // 2) + 1.0\n cf_wcs.wcs.crpix[3] = float(oversampling // 2) + 1.0\n cf_wcs.wcs.crpix[4] = float(nz // 2 + 1)\n cf_wcs.wcs.crpix[5] = im.wcs.wcs.crpix[2]\n cf_wcs.wcs.crpix[6] = im.wcs.wcs.crpix[3]\n \n # The sampling on the UU and VV axes should be the same as for the image.\n # The sampling on the DUU and DVV axes should be oversampling times finer.\n cf_wcs.wcs.cdelt[0] = 1.0 / (im.shape[3] * d2r * im.wcs.wcs.cdelt[0])\n cf_wcs.wcs.cdelt[1] = 1.0 / (im.shape[2] * d2r * im.wcs.wcs.cdelt[1])\n cf_wcs.wcs.cdelt[2] = cf_wcs.wcs.cdelt[0] / oversampling\n cf_wcs.wcs.cdelt[3] = cf_wcs.wcs.cdelt[1] / oversampling\n cf_wcs.wcs.cdelt[4] = zstep\n cf_wcs.wcs.cdelt[5] = im.wcs.wcs.cdelt[2]\n cf_wcs.wcs.cdelt[6] = im.wcs.wcs.cdelt[3]\n \n grid_data = im.data[..., numpy.newaxis, :, :].astype('complex')\n grid_data[...] 
= 0.0\n \n nchan, npol, ny, nx = im.shape\n \n fconvfunc = ConvolutionFunction()\n fconvfunc.polarisation_frame = im.polarisation_frame\n \n fconvfunc.data = numpy.zeros([nchan, npol, nz, oversampling, oversampling, support, support], dtype='complex')\n fconvfunc.grid_wcs = cf_wcs.deepcopy()\n fconvfunc.projection_wcs = im.wcs.deepcopy()\n \n assert isinstance(fconvfunc, ConvolutionFunction), \"Type is %s\" % type(fconvfunc)\n \n return fconvfunc\n\n\ndef convert_convolutionfunction_to_image(cf):\n \"\"\" Convert ConvolutionFunction to an image\n \n :param cf:\n :return:\n \"\"\"\n return create_image_from_array(cf.data, cf.grid_wcs, cf.polarisation_frame)\n\n\ndef apply_bounding_box_convolutionfunction(cf, fractional_level=1e-4):\n \"\"\"Apply a bounding box to a convolution function\n\n :param cf:\n :param fractional_level:\n :return: bounded convolution function\n \"\"\"\n newcf = copy_convolutionfunction(cf)\n nx = newcf.data.shape[-1]\n ny = newcf.data.shape[-2]\n mask = numpy.max(numpy.abs(newcf.data), axis=(0, 1, 2, 3, 4))\n coords = numpy.argwhere(mask > fractional_level * numpy.max(numpy.abs(cf.data)))\n crpx = int(numpy.round(cf.grid_wcs.wcs.crpix[0]))\n crpy = int(numpy.round(cf.grid_wcs.wcs.crpix[1]))\n x0, y0 = coords.min(axis=0)\n dx = crpx - x0\n dy = crpy - y0\n x0 -= 1\n y0 -= 1\n x1 = crpx + dx - 1\n y1 = crpy + dy - 1\n newcf.data = newcf.data[..., y0:y1, x0:x1]\n nny, nnx = newcf.data.shape[-2], newcf.data.shape[-1]\n newcf.grid_wcs.wcs.crpix[0] += nnx / 2 - nx / 2\n newcf.grid_wcs.wcs.crpix[1] += nny / 2 - ny / 2\n return newcf\n\n\ndef calculate_bounding_box_convolutionfunction(cf, fractional_level=1e-4):\n \"\"\"Calculate bounding boxes\n \n Returns a list of bounding boxes where each element is\n (z, (y0, y1), (x0, x1))\n \n These can be used in griddata/degridding.\n\n :param cf:\n :param fractional_level:\n :return: list of bounding boxes\n \"\"\"\n bboxes = list()\n threshold = fractional_level * numpy.max(numpy.abs(cf.data))\n for z in range(cf.data.shape[2]):\n mask = numpy.max(numpy.abs(cf.data[:, :, z, ...]), axis=(0, 1, 2, 3))\n coords = numpy.argwhere(mask > threshold)\n x0, y0 = coords.min(axis=0)\n x1, y1 = coords.max(axis=0)\n bboxes.append((z, (y0, y1), (x0, x1)))\n return bboxes\n\n\ndef qa_convolutionfunction(cf, context=\"\") -> QA:\n \"\"\"Assess the quality of a convolutionfunction\n\n :param cf:\n :return: QA\n \"\"\"\n assert isinstance(cf, ConvolutionFunction), cf\n data = {'shape': str(cf.data.shape),\n 'max': numpy.max(cf.data),\n 'min': numpy.min(cf.data),\n 'rms': numpy.std(cf.data),\n 'sum': numpy.sum(cf.data),\n 'medianabs': numpy.median(numpy.abs(cf.data)),\n 'median': numpy.median(cf.data)}\n \n qa = QA(origin=\"qa_image\", data=data, context=context)\n return qa\n\ndef copy_convolutionfunction(cf):\n \"\"\"Make a copy of a convolution function\n \n :param cf:\n :return:\n \"\"\"\n assert isinstance(cf, ConvolutionFunction), cf\n fcf = ConvolutionFunction()\n fcf.polarisation_frame = cf.polarisation_frame\n fcf.data = copy.deepcopy(cf.data)\n fcf.projection_wcs = copy.deepcopy(cf.projection_wcs)\n fcf.grid_wcs = copy.deepcopy(cf.grid_wcs)\n if convolutionfunction_sizeof(fcf) >= 1.0:\n log.debug(\"copy_convolutionfunction: copied %s convolution function of shape %s, size %.3f (GB)\" %\n (fcf.data.dtype, str(fcf.shape), convolutionfunction_sizeof(fcf)))\n assert isinstance(fcf, ConvolutionFunction), fcf\n return 
fcf\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-4059187236238970400,"string":"-4,059,187,236,238,970,400"},"line_mean":{"kind":"number","value":35.7008196721,"string":"35.70082"},"line_max":{"kind":"number","value":121,"string":"121"},"alpha_frac":{"kind":"number","value":0.6491345617,"string":"0.649135"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109452,"cells":{"repo_name":{"kind":"string","value":"alabs/petateca"},"path":{"kind":"string","value":"petateca/apps/userdata/migrations/0006_auto.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14230"},"content":{"kind":"string","value":"# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n \n # Adding M2M table for field viewed_episodes on 'UserProfile'\n db.create_table('userdata_userprofile_viewed_episodes', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('userprofile', models.ForeignKey(orm['userdata.userprofile'], null=False)),\n ('episode', models.ForeignKey(orm['serie.episode'], null=False))\n ))\n db.create_unique('userdata_userprofile_viewed_episodes', ['userprofile_id', 'episode_id'])\n\n\n def backwards(self, orm):\n \n # Removing M2M table for field viewed_episodes on 'UserProfile'\n db.delete_table('userdata_userprofile_viewed_episodes')\n\n\n models = {\n 'auth.group': {\n 'Meta': {'object_name': 'Group'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n 'auth.permission': {\n 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n 'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n 'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'serie.actor': {\n 'Meta': {'object_name': 'Actor'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': \"'poster_of'\", 'unique': 'True', 'null': 'True', 'to': \"orm['serie.ImageActor']\"}),\n 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})\n },\n 'serie.episode': {\n 'Meta': {'object_name': 'Episode'},\n 'air_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),\n 'created_time': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'description_es': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'episode': ('django.db.models.fields.IntegerField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'modified_time': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': \"'poster_of'\", 'unique': 'True', 'null': 'True', 'to': \"orm['serie.ImageEpisode']\"}),\n 'season': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'episodes'\", 'to': \"orm['serie.Season']\"}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'title_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'title_es': ('django.db.models.fields.CharField', [], {'max_length': '255'})\n },\n 'serie.genre': {\n 'Meta': {'object_name': 'Genre'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),\n 'name_en': ('django.db.models.fields.CharField', [], {'max_length': '25'}),\n 'name_es': ('django.db.models.fields.CharField', [], {'max_length': '25'}),\n 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})\n },\n 'serie.imageactor': {\n 'Meta': {'object_name': 'ImageActor'},\n 'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'images'\", 'to': \"orm['serie.Actor']\"}),\n 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_poster': ('django.db.models.fields.BooleanField', [], 
{'default': 'False'}),\n 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'serie.imageepisode': {\n 'Meta': {'object_name': 'ImageEpisode'},\n 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'episode': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'images'\", 'to': \"orm['serie.Episode']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'serie.imageseason': {\n 'Meta': {'object_name': 'ImageSeason'},\n 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'season': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'images'\", 'to': \"orm['serie.Season']\"}),\n 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'serie.imageserie': {\n 'Meta': {'object_name': 'ImageSerie'},\n 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'images'\", 'to': \"orm['serie.Serie']\"}),\n 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'serie.network': {\n 'Meta': {'object_name': 'Network'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),\n 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),\n 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})\n },\n 'serie.role': {\n 'Meta': {'unique_together': \"(('serie', 'actor', 'role'),)\", 'object_name': 'Role'},\n 'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['serie.Actor']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'role': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'serie': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['serie.Serie']\"}),\n 'sortorder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})\n },\n 'serie.season': {\n 'Meta': {'object_name': 'Season'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': \"'poster_of'\", 'unique': 'True', 'null': 'True', 'to': \"orm['serie.ImageSeason']\"}),\n 'season': ('django.db.models.fields.IntegerField', [], {}),\n 'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'season'\", 'to': 
\"orm['serie.Serie']\"})\n },\n 'serie.serie': {\n 'Meta': {'object_name': 'Serie'},\n 'actors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': \"orm['serie.Actor']\", 'null': 'True', 'through': \"orm['serie.Role']\", 'blank': 'True'}),\n 'description': ('django.db.models.fields.TextField', [], {}),\n 'description_en': ('django.db.models.fields.TextField', [], {}),\n 'description_es': ('django.db.models.fields.TextField', [], {}),\n 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'genres': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': \"'series'\", 'symmetrical': 'False', 'to': \"orm['serie.Genre']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'name_es': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'series'\", 'to': \"orm['serie.Network']\"}),\n 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': \"'poster_of'\", 'unique': 'True', 'null': 'True', 'to': \"orm['serie.ImageSerie']\"}),\n 'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),\n 'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),\n 'runtime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})\n },\n 'userdata.userprofile': {\n 'Meta': {'object_name': 'UserProfile'},\n 'favorite_series': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': \"'favorite_of'\", 'symmetrical': 'False', 'to': \"orm['serie.Serie']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\", 'unique': 'True'}),\n 'viewed_episodes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': \"'viewed_episodes'\", 'symmetrical': 'False', 'to': \"orm['serie.Episode']\"})\n },\n 'userdata.usertoinvite': {\n 'Meta': {'object_name': 'UserToInvite'},\n 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),\n 'has_been_invited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'mail': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'})\n }\n }\n\n complete_apps = ['userdata']\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":866152889865654700,"string":"866,152,889,865,654,700"},"line_mean":{"kind":"number","value":75.0962566845,"string":"75.096257"},"line_max":{"kind":"number","value":197,"string":"197"},"alpha_frac":{"kind":"number","value":0.5448348559,"string":"0.544835"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109453,"cells":{"repo_name":{"kind":"string","value":"colincsl/TemporalConvolutionalNetworks"},"path":{"kind":"string","value":"code/tf_models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"12033"},"content":{"kind":"string","value":"import 
numpy as np\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, TimeDistributed, merge, Lambda\nfrom keras.layers.core import *\nfrom keras.layers.convolutional import *\nfrom keras.layers.recurrent import *\n\nimport tensorflow as tf\nfrom keras import backend as K\n\nfrom keras.activations import relu\nfrom functools import partial\nclipped_relu = partial(relu, max_value=5)\n\ndef max_filter(x):\n # Max over the best filter score (like ICRA paper)\n max_values = K.max(x, 2, keepdims=True)\n max_flag = tf.greater_equal(x, max_values)\n out = x * tf.cast(max_flag, tf.float32)\n return out\n\ndef channel_normalization(x):\n # Normalize by the highest activation\n max_values = K.max(K.abs(x), 2, keepdims=True)+1e-5\n out = x / max_values\n return out\n\ndef WaveNet_activation(x):\n tanh_out = Activation('tanh')(x)\n sigm_out = Activation('sigmoid')(x) \n return Merge(mode='mul')([tanh_out, sigm_out])\n\n# -------------------------------------------------------------\ndef temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len, \n causal=False, loss='categorical_crossentropy', \n optimizer='adam', return_param_str=False):\n \"\"\" Used in paper: \n Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation\n Lea et al. ECCV 2016\n\n Note: Spatial dropout was not used in the original paper. \n It tends to improve performance a little. \n \"\"\"\n\n inputs = Input(shape=(max_len,n_feat))\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = Convolution1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, border_mode='same', activation='relu')(inputs)\n if causal: model = Cropping1D((0,conv_len//2))(model)\n\n model = SpatialDropout1D(0.3)(model)\n\n model = TimeDistributed(Dense(n_classes, activation=\"softmax\" ))(model)\n \n model = Model(input=inputs, output=model)\n model.compile(loss=loss, optimizer=optimizer, sample_weight_mode=\"temporal\")\n\n if return_param_str:\n param_str = \"tConv_C{}\".format(conv_len)\n if causal:\n param_str += \"_causal\"\n \n return model, param_str\n else:\n return model\n\n\ndef ED_TCN(n_nodes, conv_len, n_classes, n_feat, max_len, \n loss='categorical_crossentropy', causal=False, \n optimizer=\"rmsprop\", activation='norm_relu',\n return_param_str=False):\n n_layers = len(n_nodes)\n\n inputs = Input(shape=(max_len,n_feat))\n model = inputs\n\n # ---- Encoder ----\n for i in range(n_layers):\n # Pad beginning of sequence to prevent usage of future data\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = Convolution1D(n_nodes[i], conv_len, border_mode='same')(model)\n if causal: model = Cropping1D((0,conv_len//2))(model)\n\n model = SpatialDropout1D(0.3)(model)\n \n if activation=='norm_relu': \n model = Activation('relu')(model) \n model = Lambda(channel_normalization, name=\"encoder_norm_{}\".format(i))(model)\n elif activation=='wavenet': \n model = WaveNet_activation(model) \n else:\n model = Activation(activation)(model) \n \n model = MaxPooling1D(2)(model)\n\n # ---- Decoder ----\n for i in range(n_layers):\n model = UpSampling1D(2)(model)\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = Convolution1D(n_nodes[-i-1], conv_len, border_mode='same')(model)\n if causal: model = Cropping1D((0,conv_len//2))(model)\n\n model = SpatialDropout1D(0.3)(model)\n\n if activation=='norm_relu': \n model = Activation('relu')(model)\n model = Lambda(channel_normalization, name=\"decoder_norm_{}\".format(i))(model)\n elif activation=='wavenet': \n model = 
WaveNet_activation(model) \n else:\n model = Activation(activation)(model)\n\n # Output FC layer\n model = TimeDistributed(Dense(n_classes, activation=\"softmax\" ))(model)\n \n model = Model(input=inputs, output=model)\n model.compile(loss=loss, optimizer=optimizer, sample_weight_mode=\"temporal\", metrics=['accuracy'])\n\n if return_param_str:\n param_str = \"ED-TCN_C{}_L{}\".format(conv_len, n_layers)\n if causal:\n param_str += \"_causal\"\n \n return model, param_str\n else:\n return model\n\ndef ED_TCN_atrous(n_nodes, conv_len, n_classes, n_feat, max_len, \n loss='categorical_crossentropy', causal=False, \n optimizer=\"rmsprop\", activation='norm_relu',\n return_param_str=False):\n n_layers = len(n_nodes)\n\n inputs = Input(shape=(None,n_feat))\n model = inputs\n\n # ---- Encoder ----\n for i in range(n_layers):\n # Pad beginning of sequence to prevent usage of future data\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model)\n if causal: model = Cropping1D((0,conv_len//2))(model)\n\n model = SpatialDropout1D(0.3)(model)\n \n if activation=='norm_relu': \n model = Activation('relu')(model) \n model = Lambda(channel_normalization, name=\"encoder_norm_{}\".format(i))(model)\n elif activation=='wavenet': \n model = WaveNet_activation(model) \n else:\n model = Activation(activation)(model) \n\n # ---- Decoder ----\n for i in range(n_layers):\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = AtrousConvolution1D(n_nodes[-i-1], conv_len, atrous_rate=n_layers-i, border_mode='same')(model) \n if causal: model = Cropping1D((0,conv_len//2))(model)\n\n model = SpatialDropout1D(0.3)(model)\n\n if activation=='norm_relu': \n model = Activation('relu')(model)\n model = Lambda(channel_normalization, name=\"decoder_norm_{}\".format(i))(model)\n elif activation=='wavenet': \n model = WaveNet_activation(model) \n else:\n model = Activation(activation)(model)\n\n # Output FC layer\n model = TimeDistributed(Dense(n_classes, activation=\"softmax\" ))(model)\n\n model = Model(input=inputs, output=model)\n\n model.compile(loss=loss, optimizer=optimizer, sample_weight_mode=\"temporal\", metrics=['accuracy'])\n\n if return_param_str:\n param_str = \"ED-TCNa_C{}_L{}\".format(conv_len, n_layers)\n if causal:\n param_str += \"_causal\"\n \n return model, param_str\n else:\n return model\n\n\n\ndef TimeDelayNeuralNetwork(n_nodes, conv_len, n_classes, n_feat, max_len, \n loss='categorical_crossentropy', causal=False, \n optimizer=\"rmsprop\", activation='sigmoid',\n return_param_str=False):\n # Time-delay neural network\n n_layers = len(n_nodes)\n\n inputs = Input(shape=(max_len,n_feat))\n model = inputs\n inputs_mask = Input(shape=(max_len,1))\n model_masks = [inputs_mask]\n\n # ---- Encoder ----\n for i in range(n_layers):\n # Pad beginning of sequence to prevent usage of future data\n if causal: model = ZeroPadding1D((conv_len//2,0))(model)\n model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model)\n # model = SpatialDropout1D(0.3)(model)\n if causal: model = Cropping1D((0,conv_len//2))(model)\n \n if activation=='norm_relu': \n model = Activation('relu')(model) \n model = Lambda(channel_normalization, name=\"encoder_norm_{}\".format(i))(model)\n elif activation=='wavenet': \n model = WaveNet_activation(model) \n else:\n model = Activation(activation)(model) \n\n # Output FC layer\n model = TimeDistributed(Dense(n_classes, 
activation=\"softmax\"))(model)\n\n model = Model(input=inputs, output=model)\n model.compile(loss=loss, optimizer=optimizer, sample_weight_mode=\"temporal\", metrics=['accuracy'])\n\n if return_param_str:\n param_str = \"TDN_C{}\".format(conv_len)\n if causal:\n param_str += \"_causal\"\n \n return model, param_str\n else:\n return model\n\n\n\ndef Dilated_TCN(num_feat, num_classes, nb_filters, dilation_depth, nb_stacks, max_len, \n activation=\"wavenet\", tail_conv=1, use_skip_connections=True, causal=False, \n optimizer='adam', return_param_str=False):\n \"\"\"\n dilation_depth : number of layers per stack\n nb_stacks : number of stacks.\n \"\"\"\n\n def residual_block(x, s, i, activation):\n original_x = x\n\n if causal:\n x = ZeroPadding1D(((2**i)//2,0))(x)\n conv = AtrousConvolution1D(nb_filters, 2, atrous_rate=2**i, border_mode='same',\n name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x)\n conv = Cropping1D((0,(2**i)//2))(conv)\n else:\n conv = AtrousConvolution1D(nb_filters, 3, atrous_rate=2**i, border_mode='same',\n name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x) \n\n conv = SpatialDropout1D(0.3)(conv)\n # x = WaveNet_activation(conv)\n\n if activation=='norm_relu': \n x = Activation('relu')(conv)\n x = Lambda(channel_normalization)(x)\n elif activation=='wavenet': \n x = WaveNet_activation(conv) \n else:\n x = Activation(activation)(conv) \n\n #res_x = Convolution1D(nb_filters, 1, border_mode='same')(x)\n #skip_x = Convolution1D(nb_filters, 1, border_mode='same')(x)\n x = Convolution1D(nb_filters, 1, border_mode='same')(x)\n\n res_x = Merge(mode='sum')([original_x, x])\n\n #return res_x, skip_x\n return res_x, x\n\n input_layer = Input(shape=(max_len, num_feat))\n\n skip_connections = []\n\n x = input_layer\n if causal:\n x = ZeroPadding1D((1,0))(x)\n x = Convolution1D(nb_filters, 2, border_mode='same', name='initial_conv')(x)\n x = Cropping1D((0,1))(x)\n else:\n x = Convolution1D(nb_filters, 3, border_mode='same', name='initial_conv')(x) \n\n for s in range(nb_stacks):\n for i in range(0, dilation_depth+1):\n x, skip_out = residual_block(x, s, i, activation)\n skip_connections.append(skip_out)\n\n if use_skip_connections:\n x = Merge(mode='sum')(skip_connections)\n x = Activation('relu')(x)\n x = Convolution1D(nb_filters, tail_conv, border_mode='same')(x)\n x = Activation('relu')(x)\n x = Convolution1D(num_classes, tail_conv, border_mode='same')(x)\n x = Activation('softmax', name='output_softmax')(x)\n\n model = Model(input_layer, x)\n model.compile(optimizer, loss='categorical_crossentropy', sample_weight_mode='temporal')\n\n if return_param_str:\n param_str = \"D-TCN_C{}_B{}_L{}\".format(2, nb_stacks, dilation_depth)\n if causal:\n param_str += \"_causal\"\n \n return model, param_str\n else:\n return model\n\ndef BidirLSTM(n_nodes, n_classes, n_feat, max_len=None, \n causal=True, loss='categorical_crossentropy', optimizer=\"adam\",\n return_param_str=False):\n \n inputs = Input(shape=(None,n_feat))\n model = LSTM(n_nodes, return_sequences=True)(inputs)\n\n # Birdirectional LSTM\n if not causal:\n model_backwards = LSTM(n_nodes, return_sequences=True, go_backwards=True)(inputs)\n model = Merge(mode=\"concat\")([model, model_backwards])\n\n model = TimeDistributed(Dense(n_classes, activation=\"softmax\"))(model)\n \n model = Model(input=inputs, output=model)\n model.compile(optimizer=optimizer, loss=loss, sample_weight_mode=\"temporal\", metrics=['accuracy'])\n \n if return_param_str:\n param_str = \"LSTM_N{}\".format(n_nodes)\n if causal:\n param_str += \"_causal\"\n \n return 
model, param_str\n else:\n return model\n\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2949897003949492000,"string":"2,949,897,003,949,492,000"},"line_mean":{"kind":"number","value":34.9194029851,"string":"34.919403"},"line_max":{"kind":"number","value":131,"string":"131"},"alpha_frac":{"kind":"number","value":0.5944485997,"string":"0.594449"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109454,"cells":{"repo_name":{"kind":"string","value":"agarsev/grafeno"},"path":{"kind":"string","value":"grafeno/transformers/concept_class.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1056"},"content":{"kind":"string","value":"from grafeno.transformers.wordnet import Transformer as WNGet\n\nclass Transformer (WNGet):\n '''Finds the wordnet-defined `class' of a concept.\n\n Parameters\n ----------\n concept_class_hypernyms : bool\n If True, a new node is added with the class concept, related to the\n original node by an ``HYP'' edge.\n '''\n\n def __init__ (self, concept_class_hypernyms = True, **kwds):\n super().__init__(**kwds)\n self.__hyper = concept_class_hypernyms\n\n def post_process (self):\n super().post_process()\n for n in self.nodes.values():\n ss = n.get('synset')\n if ss:\n concept_class = ss.lexname().split('.')[1]\n if concept_class and concept_class != 'Tops':\n n['class'] = concept_class\n if self.__hyper:\n chyp = { 'concept': concept_class }\n if 'sempos' in n:\n chyp['sempos'] = n['sempos']\n self.sprout(n['id'], 'HYP', chyp)\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":6876593098635753000,"string":"6,876,593,098,635,753,000"},"line_mean":{"kind":"number","value":35.4137931034,"string":"35.413793"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.5075757576,"string":"0.507576"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109455,"cells":{"repo_name":{"kind":"string","value":"cvxopt/chompack"},"path":{"kind":"string","value":"examples/symbolic_factorization.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1261"},"content":{"kind":"string","value":"from cvxopt import spmatrix, printing, amd\nimport chompack as cp\nprinting.options['width'] = 17\n\n# Define sparse matrix\nI = range(17) + [2,2,3,3,4,14,4,14,8,14,15,8,15,7,8,14,8,14,14,15,10,12,13,16,12,13,16,12,13,15,16,13,15,16,15,16,15,16,16]\nJ = range(17) + [0,1,1,2,2,2,3,3,4,4,4,5,5,6,6,6,7,7,8,8,9,9,9,9,10,10,10,11,11,11,11,12,12,12,13,13,14,14,15]\nA = spmatrix(1.0,I,J,(17,17))\n\n# Test if A is chordal\np = cp.maxcardsearch(A)\nprint(\"\\nMaximum cardinality search\")\nprint(\" -- perfect elimination order:\"), cp.peo(A,p)\n\n# Test if natural ordering 0,1,2,...,17 is a perfect elimination order\np = range(17)\nprint(\"\\nNatural ordering\")\nprint(\" -- perfect elimination order:\"), cp.peo(A,p)\n\np = amd.order(A)\nprint(\"\\nAMD ordering\")\nprint(\" -- perfect elimination order:\"), cp.peo(A,p)\n\n# Compute a symbolic factorization \nsymb = cp.symbolic(A, p)\nprint(\"\\nSymbolic factorization:\")\nprint(\"Fill :\"), sum(symb.fill)\nprint(\"Number of cliques :\"), symb.Nsn\nprint(symb)\n\n# Compute a symbolic factorization with clique merging\nsymb2 = cp.symbolic(A, p, merge_function = cp.merge_size_fill(3,3))\nprint(\"Symbolic factorization with clique merging:\")\nprint(\"Fill (fact.+merging) :\"), sum(symb2.fill)\nprint(\"Number of cliques :\"), 
symb2.Nsn\nprint(symb2)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-2211677090977100500,"string":"-2,211,677,090,977,100,500"},"line_mean":{"kind":"number","value":34.0277777778,"string":"34.027778"},"line_max":{"kind":"number","value":123,"string":"123"},"alpha_frac":{"kind":"number","value":0.6796193497,"string":"0.679619"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109456,"cells":{"repo_name":{"kind":"string","value":"mikemoorester/ESM"},"path":{"kind":"string","value":"thesis/chapter_esm_modelling.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1912"},"content":{"kind":"string","value":"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n#from scipy.stats.stats import pearsonr,kendalltau\n\n#vel_light = 299792458.0\n#fL1 = 10.23e6*77.*2. \n#fL2 = 10.23e6*60.*2. \n#wL1 = vel_light/fL1 \n#wL2 = vel_light/fL2\n#lcl1 = 1./(1.-(fL2/fL1)**2)\n#lcl2 = -(fL2/fL1)/(1.-(fL2/fL1)**2)\n\ndef plotFontSize(ax,fontsize=8):\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(fontsize)\n return ax\n\ndef pcoBias(args):\n fig = plt.figure(figsize=(3.62, 2.76))\n fig.canvas.set_window_title('pco_pcv_correlation')\n ax = fig.add_subplot(111)\n\n plt.rc('text', usetex=True)\n\n nadir = np.linspace(0,14,141)\n for dr in [0.1, 0.5, 1.0]:\n dpcv = -dr *(1 - np.cos(np.radians(nadir)))\n ax.plot(nadir,dpcv)\n\n\n ax.set_ylabel('\\Delta' ' Satellite PCV (m)')\n ax.set_xlabel('Nadir angle' r'($\\displaystyle^\\circ$)')# ($^\\circ$)')\n ax.set_xlim([0,14])\n ax.legend([r'$\\Delta r$ = 0.1 m', r'$\\Delta r$ = 0.5 m',r'$\\Delta r$ = 1.0 m'],fontsize=8,loc='best')\n ax = plotFontSize(ax,8)\n plt.tight_layout()\n\n if args.plot_save:\n plt.savefig('pco_pcv_correlation.eps')\n plt.close()\n return 1 \n\nif __name__ == \"__main__\":\n\n import argparse \n parser = argparse.ArgumentParser(prog='chapter_esm_modelling',description='Create some basic plots for thesis chapter')\n parser.add_argument(\"--pco\", dest=\"pco\",action=\"store_true\",default=False,\n help=\"Produce a plot of pco bias appearing as a pcv bias (correlation)\")\n\n parser.add_argument(\"--ps\",\"--plot_save\", dest=\"plot_save\",action=\"store_true\",default=False,\n help=\"Save plots in eps format\")\n\n args = parser.parse_args()\n\n if args.pco:\n pcoBias(args)\n\n if not args.plot_save:\n plt.show()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-856222044414287900,"string":"-856,222,044,414,287,900"},"line_mean":{"kind":"number","value":29.8387096774,"string":"29.83871"},"line_max":{"kind":"number","value":123,"string":"123"},"alpha_frac":{"kind":"number","value":0.6108786611,"string":"0.610879"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109457,"cells":{"repo_name":{"kind":"string","value":"scdsr/ptt-webcrawler"},"path":{"kind":"string","value":"main.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1556"},"content":{"kind":"string","value":"#!/usr/bin/python3\n#coding:utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport lib.CalWords as CalWords\nimport lib.url2aid as url2aid\n\n\n# Basic config\n## Board name\nsearchBoard = 'https://www.ptt.cc/bbs/test/index.html'\n## Pages to crawl\nsearchPages = 1\n## Words required\nlessNum = 30\n\nres = requests.get(searchBoard)\nmainSoup = BeautifulSoup(res.text, 
\"html.parser\")\ntargetURLs = []\nfor aclass in mainSoup.select('a'):\n if aclass.text == '‹ 上頁':\n targetURLs = ['https://www.ptt.cc' + aclass['href']]\n\nstartnum = int(targetURLs[0][-6])\ntargetURLs = [targetURLs[0][:-6] + str(i) + targetURLs[0][-5:]\n for i in range(startnum + 1, startnum - searchPages + 1, -1)]\n\narticleURLs = []\n\n# Get urls for searching\nfor url in targetURLs:\n res = requests.get(url)\n soup = BeautifulSoup(res.text, \"html.parser\")\n htmltext = ''\n\n for entry in soup.select('.r-ent'):\n htmltext += str(entry)\n\n asoup = BeautifulSoup(htmltext, \"html.parser\")\n\n for a in asoup.find_all('a', href=True):\n articleURLs += ['https://www.ptt.cc' + a['href']]\n\n# Print out results\ntry:\n for url in articleURLs:\n # Get article uid\n fn = url[-23:-5]\n aid = url2aid.url2aid(fn)\n if CalWords.calculate_words(url)[1] < lessNum:\n authorEnd = CalWords.calculate_words(url)[0].index(u'(')\n print(aid, CalWords.calculate_words(url)[0][0:authorEnd],\n str(CalWords.calculate_words(url)[1]) + ' characters')\nexcept:\n print('No more article less than', lessNum, '!')\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":668714726556833400,"string":"668,714,726,556,833,400"},"line_mean":{"kind":"number","value":27.1818181818,"string":"27.181818"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.6341935484,"string":"0.634194"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109458,"cells":{"repo_name":{"kind":"string","value":"bartdag/ghmiles"},"path":{"kind":"string","value":"ghmiles.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"16809"},"content":{"kind":"string","value":"''' \n ghmiles generates a milestones model from the list of issues in a github\n repository. \n\n :copyright: Copyright 2011 Barthelemy Dagenais\n :license: BSD, see LICENSE for details\n'''\n# Necessary for monkey patching\nfrom github2.request import GithubRequest\nfrom github2.users import Users\nfrom github2.repositories import Repositories\nfrom github2.commits import Commits\nimport time\nimport sys\n\nfrom github2.issues import Issues, Issue\nfrom github2.client import Github\nimport datetime\nimport StringIO\nimport re\n\n#### MONKEY PATCH github2 ####\n\ndef list_by_label(self, project, label):\n \"\"\"Get all issues for project' with label'.\n\n ``project`` is a string with the project owner username and repository\n name separated by ``/`` (e.g. ``ask/pygithub2``).\n ``label`` is a string representing a label (e.g., ``bug``).\n \"\"\"\n return self.get_values(\"list\", project, \"label\", label, filter=\"issues\",\n datatype=Issue)\n\ndef list_labels(self, project):\n \"\"\"Get all labels for project'.\n\n ``project`` is a string with the project owner username and repository\n name separated by ``/`` (e.g. 
``ask/pygithub2``).\n \"\"\"\n return self.get_values(\"labels\", project, filter=\"labels\")\n\ndef gh_init(self, username=None, api_token=None, debug=False,\n requests_per_minute=None, access_token=None):\n self.debug = debug\n self.request = GithubRequest(username=username, api_token=api_token,\n debug=self.debug,\n access_token=access_token,\n requests_per_minute=requests_per_minute)\n self.issues = Issues(self.request)\n self.users = Users(self.request)\n self.repos = Repositories(self.request)\n self.commits = Commits(self.request)\n\ndef gr_init(self, username=None, api_token=None, url_prefix=None,\n debug=False, requests_per_minute=None, access_token=None):\n \"\"\"\n Make an API request.\n \"\"\"\n self.username = username\n self.api_token = api_token\n self.access_token = access_token\n self.url_prefix = url_prefix\n self.debug = debug\n\n if requests_per_minute is not None:\n self.requests_per_minute = requests_per_minute\n self.requests_count = 0\n self.delay = 60.0\n else:\n self.delay = 0\n self.last_request = datetime.datetime(1900, 1, 1)\n if not self.url_prefix:\n self.url_prefix = self.url_format % {\n \"github_url\": self.github_url,\n \"api_version\": self.api_version,\n \"api_format\": self.api_format,\n }\n\ndef gr_make_request(self, path, extra_post_data=None, method=\"GET\"):\n # WARNING: THIS CODE IS NOT THREAD SAFE!!!!\n new_round = False\n\n if self.delay:\n since_last = (datetime.datetime.now() - self.last_request)\n since_last_seconds = (since_last.days * 24 * 60 * 60) + since_last.seconds + (since_last.microseconds/1000000.0)\n\n if since_last_seconds > self.delay:\n self.requests_count = 1\n new_round = True\n elif self.requests_count >= self.requests_per_minute:\n duration = self.delay - since_last_seconds\n if self.debug:\n sys.stderr.write(\"delaying API call %s\\n\" % duration)\n time.sleep(duration)\n self.requests_count = 1\n new_round = True\n else:\n self.requests_count += 1\n\n extra_post_data = extra_post_data or {}\n url = \"/\".join([self.url_prefix, path])\n result = self.raw_request(url, extra_post_data, method=method)\n\n if self.delay and new_round:\n self.last_request = datetime.datetime.now()\n return result\n\nIssues.list_by_label = list_by_label\nIssues.list_labels = list_labels\nGithubRequest.__init__ = gr_init\nGithubRequest.make_request = gr_make_request\nGithub.__init__ = gh_init\n\n\n#### CONSTANTS ####\n\nMILESTONE_LABEL_V = re.compile(r'''^v\\d+(?:\\.\\d+)*$''')\n'''Regex used to identify milestone labels of the form v0.1'''\n\nMILESTONE_LABEL_NUM = re.compile(r'''^\\d+(?:\\.\\d+)*$''')\n'''Regex used to identify numerical milestone labels of the form 0.1'''\n\nMILESTONE_LABEL_V_RELAX = re.compile(r'''^v\\d+(?:\\.\\d+)*''')\n'''Regex used to identify milestone labels of the form v0.1'''\n\nMILESTONE_LABEL_NUM_RELAX = re.compile(r'''^\\d+(?:\\.\\d+)*''')\n'''Regex used to identify numerical milestone labels of the form 0.1'''\n\nSIMPLE_HTML_HEADER = '''\n\n \n \n {0} Roadmap\n \n \n

      {0} Roadmap   [banner text of SIMPLE_HTML_HEADER; enclosing HTML markup lost in page extraction]
\n'''\n\nSIMPLE_HTML_FOOTER = '''\n
\n

\n Generated by ghmiles\n on {0}.\n

\n

\n \n

\n \n'''\n\nFANCY_HTML_HEADER = '''\n\n \n \n {0} Roadmap\n \n \n \n \n \n \n \n
\n

{0} Roadmap

\n
\n
\n'''\n\nFANCY_HTML_FOOTER = '''\n
\n
\n

\n Generated by ghmiles\n on {0}.\n

\n

\n \n

\n
\n \n'''\n\n#### MILESTONE MODEL #####\n\nclass Milestone(object):\n\n def __init__(self, title, issues):\n self.title = title\n self.issues = issues\n self.issues.sort(key=lambda item: int(item.number))\n self.total = len(issues)\n self.opened = sum((1 for issue in issues if issue.state == 'open'))\n self.closed = self.total - self.opened\n self.progress = float(self.closed) * 100.0 / float(self.total)\n\n def __repr__(self):\n return ''.format(\n self.title, self.total, self.progress)\n\ndef label_key(label, padding=5):\n '''Returns a padded key from a label representing a milestone number.\n All parts of a label that are numbers are padded so that alphabetical\n sorting can work as expected (e.g., '2.0' < '11.0'). \n\n For example, this function will return 'v00001.00022e-00123b' if label =\n 'v1.22e-123b'.\n\n :param label: the milestone label\n :param padding: the maximum number of characters for each numeric part.\n Default=5\n :return: a key that can be used in alphabetical sorting\n '''\n key = prefix = ''\n components = []\n \n in_prefix = True\n current_number = current_suffix = ''\n\n for c in label:\n if not c.isdigit():\n if in_prefix:\n prefix += c\n else:\n current_suffix += c\n else:\n if in_prefix:\n in_prefix = False\n if current_suffix != '':\n components.append((current_number, current_suffix))\n current_number = current_suffix = ''\n current_number += c\n\n if not in_prefix and current_number != '':\n components.append((current_number, current_suffix))\n\n key = prefix\n for component in components:\n key += component[0].rjust(padding,'0') + component[1]\n\n return key\n\ndef get_milestone_labels(project, milestone_regex, reverse=True, github=None):\n if github is None:\n github = Github(requests_per_minute=60)\n labels = sorted(github.issues.list_labels(project), key=label_key, reverse=reverse)\n project_labels = (label for label in labels if milestone_regex.match(label))\n return project_labels\n\ndef get_intel_milestone_labels(project, reverse=True, github=None):\n if github is None:\n github = Github(requests_per_minute=60)\n labels = sorted(github.issues.list_labels(project), key=label_key, reverse=reverse)\n regexes = [MILESTONE_LABEL_NUM, MILESTONE_LABEL_NUM_RELAX,\n MILESTONE_LABEL_V, MILESTONE_LABEL_V_RELAX]\n max_labels = 0\n limit = len(labels)\n project_labels = []\n for regex in regexes:\n temp_labels = [label for label in labels if regex.match(label)]\n size = len(temp_labels)\n if size > max_labels:\n project_labels = temp_labels\n max_labels = size\n if size == limit:\n break\n\n return (project_labels, labels)\n\ndef get_milestone(project, milestone_label, github=None):\n if github is None:\n github = Github(requests_per_minute=60)\n issues = github.issues.list_by_label(project, milestone_label)\n return Milestone(milestone_label, issues)\n\ndef get_milestones(project, milestone_regex, reverse=True, github=None):\n '''Generates a list of milestones for a github project\n\n :param project: a string of the form `user/project`\n :param milestone_regex: a regular expression used to identify the labels\n representing milestones.\n :param reverse: If True (default), sort the milestones from the highest \n number to the lowest. Oppositive if False.\n :param github: a Github client (optional).\n :return: A generator (iterator) of milestones. 
\n '''\n\n if github is None:\n github = Github(requests_per_minute=60)\n labels = get_milestone_labels(project, milestone_regex, reverse, github)\n milestones = (get_milestone(project, label, github) for\n label in labels) \n\n return milestones\n\ndef get_milestones_from_labels(project, labels, github=None):\n '''Generates a list of milestones from the specified issue labels of a \n github project. This can be used to generate a milestone model for recent\n milestones only.\n\n :param project: a string of the form `user/project`\n :param labels: a list of labels used to generate milestones. \n :param github: a Github client (optional).\n :return: A generator (iterator) of milestones. \n '''\n if github is None:\n github = Github(requests_per_minute=60)\n milestones = (get_milestone(project, label, github) for\n label in labels) \n\n return milestones\n\n\n#### HTML GENERATION ####\n\ndef write_simple_html_milestones(milestones, output):\n for milestone in milestones:\n output.write('

        Milestone: {0}   [heading markup lost in page extraction]
\\n'.format(milestone.title))\n output.write('

        Progress: {0}%   [progress markup lost in page extraction]
'\n .format(milestone.progress))\n output.write('

Number of tickets: ')\n output.write('closed: {0} active: {1} total: {2}

\\n'\n .format(milestone.closed, milestone.opened, milestone.total))\n output.write('

Issues:

\\n
    \\n')\n for issue in milestone.issues:\n output.write('
  • #{0} {1} {2}
  • \\n'\n .format(issue.number, issue.title, issue.state))\n output.write('
\\n')\n\ndef get_simple_html_page(milestones, project_name = 'GitHub Project', \n save_path=None, header=SIMPLE_HTML_HEADER, footer=SIMPLE_HTML_FOOTER):\n '''Generates a simple HTML page similar to a Trac roadmap.\n\n :param milestones: a list (or iterator) of milestones.\n :param project_name: a human-readable project name. (optional)\n :param save_path: the output path used to save the HTML page. If None, a\n string containing the HTML page will be returned instead.\n :param header: the HTML header used to generate the HTML page. (optional)\n :param footer: the HTML footer used to generate the HTML page. (optional)\n :return: None if a save_path is provided, an HTML string otherwise.\n '''\n\n return_value = None\n\n if save_path is None:\n output = StringIO.StringIO()\n else:\n output = open(save_path, 'w')\n\n output.write(header.format(project_name))\n\n write_simple_html_milestones(milestones, output)\n\n output.write(footer.format(str(datetime.datetime.now())))\n\n if save_path is None:\n return_value = output.getvalue()\n\n output.close()\n\n return return_value\n\n\ndef write_fancy_html_milestones(milestones, project, output):\n for milestone in milestones:\n new_title = milestone.title.replace('.','--')\n progress = int(milestone.progress)\n\n output.write(''.format(milestone.title))\n output.write('

Milestone: {0}

\\n'.format(milestone.title))\n output.write('''\n \n '''.format(new_title,progress))\n output.write('''\n
\n
\n
{1}%
\n
\n '''.format(new_title, progress))\n output.write('''\n
\n
Number of tickets:
\n
closed:
\n
{0}
\n
active:
\n
{1}
\n
total:
\n
{2}
\n
\n '''.format(milestone.closed, milestone.opened, milestone.total))\n output.write('

'\n .format(milestone.title))\n output.write('List of Issues:

\\n')\n output.write('
    \\n')\n for issue in milestone.issues:\n output.write(\n '
  • #{1}'\n .format(project, issue.number))\n output.write(' {0}'.format(issue.title))\n output.write(' - {0}
  • \\n'.format(issue.state))\n output.write('
\\n')\n\ndef get_fancy_html_page(milestones, project, project_name = None,\n save_path=None, header=FANCY_HTML_HEADER, footer=FANCY_HTML_FOOTER):\n '''Generates a fancy HTML page similar to a Trac roadmap.\n\n :param milestones: a list (or iterator) of milestones.\n :param project: a string of the form `user/project`\n :param project_name: a human-readable project name. (optional)\n :param save_path: the output path used to save the HTML page. If None, a\n string containing the HTML page will be returned instead.\n :param header: the HTML header used to generate the HTML page. (optional)\n :param footer: the HTML footer used to generate the HTML page. (optional)\n :return: None if a save_path is provided, an HTML string otherwise.\n '''\n\n return_value = None\n\n if project_name is None:\n project_name = project.split('/')[1]\n\n if save_path is None:\n output = StringIO.StringIO()\n else:\n output = open(save_path, 'w')\n\n output.write(header.format(project_name))\n\n write_fancy_html_milestones(milestones, project, output)\n\n output.write(footer.format(str(datetime.datetime.now())))\n\n if save_path is None:\n return_value = output.getvalue()\n\n output.close()\n\n return return_value\n\n\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":4907401982529198000,"string":"4,907,401,982,529,198,000"},"line_mean":{"kind":"number","value":31.0171428571,"string":"31.017143"},"line_max":{"kind":"number","value":135,"string":"135"},"alpha_frac":{"kind":"number","value":0.6162769945,"string":"0.616277"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109459,"cells":{"repo_name":{"kind":"string","value":"Scille/parsec-cloud"},"path":{"kind":"string","value":"tests/backend/user/test_user_create.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"21387"},"content":{"kind":"string","value":"# Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS\n\nimport pytest\nimport pendulum\n\nfrom parsec.backend.user import INVITATION_VALIDITY, User, Device\nfrom parsec.api.data import UserCertificateContent, DeviceCertificateContent, UserProfile\nfrom parsec.api.protocol import DeviceID\n\nfrom tests.common import freeze_time\nfrom tests.backend.common import user_get, user_create\n\n\n@pytest.mark.trio\n@pytest.mark.parametrize(\n \"profile,with_labels\", [(profile, profile != UserProfile.STANDARD) for profile in UserProfile]\n)\nasync def test_user_create_ok(\n backend, backend_sock_factory, alice_backend_sock, alice, mallory, profile, with_labels\n):\n now = pendulum.now()\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=profile,\n )\n redacted_user_certificate = user_certificate.evolve(human_handle=None)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n )\n redacted_device_certificate = device_certificate.evolve(device_label=None)\n if not with_labels:\n user_certificate = redacted_user_certificate\n device_certificate = redacted_device_certificate\n\n user_certificate = user_certificate.dump_and_sign(alice.signing_key)\n device_certificate = device_certificate.dump_and_sign(alice.signing_key)\n redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key)\n redacted_device_certificate 
= redacted_device_certificate.dump_and_sign(alice.signing_key)\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=redacted_user_certificate,\n redacted_device_certificate=redacted_device_certificate,\n )\n assert rep == {\"status\": \"ok\"}\n\n # Make sure mallory can connect now\n async with backend_sock_factory(backend, mallory) as sock:\n rep = await user_get(sock, user_id=mallory.user_id)\n assert rep[\"status\"] == \"ok\"\n\n # Check the resulting data in the backend\n backend_user, backend_device = await backend.user.get_user_with_device(\n mallory.organization_id, mallory.device_id\n )\n assert backend_user == User(\n user_id=mallory.user_id,\n human_handle=mallory.human_handle if with_labels else None,\n profile=profile,\n user_certificate=user_certificate,\n redacted_user_certificate=redacted_user_certificate,\n user_certifier=alice.device_id,\n created_on=now,\n )\n assert backend_device == Device(\n device_id=mallory.device_id,\n device_label=mallory.device_label if with_labels else None,\n device_certificate=device_certificate,\n redacted_device_certificate=redacted_device_certificate,\n device_certifier=alice.device_id,\n created_on=now,\n )\n\n\n@pytest.mark.trio\nasync def test_user_create_invalid_certificate(alice_backend_sock, alice, bob, mallory):\n now = pendulum.now()\n good_user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n good_device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n ).dump_and_sign(alice.signing_key)\n bad_user_certificate = UserCertificateContent(\n author=bob.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(bob.signing_key)\n bad_device_certificate = DeviceCertificateContent(\n author=bob.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n ).dump_and_sign(bob.signing_key)\n\n for cu, cd in [\n (good_user_certificate, bad_device_certificate),\n (bad_user_certificate, good_device_certificate),\n (bad_user_certificate, bad_device_certificate),\n ]:\n rep = await user_create(\n alice_backend_sock,\n user_certificate=cu,\n device_certificate=cd,\n redacted_user_certificate=good_user_certificate,\n redacted_device_certificate=good_device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_certification\",\n \"reason\": \"Invalid certification data (Signature was forged or corrupt).\",\n }\n\n # Same thing for the redacted part\n for cu, cd in [\n (good_user_certificate, bad_device_certificate),\n (bad_user_certificate, good_device_certificate),\n (bad_user_certificate, bad_device_certificate),\n ]:\n rep = await user_create(\n alice_backend_sock,\n user_certificate=good_user_certificate,\n device_certificate=good_device_certificate,\n redacted_user_certificate=cu,\n redacted_device_certificate=cd,\n )\n assert rep == {\n \"status\": \"invalid_certification\",\n \"reason\": \"Invalid certification data (Signature was forged or corrupt).\",\n }\n\n\n@pytest.mark.trio\nasync def 
test_user_create_not_matching_user_device(alice_backend_sock, alice, bob, mallory):\n now = pendulum.now()\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=bob.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n ).dump_and_sign(alice.signing_key)\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Device and User must have the same user ID.\",\n }\n\n\n@pytest.mark.trio\nasync def test_user_create_bad_redacted_device_certificate(alice_backend_sock, alice, mallory):\n now = pendulum.now()\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=None, # Can be used as regular and redacted certificate\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n )\n good_redacted_device_certificate = device_certificate.evolve(device_label=None)\n device_certificate = device_certificate.dump_and_sign(alice.signing_key)\n for bad_redacted_device_certificate in (\n good_redacted_device_certificate.evolve(timestamp=now.add(seconds=1)),\n good_redacted_device_certificate.evolve(device_id=alice.device_id),\n good_redacted_device_certificate.evolve(verify_key=alice.verify_key),\n ):\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=bad_redacted_device_certificate.dump_and_sign(\n alice.signing_key\n ),\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Redacted Device certificate differs from Device certificate.\",\n }\n\n # Missing redacted certificate is not allowed as well\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=None,\n )\n assert rep == {\n \"status\": \"bad_message\",\n \"reason\": \"Invalid message.\",\n \"errors\": {\"redacted_device_certificate\": [\"Missing data for required field.\"]},\n }\n\n # Finally just make sure good was really good\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=good_redacted_device_certificate.dump_and_sign(\n alice.signing_key\n ),\n )\n assert rep == {\"status\": \"ok\"}\n\n\n@pytest.mark.trio\nasync def test_user_create_bad_redacted_user_certificate(alice_backend_sock, alice, mallory):\n now = pendulum.now()\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=None, # Can be used as regular and redacted certificate\n 
verify_key=mallory.verify_key,\n ).dump_and_sign(alice.signing_key)\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n )\n good_redacted_user_certificate = user_certificate.evolve(human_handle=None)\n user_certificate = user_certificate.dump_and_sign(alice.signing_key)\n for bad_redacted_user_certificate in (\n good_redacted_user_certificate.evolve(timestamp=now.add(seconds=1)),\n good_redacted_user_certificate.evolve(user_id=alice.user_id),\n good_redacted_user_certificate.evolve(public_key=alice.public_key),\n good_redacted_user_certificate.evolve(profile=UserProfile.OUTSIDER),\n ):\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=bad_redacted_user_certificate.dump_and_sign(\n alice.signing_key\n ),\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Redacted User certificate differs from User certificate.\",\n }\n\n # Missing redacted certificate is not allowed as well\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=None,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"bad_message\",\n \"reason\": \"Invalid message.\",\n \"errors\": {\"redacted_user_certificate\": [\"Missing data for required field.\"]},\n }\n\n # Finally just make sure good was really good\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=good_redacted_user_certificate.dump_and_sign(alice.signing_key),\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\"status\": \"ok\"}\n\n\n@pytest.mark.trio\nasync def test_user_create_already_exists(alice_backend_sock, alice, bob):\n now = pendulum.now()\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=bob.user_id,\n human_handle=None,\n public_key=bob.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=bob.device_id,\n device_label=None,\n verify_key=bob.verify_key,\n ).dump_and_sign(alice.signing_key)\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\"status\": \"already_exists\", \"reason\": f\"User `{bob.user_id}` already exists\"}\n\n\n@pytest.mark.trio\nasync def test_user_create_human_handle_already_exists(alice_backend_sock, alice, bob):\n now = pendulum.now()\n bob2_device_id = DeviceID(\"bob2@dev1\")\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=bob2_device_id.user_id,\n human_handle=bob.human_handle,\n public_key=bob.public_key,\n profile=UserProfile.STANDARD,\n )\n redacted_user_certificate = user_certificate.evolve(human_handle=None)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=bob2_device_id,\n device_label=\"dev2\",\n verify_key=bob.verify_key,\n )\n redacted_device_certificate = 
device_certificate.evolve(device_label=None)\n\n user_certificate = user_certificate.dump_and_sign(alice.signing_key)\n redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key)\n device_certificate = device_certificate.dump_and_sign(alice.signing_key)\n redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key)\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=redacted_user_certificate,\n redacted_device_certificate=redacted_device_certificate,\n )\n assert rep == {\n \"status\": \"already_exists\",\n \"reason\": f\"Human handle `{bob.human_handle}` already corresponds to a non-revoked user\",\n }\n\n\n@pytest.mark.trio\nasync def test_user_create_human_handle_with_revoked_previous_one(\n alice_backend_sock, alice, bob, backend_data_binder\n):\n # First revoke bob\n await backend_data_binder.bind_revocation(user_id=bob.user_id, certifier=alice)\n\n # Now recreate another user with bob's human handle\n now = pendulum.now()\n bob2_device_id = DeviceID(\"bob2@dev1\")\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=bob2_device_id.user_id,\n human_handle=bob.human_handle,\n public_key=bob.public_key,\n profile=UserProfile.STANDARD,\n )\n redacted_user_certificate = user_certificate.evolve(human_handle=None)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=bob2_device_id,\n device_label=bob.device_label, # Device label doesn't have to be unique\n verify_key=bob.verify_key,\n )\n redacted_device_certificate = device_certificate.evolve(device_label=None)\n\n user_certificate = user_certificate.dump_and_sign(alice.signing_key)\n redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key)\n device_certificate = device_certificate.dump_and_sign(alice.signing_key)\n redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key)\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=redacted_user_certificate,\n redacted_device_certificate=redacted_device_certificate,\n )\n assert rep == {\"status\": \"ok\"}\n\n\n@pytest.mark.trio\nasync def test_user_create_not_matching_certified_on(alice_backend_sock, alice, mallory):\n date1 = pendulum.datetime(2000, 1, 1)\n date2 = date1.add(seconds=1)\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=date1,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=date2,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n ).dump_and_sign(alice.signing_key)\n with freeze_time(date1):\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Device and User certificates must have the same timestamp.\",\n }\n\n\n@pytest.mark.trio\nasync def test_user_create_certificate_too_old(alice_backend_sock, alice, mallory):\n too_old = 
pendulum.datetime(2000, 1, 1)\n now = too_old.add(seconds=INVITATION_VALIDITY + 1)\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=too_old,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n ).dump_and_sign(alice.signing_key)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=too_old,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n ).dump_and_sign(alice.signing_key)\n\n with freeze_time(now):\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_certification\",\n \"reason\": \"Invalid timestamp in certificate.\",\n }\n\n\n@pytest.mark.trio\nasync def test_user_create_author_not_admin(backend, bob_backend_sock):\n # No need for valid certificate given given access right should be\n # checked before payload deserialization\n rep = await user_create(\n bob_backend_sock,\n user_certificate=b\"\",\n device_certificate=b\"\",\n redacted_user_certificate=b\"\",\n redacted_device_certificate=b\"\",\n )\n assert rep == {\"status\": \"not_allowed\", \"reason\": \"User `bob` is not admin\"}\n\n\n@pytest.mark.trio\nasync def test_redacted_certificates_cannot_contain_sensitive_data(\n alice_backend_sock, alice, mallory\n):\n now = pendulum.now()\n user_certificate = UserCertificateContent(\n author=alice.device_id,\n timestamp=now,\n user_id=mallory.user_id,\n human_handle=mallory.human_handle,\n public_key=mallory.public_key,\n profile=UserProfile.STANDARD,\n )\n redacted_user_certificate = user_certificate.evolve(human_handle=None)\n device_certificate = DeviceCertificateContent(\n author=alice.device_id,\n timestamp=now,\n device_id=mallory.device_id,\n device_label=mallory.device_label,\n verify_key=mallory.verify_key,\n )\n redacted_device_certificate = device_certificate.evolve(device_label=None)\n\n user_certificate = user_certificate.dump_and_sign(alice.signing_key)\n device_certificate = device_certificate.dump_and_sign(alice.signing_key)\n redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key)\n redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key)\n\n with freeze_time(now):\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=user_certificate,\n redacted_device_certificate=redacted_device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Redacted User certificate must not contain a human_handle field.\",\n }\n\n rep = await user_create(\n alice_backend_sock,\n user_certificate=user_certificate,\n device_certificate=device_certificate,\n redacted_user_certificate=redacted_user_certificate,\n redacted_device_certificate=device_certificate,\n )\n assert rep == {\n \"status\": \"invalid_data\",\n \"reason\": \"Redacted Device certificate must not contain a device_label field.\",\n 
}\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":2347862217895652000,"string":"2,347,862,217,895,652,000"},"line_mean":{"kind":"number","value":37.0551601423,"string":"37.05516"},"line_max":{"kind":"number","value":98,"string":"98"},"alpha_frac":{"kind":"number","value":0.6672745126,"string":"0.667275"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109460,"cells":{"repo_name":{"kind":"string","value":"neversakura/EE511_Fall2016"},"path":{"kind":"string","value":"Basic Examples/SolvingLinearEquations.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2763"},"content":{"kind":"string","value":"# This is a simple example using numpy to solve a linear system of the form\n# A*x=B\n# You can find more details on the following webpage:\n# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.solve.html\n\n# Author: Huo Chen\n\n# Revison History\n# ===========================================================================\n# First commit 8/28/2016\n# Singular matrix 9/6/2016\n\nimport numpy as np\n\ndef nullspace(A, atol=1e-13, rtol=0):\n \"\"\"Compute an approximate basis for the nullspace of A.\n\n The algorithm used by this function is based on the singular value\n decomposition of `A`.\n\n Parameters\n ----------\n A : ndarray\n A should be at most 2-D. A 1-D array with length k will be treated\n as a 2-D with shape (1, k)\n atol : float\n The absolute tolerance for a zero singular value. Singular values\n smaller than `atol` are considered to be zero.\n rtol : float\n The relative tolerance. Singular values less than rtol*smax are\n considered to be zero, where smax is the largest singular value.\n\n If both `atol` and `rtol` are positive, the combined tolerance is the\n maximum of the two; that is::\n tol = max(atol, rtol * smax)\n Singular values smaller than `tol` are considered to be zero.\n\n Return value\n ------------\n ns : ndarray\n If `A` is an array with shape (m, k), then `ns` will be an array\n with shape (k, n), where n is the estimated dimension of the\n nullspace of `A`. 
The columns of `ns` are a basis for the\n nullspace; each element in numpy.dot(A, ns) will be approximately\n zero.\n \"\"\"\n\n A = np.atleast_2d(A)\n u, s, vh = np.lin.svd(A)\n tol = max(atol, rtol * s[0])\n nnz = (s >= tol).sum()\n ns = vh[nnz:].conj().T\n return ns\n\nA=np.random.randint(10,size=(3,3))\nB=np.random.randint(10,size=(3,))\n\nC=np.concatenate((A,np.transpose([B])),axis=1)\n\nprint('The linear system has the form of A*x=B, where A is:')\nprint(A)\nprint('B is:')\nprint(B)\n\n# if matrix A is singluar, we have two situations-- infinite many solutions\n# or no solution at all\n\nif np.linalg.matrix_rank(A)!=3 and np.linalg.matrix_rank(C) == np.linalg.matrix_rank(A):\n print('The matrix is singular and there are infinite number of solutions.')\n print('The homogeneous solution is:')\n print(nullspace(A))\n print('The particular solution is:')\n print(np.linalg.lstsq(A,B)[0])\n\nelif np.linalg.matrix_rank(A)!=3 and np.linalg.matrix_rank(C) != np.linalg.matrix_rank(A):\n print('Matrix A is singular and there is no proper solution.')\n\nelse:\n print('The solution x is:')\n x=np.linalg.solve(A,B)\n print(x)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":954687164481706400,"string":"954,687,164,481,706,400"},"line_mean":{"kind":"number","value":33.1111111111,"string":"33.111111"},"line_max":{"kind":"number","value":90,"string":"90"},"alpha_frac":{"kind":"number","value":0.6123778502,"string":"0.612378"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109461,"cells":{"repo_name":{"kind":"string","value":"Marcerisson/his-dudeness"},"path":{"kind":"string","value":"graphalama/screen.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4929"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport random\nimport pygame\nimport os\nimport graphalama\nfrom graphalama.borg import Borg\nfrom graphalama import fonts as f\nfrom graphalama.color import Color\nfrom graphalama.rectangle import Rectangle\nfrom graphalama.text import Text\nfrom graphalama.CONSTANTS import *\n\npath = str(os.path.dirname(graphalama.__file__)).replace('\\\\', '/') + '/assets/img/'\nwait_party_images = [pygame.image.load(path + 'wait_party/bunch_of_mms.png')] # we load the wait party image\nicon_image = pygame.image.load(path + 'logo.png') # we load the logo image\nborg_baby = Borg()\n\n\ndef new_display(name='Test with Graphalama', size=(0, 0), full_screen=False, icon_path='icon.png'):\n\n if not full_screen:\n display = pygame.display.set_mode(size, pygame.RESIZABLE | pygame.SRCALPHA) # window creation w/o fullscreen\n else:\n display = pygame.display.set_mode([0, 0], pygame.FULLSCREEN | pygame.SRCALPHA) # window creation w/ fullscreen\n pygame.display.set_caption(name + ' ' + str(Borg().version)) # the window title\n\n try:\n icon_image = pygame.image.load(icon_path) # we load the logo image\n pygame.display.set_icon(icon_image) # the logo (task bar)\n except pygame.error:\n print('Icon file not found or unreadable')\n\n Borg().SCREEN_SIZE = display.get_size()\n return display\n\n\ndef resize(display, inputs):\n \"\"\"\n\n :param display: The pygame display, created with new_display()\n :param inputs: An Inputs object\n :return:\n \"\"\"\n screen_size_before = borg_baby.SCREEN_SIZE\n\n if inputs['F12']['just pressed']:\n inputs['screen']['fullscreen'] = not inputs['screen']['fullscreen']\n\n if not inputs['screen']['fullscreen']:\n display = pygame.display.set_mode([800, 500], pygame.RESIZABLE | pygame.SRCALPHA)\n 
else:\n display = pygame.display.set_mode([0, 0], pygame.FULLSCREEN | pygame.SRCALPHA)\n\n elif inputs['screen']['size'] != borg_baby.SCREEN_SIZE:\n display = pygame.display.set_mode(inputs['screen']['size'], pygame.RESIZABLE | pygame.SRCALPHA)\n inputs['screen']['change'] = False\n\n borg_baby.SCREEN_SIZE = display.get_size()\n\n if screen_size_before != borg_baby.SCREEN_SIZE:\n inputs['screen']['size'] = borg_baby.SCREEN_SIZE\n\n\ndef wait_party(surface):\n\n max_w = surface.get_width()\n h = surface.get_height()\n\n def color_mode_function(color):\n color.do_rainbow(random.randint(20, 50))\n return color.rainbow\n color = Color(WHITE, my_rainbow=color_mode_function)\n color.mode = 'my_rainbow'\n bpoints = [0]\n hpoints = [0]\n allc = list()\n while bpoints[-1] < max_w or hpoints[-1] < max_w:\n bpoints.append(bpoints[-1] + random.randint(20, 100))\n hpoints.append(hpoints[-1] + random.randint(20, 100))\n allc.append(color.RGB)\n points = ((hpoints[-2], 0), (hpoints[-1], 0), (bpoints[-1], h), (bpoints[-2], h))\n pygame.draw.polygon(surface, allc[-1], points)\n\n for c, hx, bx in zip(allc, bpoints, hpoints):\n pygame.draw.aaline(surface, c, (bx, 0), (hx, h))\n\n pygame.display.update()\n\n\n# TODO : Faire retourner ça chez lui, dans Hubert. Et modifier le fonctionnement avec une Queue.\n# Pour plus d'info, envoyez QUEUE au 06 95 40 21 62 !\ndef error_screen(display, inputs, error, fatal_error=False):\n wait_party(display)\n log_rect = Rectangle(0.1, 0.25, 0.8, 0.5, WHITE, border=(RED, 1))\n text_1_error = 'An error occured :', 'en'\n text_2_error = 'Please press enter to continue', 'en'\n error_1_text = Text(text_1_error, (0.15, 0.28, 0.7, 0.05), f.Font(f.Calibri, 1, True), color=RED,\n anchor=('tc', 0, 0))\n error_2_text = Text(str(error), (0.15, 0.37, 0.7, 0.05),\n f.Font(f.Calibri, 1, True), anchor=('tc', 0, 0), color=RED)\n error_3_text = Text(text_2_error, (0.15, 0.49, 0.7, 0.05),\n f.Font(f.Calibri, 1, True), anchor=('tc', 0, 0), color=RED)\n valid_button = Text('Ok :/', (0.4, 0.62, 0.2, 0.1, L_BLUE, ROUNDED, 0.5), f.Font(f.Calibri, 0.3), RED,\n anchor=('cc', 0, 0))\n enter = False\n while not (enter or valid_button.mouse_click_area(1)): # tant qu'on pèse pas sur Entrée\n if inputs['enter']['is pressed']:\n enter = True\n log_rect.render()\n error_1_text.render()\n error_2_text.render()\n error_3_text.render()\n valid_button.render()\n display.blit(log_rect, log_rect.real_topleft)\n display.blit(error_1_text, error_1_text.real_topleft)\n display.blit(error_2_text, error_2_text.real_topleft)\n display.blit(error_3_text, error_3_text.real_topleft)\n display.blit(valid_button, valid_button.real_topleft)\n yield \"APOUAAAAAAAAAAAAAAAAAAAAAL nick fury ta maire de Strasbourg #rolandries\"\n if fatal_error:\n quit()\n\n__all__ = ['new_display', \"resize\", 'wait_party', 'error_scren']\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1188802242255722000,"string":"-1,188,802,242,255,722,000"},"line_mean":{"kind":"number","value":39.0487804878,"string":"39.04878"},"line_max":{"kind":"number","value":119,"string":"119"},"alpha_frac":{"kind":"number","value":0.6278928136,"string":"0.627893"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109462,"cells":{"repo_name":{"kind":"string","value":"micktwomey/gamecraft-mk-iii"},"path":{"kind":"string","value":"gamecraft/settings_local_development.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1218"},"content":{"kind":"string","value":"from gamecraft.settings import 
*\n\nimport os\n\nDJANGO_SECRET_KEY = \"foo\"\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.environ.get(\"GAMECRAFT_SQLITE_DB\", \"/tmp/gamecraft.sqlite\"),\n }\n}\n\nINSTALLED_APPS = INSTALLED_APPS + (\n 'debug_toolbar.apps.DebugToolbarConfig',\n)\n\nINTERNAL_IPS = ['127.0.0.1', 'localhost', '::1']\n\nDEBUG_TOOLBAR_CONFIG = {\n \"SHOW_TOOLBAR_CALLBACK\": \"gamecraft.utils.debug_toolbar_callback\",\n}\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'\n\nPIPELINE_COMPILERS = (\n 'pipeline.compilers.less.LessCompiler',\n)\n\nPIPELINE_CSS = {\n 'gamecraft': {\n 'source_filenames': (\n 'css/gamecraft.less',\n 'css/leaflet.css',\n ),\n 'output_filename': 'css/gamecraft.css',\n },\n}\n\nPIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'\n\nPIPELINE_JS = {\n 'gamecraft': {\n 'source_filenames': (\n 'js/holder.js',\n 'js/jquery.js',\n 'js/bootstrap.js',\n 'js/leaflet.js',\n # 'js/react-with-addons.js', # bug in processing the '\\uFEFF' in the file\n ),\n 'output_filename': 'js/gamecraft.js',\n },\n}\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-4015671678422743000,"string":"-4,015,671,678,422,743,000"},"line_mean":{"kind":"number","value":21.9811320755,"string":"21.981132"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.5952380952,"string":"0.595238"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109463,"cells":{"repo_name":{"kind":"string","value":"mozilla/peekaboo"},"path":{"kind":"string","value":"peekaboo/settings/base.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3762"},"content":{"kind":"string","value":"# This is your project's main settings file that can be committed to your\n# repo. If you need to override a setting locally, use settings_local.py\n\nfrom funfactory.settings_base import * # NOQA\n\n# Name of the top-level module where you put all your apps.\n# If you did not install Playdoh with the funfactory installer script\n# you may need to edit this value. 
See the docs about installing from a\n# clone.\nPROJECT_MODULE = 'peekaboo'\n\nUSE_TZ = True\nTIME_ZONE = 'US/Pacific'\n\n# Defines the views served for root URLs.\nROOT_URLCONF = '%s.urls' % PROJECT_MODULE\n\nINSTALLED_APPS = (\n 'funfactory',\n 'compressor',\n 'django_browserid',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'commonware.response.cookies',\n 'session_csrf',\n\n # Application base, containing global templates.\n '%s.base' % PROJECT_MODULE,\n '%s.main' % PROJECT_MODULE,\n '%s.sheet' % PROJECT_MODULE,\n '%s.authentication' % PROJECT_MODULE,\n '%s.users' % PROJECT_MODULE,\n '%s.locations' % PROJECT_MODULE,\n 'sorl.thumbnail',\n 'bootstrapform',\n 'cronjobs',\n 'django.contrib.admin',\n 'raven.contrib.django.raven_compat',\n 'django_nose', # deliberately making this the last one\n)\n\n\nLOCALE_PATHS = (\n os.path.join(ROOT, PROJECT_MODULE, 'locale'),\n)\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\n# Because Jinja2 is the default template loader, add any non-Jinja templated\n# apps here:\nJINGO_EXCLUDE_APPS = [\n 'admin',\n 'bootstrapform',\n 'browserid',\n]\n\n# BrowserID configuration\nAUTHENTICATION_BACKENDS = [\n 'django_browserid.auth.BrowserIDBackend',\n 'django.contrib.auth.backends.ModelBackend',\n]\n\nSITE_URL = 'http://localhost:8000'\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGIN_REDIRECT_URL_FAILURE = '/auth/login/'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.media',\n 'django.core.context_processors.request',\n 'session_csrf.context_processor',\n 'django.contrib.messages.context_processors.messages',\n 'funfactory.context_processors.globals',\n 'peekaboo.main.context_processors.main',\n)\n\n# Should robots.txt deny everything or disallow a calculated list of URLs we\n# don't want to be crawled? 
Default is false, disallow everything.\n# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710\nENGAGE_ROBOTS = False\n\n# Always generate a CSRF token for anonymous users.\nANON_ALWAYS = True\n\nMIDDLEWARE_CLASSES = (\n 'funfactory.middleware.LocaleURLMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'session_csrf.CsrfMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'commonware.middleware.FrameOptionsHeader',\n 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'\n)\n\n# We're never storing any passwords so this can be anything\nHMAC_KEYS = {'something': 'anything'}\nPASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)\n\nLOGGING = dict(loggers=dict(playdoh={'level': logging.DEBUG}))\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'\n\n# Whether the picture taking part of sign in process should be enabled\nDEFAULT_TAKE_PICTURE = True\n\nBROWSERID_REQUEST_ARGS = {'siteName': 'Peek-a-boo!'}\n\nRECYCLE_MINIMUM_HOURS = 30 # days\n\n# Set to True if you want to keep the components that are made to generate\n# PDFs when printing badges\nDEBUG_PDF_PROGRAM = False\n\n# Default in Django is 2 weeks (1209600 = 60 * 60 * 24 * 7 * 2)\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 365 # 1 year\n"},"license":{"kind":"string","value":"mpl-2.0"},"hash":{"kind":"number","value":6753622614826250000,"string":"6,753,622,614,826,250,000"},"line_mean":{"kind":"number","value":30.35,"string":"30.35"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.7235513025,"string":"0.723551"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109464,"cells":{"repo_name":{"kind":"string","value":"chadhs/idonethis-spectator"},"path":{"kind":"string","value":"spectator.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1732"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom flask import Flask\nfrom flask import render_template\nimport requests\nimport datetime\nimport config\n\n\n## configuration\ntoken = config.token\nteam = config.team\nuser = config.user\nidt_url = \"https://idonethis.com\"\napi_dones_url = \"%s/api/v0.1/dones/?owner=%s&team=%s&page_size=100\" % (idt_url, user, team)\n\n## the app\napp = Flask(__name__)\n\n\n### helpers\ndef get_json_data(url):\n \"\"\"\n fetch dones from the iDoneThis api, return list of dones from the json response\n \"\"\"\n headers = {'content-type': 'application/json', 'authorization': 'token %s' % (token)}\n r = requests.get(url, headers=headers)\n data = r.json()\n dones = data['results']\n return dones\n\ndef fix_rel_url(dones):\n \"\"\"\n replace relative urls in markedup_text with absolute urls\n \"\"\"\n for done in dones:\n done['markedup_text'] = done['markedup_text'].replace(\"/hashtags\",\"%s/hashtags\" % (idt_url))\n done['markedup_text'] = done['markedup_text'].replace(\"/cal\",\"%s/cal\" % (idt_url))\n return dones\n\n\n### urls\n@app.route(\"/\")\ndef display_dones():\n startdate = datetime.date.today() - datetime.timedelta(1)\n enddate = datetime.date.today() - datetime.timedelta(7)\n\n url_today = \"%s&done_date=today\" % (api_dones_url)\n dones_today = get_json_data(url_today)\n dones_today = fix_rel_url(dones_today)\n\n url_lastweek = \"%s&done_date_after=%s&done_date_before=%s\" % (api_dones_url, enddate, startdate)\n dones_lastweek = 
get_json_data(url_lastweek)\n dones_lastweek = fix_rel_url(dones_lastweek)\n\n return render_template('dones.html', team=team, user=user, dones_today=dones_today, dones_lastweek=dones_lastweek)\n\n\n## run app\nif __name__ == \"__main__\":\n app.run(debug=True)\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":8088948709020566000,"string":"8,088,948,709,020,566,000"},"line_mean":{"kind":"number","value":27.393442623,"string":"27.393443"},"line_max":{"kind":"number","value":118,"string":"118"},"alpha_frac":{"kind":"number","value":0.6587759815,"string":"0.658776"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109465,"cells":{"repo_name":{"kind":"string","value":"berjc/code-complete"},"path":{"kind":"string","value":"code_complete/code_snippet_providers/github_code_snippet_provider.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5232"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\"\"\" Encapsulates Functionality for Gathering Relevant Code Snippets from Github. \"\"\"\n\nimport httplib\nfrom lxml import html\nimport requests\n\nfrom abstract_code_snippet_provider import AbstractCodeSnippetProvider\nfrom utils.request_builder import RequestBuilder\n\n\nclass GithubCodeSnippetProvider(AbstractCodeSnippetProvider):\n \"\"\" Encapsulates Functionality for Gathering Relevant Code Snippets from Github. \"\"\"\n\n PATH_DELIM = '/'\n SPACE_DELIM = '+'\n\n # The number of search pages to iterate through on Github.\n NUM_PAGES_TO_CHECK = 2\n\n GITHUB_DOMAIN = 'github.com'\n GITHUB_SEARCH_PATH = '/search'\n\n RAW_GITHUB_USER_CONTENT_DOMAIN = 'raw.githubusercontent.com'\n BLOB_INDEX = 3\n\n # Github Search Request Parameters.\n GITHUB_LANGUAGE_KEY = 'l'\n GITHUB_QUERY_KEY = 'q'\n GITHUB_PAGE_KEY = 'p'\n GITHUB_TYPE_KEY = 'type'\n GITHUB_TYPE_VALUE = 'Code'\n\n # The xpath for parsing snippet URLs from the Github search results page.\n XPATH_SNIPPET_URLS = '//div[contains(@class, \"code-list-item\") and contains(@class, \"code-list-item-public\")]' \\\n '//p[@class=\"title\"]//a[@title]/@href'\n\n def __init__(self, task_description, language):\n \"\"\" Initializes the `GithubCodeSnippetProvider` object.\n\n :param task_description: A description of the task to complete.\n :type task_description: str\n :param language: The programming language the code snippets should be in.\n :type language: str\n \"\"\"\n AbstractCodeSnippetProvider.__init__(self, task_description, language)\n\n @staticmethod\n def _construct_raw_user_content_url_path(code_snippet_url):\n \"\"\" Returns the raw user content URL for the given code snippet URL.\n\n :return: The raw user content URL for the given code snippet URL.\n :rtype: str\n\n .. 
code-block:: python\n\n code_snippet_url = '/username/reponame/blob/hashvalue/path/to/file'\n\n # Returns ...\n\n '/username/reponame/hashvalue/path/to/file'\n \"\"\"\n parts_of_path = code_snippet_url.split(GithubCodeSnippetProvider.PATH_DELIM)\n return GithubCodeSnippetProvider.PATH_DELIM.join(\n parts_of_path[:GithubCodeSnippetProvider.BLOB_INDEX] +\n parts_of_path[GithubCodeSnippetProvider.BLOB_INDEX + 1:]\n )\n\n @staticmethod\n def _get_code_snippets_from_snippet_urls(code_snippet_urls):\n \"\"\" Returns the code snippets resident at the given snippet URls.\n\n :param code_snippet_urls: A list of the URLs of code snippets related to the given task description and\n language.\n :type code_snippet_urls: list\n\n :return: A list of the code snippets resident at the given snippet URLs.\n :rtype: list\n \"\"\"\n code_snippets = []\n for code_snippet_url in code_snippet_urls:\n raw_user_content_url_path = GithubCodeSnippetProvider._construct_raw_user_content_url_path(code_snippet_url)\n request_url = RequestBuilder(\n GithubCodeSnippetProvider.RAW_GITHUB_USER_CONTENT_DOMAIN,\n path=raw_user_content_url_path,\n ).build()\n page = requests.get(request_url)\n code_snippets.append(page.content)\n return code_snippets\n\n def _get_code_snippet_urls(self):\n \"\"\" Returns the URLs of all code snippets related to the given task description and language.\n\n :return: A list of the URLs of code snippets related to the given task description and language.\n :rtype: list\n \"\"\"\n code_snippet_urls = []\n for page_number in xrange(GithubCodeSnippetProvider.NUM_PAGES_TO_CHECK):\n request_url = RequestBuilder(\n GithubCodeSnippetProvider.GITHUB_DOMAIN,\n path=GithubCodeSnippetProvider.GITHUB_SEARCH_PATH,\n params={\n GithubCodeSnippetProvider.GITHUB_LANGUAGE_KEY: self._language,\n GithubCodeSnippetProvider.GITHUB_PAGE_KEY: page_number + 1,\n GithubCodeSnippetProvider.GITHUB_QUERY_KEY: GithubCodeSnippetProvider.SPACE_DELIM.join(\n self._task_description.split()\n ),\n GithubCodeSnippetProvider.GITHUB_TYPE_KEY: GithubCodeSnippetProvider.GITHUB_TYPE_VALUE,\n },\n ).build()\n page = requests.get(request_url)\n if page.status_code != httplib.OK:\n # This occurs if the page number exceeds the the number of pages for the available search results.\n break\n tree = html.fromstring(page.content)\n code_snippet_urls[0:0] = tree.xpath(GithubCodeSnippetProvider.XPATH_SNIPPET_URLS)\n return code_snippet_urls\n\n def get_code_snippets(self):\n \"\"\" Returns the code snippets related to the given task description and language.\n\n :return: A list of code snippets related to the given task description and language.\n :rtype: list\n \"\"\"\n code_snippet_urls = self._get_code_snippet_urls()\n self._code_snippets = GithubCodeSnippetProvider._get_code_snippets_from_snippet_urls(code_snippet_urls)\n return self._code_snippets\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2548377350738495000,"string":"2,548,377,350,738,495,000"},"line_mean":{"kind":"number","value":39.875,"string":"39.875"},"line_max":{"kind":"number","value":120,"string":"120"},"alpha_frac":{"kind":"number","value":0.6488914373,"string":"0.648891"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109466,"cells":{"repo_name":{"kind":"string","value":"pkhorrami4/make_chen_dataset"},"path":{"kind":"string","value":"code/detect_faces.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"6022"},"content":{"kind":"string","value":"import argparse\nimport os\nimport shutil\nimport 
sys\nfrom time import time\n\nimport numpy\nimport skimage.transform\n\nimport dlib\nfrom ffvideo import VideoStream\n\n\ndef detect_crop_all_faces(X):\n num_frames = X.shape[0]\n all_cropped_faces = numpy.zeros((num_frames, 3, 96, 96), dtype=numpy.uint8)\n all_landmarks = numpy.zeros((num_frames, 2*68), dtype=numpy.float32)\n fail_vec = numpy.zeros(num_frames, dtype=numpy.uint8)\n print all_cropped_faces.shape\n\n for i in range(num_frames):\n #for i in range(100):\n img = X[i, :, :, :]\n\n # Detect face / landmarks with dlib\n time_start = time()\n detect_flag, landmarks = detect_face_dlib(img)\n\n # If face detected:\n if detect_flag != 0:\n # Crop it (using landmarks) and convert to grayscale\n crop_frame, bb = crop_frame_using_landmarks(img, landmarks)\n crop_frame = skimage.transform.resize(crop_frame, (96, 96))\n crop_frame = numpy.uint8(crop_frame*255.0)\n # skimage.io.imsave('./img_%.4d.jpg' % i, crop_frame)\n\n # Re-adjust the landmarks\n landmarks = normalize_landmarks(landmarks, bb, 96)\n\n # Save cropped image\n all_cropped_faces[i, :, :, :] = crop_frame.transpose(2, 0, 1)\n all_landmarks[i, :] = landmarks\n fail_vec[i] = 0\n time_elapsed = time() - time_start\n print 'Processing frame (%d/%d) -- %.2f sec.' % (i, num_frames,\n time_elapsed)\n else:\n print 'Face missed in frame (%d/%d)' % (i, num_frames)\n fail_vec[i] = 1\n\n return all_cropped_faces, all_landmarks, fail_vec\n\n\ndef detect_face_dlib(frame):\n num_landmarks = 68\n predictor_path = '/var/research/Code/dlib-18.17/python_examples/' \\\n 'shape_predictor/shape_predictor_68_face_landmarks.dat'\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(predictor_path)\n\n det = detector(frame, 1)\n\n if det:\n det = det[0]\n detect_flag = 1\n landmarks = []\n shape = predictor(frame, det)\n for i in range(num_landmarks):\n part = shape.part(i)\n landmarks.append(part.x)\n landmarks.append(part.y)\n landmarks = numpy.array(landmarks, dtype='float32')\n else:\n detect_flag = 0\n landmarks = numpy.zeros((2*num_landmarks), dtype='float32')\n\n # print detect_flag, landmarks\n return detect_flag, landmarks\n\n\ndef normalize_landmarks(landmarks, face_bb, new_img_size):\n \"\"\" Function to readjust the detected facial landmarks when\n the face is cropped out of the frame.\"\"\"\n\n # Subtract upper left corner of face bounding box\n rep_face_bb = numpy.tile(face_bb[0:2], len(landmarks)/2)\n landmarks -= rep_face_bb\n\n # Scale x,y coordinates from face_w, face_h to be in 96x96 image\n scale_vec = numpy.tile([new_img_size/face_bb[2], new_img_size/face_bb[3]],\n len(landmarks)/2)\n landmarks *= scale_vec\n\n return landmarks\n\n\ndef crop_frame_using_landmarks(frame, landmarks):\n \"\"\" Function to crop the face using the detected facial\n landmarks (courtesy of dlib).\"\"\"\n\n landmarks = numpy.reshape(landmarks, (2, len(landmarks)/2), 'F')\n min_x = numpy.min(landmarks[0, :])\n min_y = numpy.min(landmarks[1, :])-30 # include more of the brow\n max_x = numpy.max(landmarks[0, :])\n max_y = numpy.max(landmarks[1, :])\n\n # print min_x, max_x\n # print min_y, max_y\n crop_frame = frame[min_y:max_y, min_x:max_x, :]\n bb = (min_x, min_y, max_x-min_x, max_y-min_y)\n return crop_frame, bb\n\n\ndef save_out_data(save_path, save_filename, data):\n \"\"\"Save data as .npy file to location given by save_path.\"\"\"\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n save_file_path = os.path.join(save_path, save_filename)\n numpy.save(save_file_path, data)\n\n\ndef parse_args():\n parser = 
argparse.ArgumentParser(description='Detect and extract faces in '\n 'specified .npy and save it.', \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--npy_file_path', dest='npy_file_path',\n default='/data/Expr_Recog/Chen_Huang_avdata_python/npy_files_raw/',\n help='Path to .npy file containing un-cropped faces.')\n parser.add_argument('--save_path', dest='save_path',\n default='./npy_cropped_faces/',\n help='Folder to save output .npy files.')\n parser.add_argument('--subj_id', dest='subj_id',\n help='Subject to extract cropped faces.')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n print 'Args: ', args\n\n time_start = time()\n\n npy_file_path = args.npy_file_path\n save_path = args.save_path\n subj_id = args.subj_id\n\n # Load data\n input_X_filename = 'X_'+subj_id+'.npy'\n X = numpy.load(os.path.join(npy_file_path, input_X_filename))\n\n # Detect and crop faces\n all_cropped_faces, all_landmarks, fail_vec = detect_crop_all_faces(X)\n\n # Save data to .npy files\n output_X_filename = 'X_'+subj_id+'.npy'\n output_landmark_filename = 'landmarks_'+subj_id+'.npy'\n output_fail_vec_filename = 'fail_vec_'+subj_id+'.npy'\n save_out_data(save_path, output_X_filename, all_cropped_faces)\n save_out_data(save_path, output_landmark_filename, all_landmarks)\n save_out_data(save_path, output_fail_vec_filename, fail_vec)\n\n # Copy label file\n y_src_file = os.path.join(npy_file_path, 'y_'+subj_id+'.npy')\n y_dest_file = os.path.join(save_path, 'y_'+subj_id+'.npy')\n shutil.copyfile(y_src_file, y_dest_file)\n\n time_elapsed = time() - time_start\n print 'Total Execution Time: %.2f sec.' % time_elapsed\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":1692513392116138500,"string":"1,692,513,392,116,138,500"},"line_mean":{"kind":"number","value":33.0225988701,"string":"33.022599"},"line_max":{"kind":"number","value":92,"string":"92"},"alpha_frac":{"kind":"number","value":0.5994686151,"string":"0.599469"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109467,"cells":{"repo_name":{"kind":"string","value":"praekelt/ummeli"},"path":{"kind":"string","value":"ummeli/base/migrations/0009_auto__add_skill__add_field_curriculumvitae_preferred_skill.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"12779"},"content":{"kind":"string","value":"# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n \n # Adding model 'Skill'\n db.create_table('base_skill', (\n ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('skill', self.gf('django.db.models.fields.CharField')(max_length=45)),\n ))\n db.send_create_signal('base', ['Skill'])\n\n # Adding field 'CurriculumVitae.preferred_skill'\n db.add_column('base_curriculumvitae', 'preferred_skill', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='profiles_preferred', null=True, to=orm['base.Skill']), keep_default=False)\n\n # Adding M2M table for field skills on 'CurriculumVitae'\n db.create_table('base_curriculumvitae_skills', (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('curriculumvitae', models.ForeignKey(orm['base.curriculumvitae'], null=False)),\n ('skill', models.ForeignKey(orm['base.skill'], 
null=False))\n ))\n db.create_unique('base_curriculumvitae_skills', ['curriculumvitae_id', 'skill_id'])\n\n\n def backwards(self, orm):\n \n # Deleting model 'Skill'\n db.delete_table('base_skill')\n\n # Deleting field 'CurriculumVitae.preferred_skill'\n db.delete_column('base_curriculumvitae', 'preferred_skill_id')\n\n # Removing M2M table for field skills on 'CurriculumVitae'\n db.delete_table('base_curriculumvitae_skills')\n\n\n models = {\n 'auth.group': {\n 'Meta': {'object_name': 'Group'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n 'auth.permission': {\n 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n 'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Group']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n 'base.article': {\n 'Meta': {'object_name': 'Article'},\n 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 29, 18, 14, 13, 829532)', 'blank': 'True'}),\n 'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'text': ('django.db.models.fields.TextField', [], {})\n },\n 'base.category': {\n 'Meta': {'object_name': 'Category'},\n 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': \"orm['base.Article']\", 'null': 'True', 'blank': 'True'}),\n 'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),\n 
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'province': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['base.Province']\"}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '45'}),\n 'user_submitted_job_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': \"orm['base.UserSubmittedJobArticle']\", 'null': 'True', 'blank': 'True'})\n },\n 'base.certificate': {\n 'Meta': {'object_name': 'Certificate'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),\n 'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})\n },\n 'base.curriculumvitae': {\n 'Meta': {'object_name': 'CurriculumVitae'},\n 'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['base.Certificate']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'connection_requests'\", 'blank': 'True', 'to': \"orm['auth.User']\"}),\n 'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),\n 'house_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['base.Language']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),\n 'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': \"'profiles_preferred'\", 'null': 'True', 'to': \"orm['base.Skill']\"}),\n 'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['base.Reference']\", 'symmetrical': 'False', 'blank': 'True'}),\n 'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': \"'profiles'\", 'null': 'True', 'symmetrical': 'False', 'to': \"orm['base.Skill']\"}),\n 'street_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'telephone_number': ('django.db.models.fields.CharField', [], 
{'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': \"orm['auth.User']\", 'unique': 'True'}),\n 'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['base.WorkExperience']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n 'base.language': {\n 'Meta': {'object_name': 'Language'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}),\n 'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})\n },\n 'base.province': {\n 'Meta': {'object_name': 'Province'},\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),\n 'search_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})\n },\n 'base.reference': {\n 'Meta': {'object_name': 'Reference'},\n 'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'})\n },\n 'base.skill': {\n 'Meta': {'object_name': 'Skill'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'})\n },\n 'base.usersubmittedjobarticle': {\n 'Meta': {'object_name': 'UserSubmittedJobArticle'},\n 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'text': ('django.db.models.fields.TextField', [], {'default': \"''\"}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'user_submitted_job_article_user'\", 'to': \"orm['auth.User']\"})\n },\n 'base.workexperience': {\n 'Meta': {'object_name': 'WorkExperience'},\n 'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),\n 'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})\n },\n 'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n }\n }\n\n complete_apps = 
['base']\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":3870404524602337300,"string":"3,870,404,524,602,337,300"},"line_mean":{"kind":"number","value":73.2965116279,"string":"73.296512"},"line_max":{"kind":"number","value":219,"string":"219"},"alpha_frac":{"kind":"number","value":0.555677283,"string":"0.555677"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109468,"cells":{"repo_name":{"kind":"string","value":"gnarph/DIRT"},"path":{"kind":"string","value":"utilities/path.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1652"},"content":{"kind":"string","value":"import os\nimport shutil\n\n\ndef iter_files_in(directory):\n \"\"\"\n Iterate over all filenames in a directory\n Does not descend into sub-directories\n :param directory: directory to look for\n :return: generator\n \"\"\"\n for item_name in os.listdir(directory):\n\n full_name = os.path.join(directory, item_name)\n print full_name, directory\n if should_use_file(full_name):\n yield full_name\n\n\ndef should_use_file(name):\n \"\"\"\n Should DIRT use the file?\n :param name: name of file\n :return: boolean\n \"\"\"\n if is_hidden_file(name):\n return False\n return os.path.isfile(name)\n\n\ndef is_hidden_file(full_name):\n \"\"\"\n Is a file hidden?\n :param full_name: name of file\n :return: True or False\n \"\"\"\n name = get_name(full_name)\n return name[0] == '.'\n\n\ndef get_name(filename, extension=True):\n \"\"\"\n Get name of a file without the path portion\n :param filename: path of file\n :param extension: include the extension?\n :return: name of file without path\n \"\"\"\n fn = filename if extension else os.path.splitext(filename)[0]\n return os.path.split(fn)[1]\n\n\ndef delete_folder(name):\n \"\"\"\n Deletes folder, if folder does not exist, fails silently\n :param name: name/path of folder to delete\n \"\"\"\n try:\n shutil.rmtree(name)\n except OSError:\n pass\n\n\ndef create_folder(name):\n \"\"\"\n Creates a folder\n :param name: folder name/path\n \"\"\"\n os.makedirs(name)\n\n\ndef reset_folder(name):\n \"\"\"\n Cleanout a folder\n :param name: folder to clean out\n :return:\n \"\"\"\n delete_folder(name)\n create_folder(name)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8095127568173375000,"string":"8,095,127,568,173,375,000"},"line_mean":{"kind":"number","value":20.1794871795,"string":"20.179487"},"line_max":{"kind":"number","value":65,"string":"65"},"alpha_frac":{"kind":"number","value":0.6216707022,"string":"0.621671"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109469,"cells":{"repo_name":{"kind":"string","value":"valdergallo/mock_django_orm"},"path":{"kind":"string","value":"app/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1142"},"content":{"kind":"string","value":"from django.db import models\nfrom app.managers import AppSecondManager\n\n\nclass AppOne(models.Model):\n name = models.CharField(max_length=50)\n description = models.TextField()\n\n def get_full_description(self):\n return u'%s / %s' % (self.name, self.description)\n\n def __unicode__(self):\n return self.name\n\n\nclass AppSecond(models.Model):\n name = models.CharField(max_length=50)\n description = models.TextField()\n\n objects = AppSecondManager()\n\n def __unicode__(self):\n return self.name\n\n\nclass AppThird(models.Model):\n name = models.CharField(max_length=50)\n app_one = models.ForeignKey(AppOne)\n app_second = 
models.ForeignKey(AppSecond)\n\n def get_extra(self):\n if self.app_one.name == '1':\n return u'%s-%s' % (self.app_one.name, self.name)\n elif self.app_second.name == '1':\n return u'%s-%s' % (self.app_second.name, self.name)\n elif self.app_one.name == '2' and self.app_second.name == '2':\n return u'%s%s-%s' % (self.app_one.name,\n self.app_second.name,\n self.name)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-6667769444941581000,"string":"-6,667,769,444,941,581,000"},"line_mean":{"kind":"number","value":28.2820512821,"string":"28.282051"},"line_max":{"kind":"number","value":70,"string":"70"},"alpha_frac":{"kind":"number","value":0.5954465849,"string":"0.595447"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109470,"cells":{"repo_name":{"kind":"string","value":"amsqr/k-Met"},"path":{"kind":"string","value":"kMet.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5656"},"content":{"kind":"string","value":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\nimport os,sys,glob\nimport re\nfrom decimal import *\nimport re, collections\nfrom phonetic_algorithms import PhoneticAlgorithms\nimport difflib\n#from zlib import compress\n\n \n# kMet Phonetic Clustering Algorithm\n# Copyright (C) 2012 Alejandro Mosquera \n \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nprint \"kMet 0.1\"\nprint \"Support: amsqr2@gmail.com\\n\\n\"\n\nclass kMet(object):\n\n\t\"\"\" kMet Phonetic Clustering Algorithm \"\"\"\n\t\n\tdef __init__(self):\n        self.groups={}\n        self.phon_dict={}\n\n    \n    def save_phon_dict(self,words,dict_name):\n        f = open(words,'w')\n        for k in dict_name:\n            f.write(str(k) + '|||' + str(dict_name[k]) + '\\n')\n        f.close()\n\n    \n    \n    def text2phon(self,word,pword):\n        \n        phon = PhoneticAlgorithms().double_metaphone(pword)\n        phon = str(phon).split(',')\n        phon1= str(phon[0])[1:]\n        phon2= str(phon[1])[1:-1]\n        #print phon1,phon2\n        if phon1 in self.phon_dict:\n            lista=self.phon_dict[phon1]\n            lista=lista.split('###')\n            found=0\n            if word not in lista:\n                self.phon_dict[phon1]=self.phon_dict[phon1] + word +'###' \n        else:\n            \n            self.phon_dict[phon1]=word+'###'\n\n        if phon2 in self.phon_dict:\n            lista=self.phon_dict[phon2]\n            lista=lista.split('###')\n            found=0\n            if word not in lista:\n                self.phon_dict[phon2]=self.phon_dict[phon2] + word +'###'\n        else:\n            if phon2!='None': \n                self.phon_dict[phon2]=word+'###'\n        \n        \n        \n\n    ##def zip_sim(stringA, stringB):\n    ##\ta = len(compress(stringA))\n    ##\tb = len(compress(stringB))\n    ##\tc = len(compress(stringA + stringB))\n    ##\treturn 1.0 -(0.0 +a +b -c )/max (a ,b )\n\n\n    def replace_numbers(self,foo):\n        vocals=['a','e','i','o','u']\n        if (foo.isdigit()==False):\n            foo2=foo\n            foo=foo.replace('0','o')\n            foo=foo.replace('3','e')\n            foo=foo.replace('5','s')\n            foo=foo.replace('6','g')\n            foo=foo.replace('7','t')\n            foo=foo.replace('9','g')\n            foo=foo.replace('8','eight')\n            foo=foo.replace('4','for')\n            foo=foo.replace('2','to')\n            foo=foo.replace('1','one')\n        return foo\n\n\n\n    def cluster(self,met, words):\n        \n        clustered = False\n        for key in self.groups:\n            # Check for similarity\n            seq=difflib.SequenceMatcher(a=key,b=met)\n            dis=seq.ratio()\n            if dis>0.80:\n                if words!='' and words not in self.groups[key]:\n                    self.groups[key].append(words)\n                clustered = True\n                break\n            \n        if not clustered:\n            if not self.groups.has_key(met):\n                self.groups[met] = []\n            \n            self.groups[met].append(words)\n\n\n    def process_text(self,text):\n        punt_list=['.',',','!','?',';',':']\n        s=list(text)\n        texto=''.join([ o for o in s if not o in punt_list ]).split()\n\n        for word in texto:\n            if word in punt_list or word.find('http:')>-1 or word.find('www.')>-1:\n                pass\n            else:\n                pattern = re.compile('[\\W_]+')\n                word=pattern.sub('',word)\n                if word!='':\n                    self.text2phon(word,self.replace_numbers(word))\n        \n        for met in self.phon_dict:\n            #print met\n            lista = self.phon_dict[met].split('###')\n            for l in lista:\n                self.cluster(met,l)\n       \n        \n\ndef main():\n    kcluster=kMet()\n    kcluster.process_text('praises prices precious, process, presses, precise, purses, growing, grunge, grunge, carrying, crying, caring, carnage, crank, grinch, chronic, crank, to, the, do, they, day, needed, nudity, noted, knitted, knotted, that, thought, they, those, this, thought, without')\n    #print kcluster.groups\n    for g in kcluster.groups:\n        print str(g).replace(\"'\",'') + '###' + str(kcluster.groups[g]).replace(\"['\",'').replace(\"']\",'').replace(\"', '\",'|||')\n    \n    \n\n\nif __name__ == 
'__main__':\n\n\tmain()\n\t\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":3341968527875160600,"string":"3,341,968,527,875,160,600"},"line_mean":{"kind":"number","value":34.7974683544,"string":"34.797468"},"line_max":{"kind":"number","value":299,"string":"299"},"alpha_frac":{"kind":"number","value":0.4556223479,"string":"0.455622"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109471,"cells":{"repo_name":{"kind":"string","value":"BD2KGenomics/slugflow"},"path":{"kind":"string","value":"src/toil/jobStores/utils.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"12719"},"content":{"kind":"string","value":"import errno\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\n\nfrom toil.lib.threading import ExceptionalThread\n\nlog = logging.getLogger(__name__)\n\nclass WritablePipe(ABC):\n \"\"\"\n An object-oriented wrapper for os.pipe. Clients should subclass it, implement\n :meth:`.readFrom` to consume the readable end of the pipe, then instantiate the class as a\n context manager to get the writable end. See the example below.\n\n >>> import sys, shutil\n >>> class MyPipe(WritablePipe):\n ... def readFrom(self, readable):\n ... shutil.copyfileobj(codecs.getreader('utf-8')(readable), sys.stdout)\n >>> with MyPipe() as writable:\n ... _ = writable.write('Hello, world!\\\\n'.encode('utf-8'))\n Hello, world!\n\n Each instance of this class creates a thread and invokes the readFrom method in that thread.\n The thread will be join()ed upon normal exit from the context manager, i.e. the body of the\n `with` statement. If an exception occurs, the thread will not be joined but a well-behaved\n :meth:`.readFrom` implementation will terminate shortly thereafter due to the pipe having\n been closed.\n\n Now, exceptions in the reader thread will be reraised in the main thread:\n\n >>> class MyPipe(WritablePipe):\n ... def readFrom(self, readable):\n ... raise RuntimeError('Hello, world!')\n >>> with MyPipe() as writable:\n ... pass\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n\n More complicated, less illustrative tests:\n\n Same as above, but proving that handles are closed:\n\n >>> x = os.dup(0); os.close(x)\n >>> class MyPipe(WritablePipe):\n ... def readFrom(self, readable):\n ... raise RuntimeError('Hello, world!')\n >>> with MyPipe() as writable:\n ... pass\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n >>> y = os.dup(0); os.close(y); x == y\n True\n\n Exceptions in the body of the with statement aren't masked, and handles are closed:\n\n >>> x = os.dup(0); os.close(x)\n >>> class MyPipe(WritablePipe):\n ... def readFrom(self, readable):\n ... pass\n >>> with MyPipe() as writable:\n ... raise RuntimeError('Hello, world!')\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n >>> y = os.dup(0); os.close(y); x == y\n True\n \"\"\"\n\n @abstractmethod\n def readFrom(self, readable):\n \"\"\"\n Implement this method to read data from the pipe. This method should support both\n binary and text mode output.\n\n :param file readable: the file object representing the readable end of the pipe. Do not\n explicitly invoke the close() method of the object, that will be done automatically.\n \"\"\"\n raise NotImplementedError()\n\n def _reader(self):\n with os.fdopen(self.readable_fh, 'rb') as readable:\n # TODO: If the reader somehow crashes here, both threads might try\n # to close readable_fh. 
Fortunately we don't do anything that\n # should be able to fail here.\n self.readable_fh = None # signal to parent thread that we've taken over\n self.readFrom(readable)\n self.reader_done = True\n\n def __init__(self, encoding=None, errors=None):\n \"\"\"\n The specified encoding and errors apply to the writable end of the pipe.\n\n :param str encoding: the name of the encoding used to encode the file. Encodings are the same\n as for encode(). Defaults to None which represents binary mode.\n\n :param str errors: an optional string that specifies how encoding errors are to be handled. Errors\n are the same as for open(). Defaults to 'strict' when an encoding is specified.\n \"\"\"\n super(WritablePipe, self).__init__()\n self.encoding = encoding\n self.errors = errors\n self.readable_fh = None\n self.writable = None\n self.thread = None\n self.reader_done = False\n\n def __enter__(self):\n self.readable_fh, writable_fh = os.pipe()\n self.writable = os.fdopen(writable_fh, 'wb' if self.encoding == None else 'wt', encoding=self.encoding, errors=self.errors)\n self.thread = ExceptionalThread(target=self._reader)\n self.thread.start()\n return self.writable\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Closeing the writable end will send EOF to the readable and cause the reader thread\n # to finish.\n # TODO: Can close() fail? If so, whould we try and clean up after the reader?\n self.writable.close()\n try:\n if self.thread is not None:\n # reraises any exception that was raised in the thread\n self.thread.join()\n except Exception as e:\n if exc_type is None:\n # Only raise the child exception if there wasn't\n # already an exception in the main thread\n raise\n else:\n log.error('Swallowing additional exception in reader thread: %s', str(e))\n finally:\n # The responsibility for closing the readable end is generally that of the reader\n # thread. To cover the small window before the reader takes over we also close it here.\n readable_fh = self.readable_fh\n if readable_fh is not None:\n # Close the file handle. The reader thread must be dead now.\n os.close(readable_fh)\n\n\nclass ReadablePipe(ABC):\n \"\"\"\n An object-oriented wrapper for os.pipe. Clients should subclass it, implement\n :meth:`.writeTo` to place data into the writable end of the pipe, then instantiate the class\n as a context manager to get the writable end. See the example below.\n\n >>> import sys, shutil\n >>> class MyPipe(ReadablePipe):\n ... def writeTo(self, writable):\n ... writable.write('Hello, world!\\\\n'.encode('utf-8'))\n >>> with MyPipe() as readable:\n ... shutil.copyfileobj(codecs.getreader('utf-8')(readable), sys.stdout)\n Hello, world!\n\n Each instance of this class creates a thread and invokes the :meth:`.writeTo` method in that\n thread. The thread will be join()ed upon normal exit from the context manager, i.e. the body\n of the `with` statement. If an exception occurs, the thread will not be joined but a\n well-behaved :meth:`.writeTo` implementation will terminate shortly thereafter due to the\n pipe having been closed.\n\n Now, exceptions in the reader thread will be reraised in the main thread:\n\n >>> class MyPipe(ReadablePipe):\n ... def writeTo(self, writable):\n ... raise RuntimeError('Hello, world!')\n >>> with MyPipe() as readable:\n ... 
pass\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n\n More complicated, less illustrative tests:\n\n Same as above, but proving that handles are closed:\n\n >>> x = os.dup(0); os.close(x)\n >>> class MyPipe(ReadablePipe):\n ... def writeTo(self, writable):\n ... raise RuntimeError('Hello, world!')\n >>> with MyPipe() as readable:\n ... pass\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n >>> y = os.dup(0); os.close(y); x == y\n True\n\n Exceptions in the body of the with statement aren't masked, and handles are closed:\n\n >>> x = os.dup(0); os.close(x)\n >>> class MyPipe(ReadablePipe):\n ... def writeTo(self, writable):\n ... pass\n >>> with MyPipe() as readable:\n ... raise RuntimeError('Hello, world!')\n Traceback (most recent call last):\n ...\n RuntimeError: Hello, world!\n >>> y = os.dup(0); os.close(y); x == y\n True\n \"\"\"\n\n @abstractmethod\n def writeTo(self, writable):\n \"\"\"\n Implement this method to write data from the pipe. This method should support both\n binary and text mode input.\n\n :param file writable: the file object representing the writable end of the pipe. Do not\n explicitly invoke the close() method of the object, that will be done automatically.\n \"\"\"\n raise NotImplementedError()\n\n def _writer(self):\n try:\n with os.fdopen(self.writable_fh, 'wb') as writable:\n self.writeTo(writable)\n except IOError as e:\n # The other side of the pipe may have been closed by the\n # reading thread, which is OK.\n if e.errno != errno.EPIPE:\n raise\n\n def __init__(self, encoding=None, errors=None):\n \"\"\"\n The specified encoding and errors apply to the readable end of the pipe.\n\n :param str encoding: the name of the encoding used to encode the file. Encodings are the same\n as for encode(). Defaults to None which represents binary mode.\n\n :param str errors: an optional string that specifies how encoding errors are to be handled. Errors\n are the same as for open(). Defaults to 'strict' when an encoding is specified.\n \"\"\"\n super(ReadablePipe, self).__init__()\n self.encoding = encoding\n self.errors = errors\n self.writable_fh = None\n self.readable = None\n self.thread = None\n\n def __enter__(self):\n readable_fh, self.writable_fh = os.pipe()\n self.readable = os.fdopen(readable_fh, 'rb' if self.encoding == None else 'rt', encoding=self.encoding, errors=self.errors)\n self.thread = ExceptionalThread(target=self._writer)\n self.thread.start()\n return self.readable\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Close the read end of the pipe. The writing thread may\n # still be writing to the other end, but this will wake it up\n # if that's the case.\n self.readable.close()\n try:\n if self.thread is not None:\n # reraises any exception that was raised in the thread\n self.thread.join()\n except:\n if exc_type is None:\n # Only raise the child exception if there wasn't\n # already an exception in the main thread\n raise\n\nclass ReadableTransformingPipe(ReadablePipe):\n \"\"\"\n A pipe which is constructed around a readable stream, and which provides a\n context manager that gives a readable stream.\n\n Useful as a base class for pipes which have to transform or otherwise visit\n bytes that flow through them, instead of just consuming or producing data.\n\n Clients should subclass it and implement :meth:`.transform`, like so:\n\n >>> import sys, shutil\n >>> class MyPipe(ReadableTransformingPipe):\n ... def transform(self, readable, writable):\n ... 
writable.write(readable.read().decode('utf-8').upper().encode('utf-8'))\n >>> class SourcePipe(ReadablePipe):\n ... def writeTo(self, writable):\n ... writable.write('Hello, world!\\\\n'.encode('utf-8'))\n >>> with SourcePipe() as source:\n ... with MyPipe(source) as transformed:\n ... shutil.copyfileobj(codecs.getreader('utf-8')(transformed), sys.stdout)\n HELLO, WORLD!\n\n The :meth:`.transform` method runs in its own thread, and should move data\n chunk by chunk instead of all at once. It should finish normally if it\n encounters either an EOF on the readable, or a :class:`BrokenPipeError` on\n the writable. This means tat it should make sure to actually catch a\n :class:`BrokenPipeError` when writing.\n\n See also: :class:`toil.lib.misc.WriteWatchingStream`.\n\n \"\"\"\n\n \n def __init__(self, source, encoding=None, errors=None):\n \"\"\"\n :param str encoding: the name of the encoding used to encode the file. Encodings are the same\n as for encode(). Defaults to None which represents binary mode.\n\n :param str errors: an optional string that specifies how encoding errors are to be handled. Errors\n are the same as for open(). Defaults to 'strict' when an encoding is specified.\n \"\"\"\n super(ReadableTransformingPipe, self).__init__(encoding=encoding, errors=errors)\n self.source = source\n\n @abstractmethod\n def transform(self, readable, writable):\n \"\"\"\n Implement this method to ship data through the pipe.\n\n :param file readable: the input stream file object to transform.\n\n :param file writable: the file object representing the writable end of the pipe. Do not\n explicitly invoke the close() method of the object, that will be done automatically.\n \"\"\"\n raise NotImplementedError()\n\n def writeTo(self, writable):\n self.transform(self.source, writable)\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":4686543333265231000,"string":"4,686,543,333,265,231,000"},"line_mean":{"kind":"number","value":38.6230529595,"string":"38.623053"},"line_max":{"kind":"number","value":131,"string":"131"},"alpha_frac":{"kind":"number","value":0.6297664911,"string":"0.629766"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109472,"cells":{"repo_name":{"kind":"string","value":"gary-pickens/HouseMonitor"},"path":{"kind":"string","value":"housemonitor/inputs/zigbeeinput/xbeeinputthread.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1775"},"content":{"kind":"string","value":"'''\nCreated on Oct 10, 2012\n\n@author: Gary\n'''\nimport threading\nimport os\n\nfrom housemonitor.inputs.zigbeeinput.beagleboneblackxbeecommunications import BeagleboneBlackXbeeCommunications\nfrom windowsxbeecommunications import WindowsXbeeCommunications\nfrom housemonitor.inputs.dataenvelope import DataEnvelope\nfrom housemonitor.lib.constants import Constants\nfrom housemonitor.lib.base import Base\n\n\nclass UnsupportedSystemError( Exception ):\n pass\n\n\nclass XBeeInputThread( Base, threading.Thread ):\n '''\n classdocs\n '''\n\n input_queue = None\n zigbee = None\n exit_flag = False\n\n communication_module = {'posix': BeagleboneBlackXbeeCommunications,\n 'nt': WindowsXbeeCommunications}\n\n @property\n def logger_name( self ):\n return Constants.LogKeys.inputsZigBee\n\n def __init__( self, queue ):\n '''\n Constructor\n args:\n queue is the InputQueue\n\n '''\n super( XBeeInputThread, self ).__init__()\n threading.Thread.__init__( self )\n self.input_queue = queue\n\n def startCorrectZigbee( self, os_name=os.name ):\n 
if ( os_name in self.communication_module ):\n self.logger.debug( 'connect to zigbee on {}'.format( os_name ) )\n self.zigbee = self.communication_module[os_name]()\n else:\n raise UnsupportedSystemError( \"System {} not supported\".format( os_name ) )\n\n def run( self ):\n self.startCorrectZigbee()\n self.zigbee.connect()\n while True:\n packet = self.zigbee.read()\n env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **packet )\n self.input_queue.transmit( env, self.input_queue.MID_PRIORITY )\n if ( self.exit_flag ):\n break\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8195371330058601000,"string":"8,195,371,330,058,601,000"},"line_mean":{"kind":"number","value":27.6290322581,"string":"27.629032"},"line_max":{"kind":"number","value":111,"string":"111"},"alpha_frac":{"kind":"number","value":0.6428169014,"string":"0.642817"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109473,"cells":{"repo_name":{"kind":"string","value":"bobslee/orbeon-xml-api"},"path":{"kind":"string","value":"orbeon_xml_api/tests/controls/test_currency.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2103"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright 2017-2018 Bob Leers (http://www.novacode.nl)\n# See LICENSE file for full licensing details.\n\nfrom . import CommonTestCase\n\nfrom ..controls import DecimalControl\n\n\nclass CurrencyTestCase(CommonTestCase):\n\n def setUp(self):\n super(CurrencyTestCase, self).setUp()\n self.control = self.builder.controls['currency']\n\n def test_control(self):\n self.assertIsInstance(self.control, DecimalControl)\n\n def test_builder_bind(self):\n self.assertEqual(self.control._bind.id, 'currency-bind')\n self.assertEqual(self.control._bind.name, 'currency')\n\n def test_builder_parent(self):\n self.assertEqual(self.control._parent._bind.id, 'typed-controls-bind')\n self.assertEqual(self.control._parent._bind.name, 'typed-controls')\n self.assertEqual(self.control._parent._resource_element.label, 'Typed Controls')\n\n def test_builder_form(self):\n self.assertEqual(self.control.label, 'Currency')\n self.assertEqual(self.control.hint, 'Currency field')\n self.assertEqual(self.control.alert, None)\n\n self.assertEqual(self.control._resource_element.label, 'Currency')\n self.assertEqual(self.control._resource_element.hint, 'Currency field')\n\n # Doesn't exist, but shouldn't raise Exception\n self.assertEqual(self.control._resource_element.alert, None)\n\n def test_builder_form_default_value(self):\n self.assertEqual(self.control.default_raw_value, '10.99')\n self.assertEqual(self.control.default_value, 10.99)\n self.assertIsInstance(self.control.default_value, float)\n\n def test_runner_value(self):\n self.assertEqual(self.runner.get_value('currency'), 101.33)\n self.assertIsInstance(self.runner.get_value('currency'), float)\n\n def test_runner_form(self):\n self.assertEqual(self.runner.form.currency.label, 'Currency')\n self.assertEqual(self.runner.form.currency.value, 101.33)\n self.assertEqual(self.runner.form.currency.raw_value, '101.33')\n self.assertIsInstance(self.runner.form.currency.value, 
float)\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":402809001055917440,"string":"402,809,001,055,917,440"},"line_mean":{"kind":"number","value":39.4423076923,"string":"39.442308"},"line_max":{"kind":"number","value":88,"string":"88"},"alpha_frac":{"kind":"number","value":0.7004279601,"string":"0.700428"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109474,"cells":{"repo_name":{"kind":"string","value":"kaspermunch/CoalhmmPipeline"},"path":{"kind":"string","value":"CoalhmmPipeline/MafJunkContentQualityFilter.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1480"},"content":{"kind":"string","value":"class MafJunkContentQualityFilter:\n def __init__(self, acceptableNpercentage, junkCharacters):\n\n self.acceptableNpercentage = acceptableNpercentage\n self.junkchars = junkCharacters\n \n def accept(self, maf):\n junkchars = self.junkchars\n for i in range(maf.count()): \n ns = 0\n total = 0\n for j in maf.data(i):\n if j in junkchars:\n ns += 1\n total += 1\n \n if ns > self.acceptableNpercentage*total:\n return False\n \n return True\n \n \nclass MafIngroupJunkContentQualityFilter:\n def __init__(self, ingroup, acceptableNpercentage, junkCharacters):\n\n self.acceptableNpercentage = acceptableNpercentage\n self.junkchars = junkCharacters\n self.ingroup = ingroup\n \n def accept(self, maf):\n junkchars = self.junkchars\n for i in range(maf.count()): \n if maf.name(i) not in self.ingroup:\n continue\n ns = 0\n total = 0 \n for j in maf.data(i):\n if j in junkchars:\n ns += 1\n total += 1\n \n if ns > self.acceptableNpercentage*total:\n return False\n \n return True\n \n \n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":858707482881620200,"string":"858,707,482,881,620,200"},"line_mean":{"kind":"number","value":30.4893617021,"string":"30.489362"},"line_max":{"kind":"number","value":71,"string":"71"},"alpha_frac":{"kind":"number","value":0.4621621622,"string":"0.462162"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109475,"cells":{"repo_name":{"kind":"string","value":"clld/lexibank"},"path":{"kind":"string","value":"lexibank/scripts/util.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5246"},"content":{"kind":"string","value":"from __future__ import unicode_literals\nfrom itertools import groupby\n\nimport transaction\nfrom six import text_type\nfrom clld.db.meta import DBSession\nfrom clld.db.models.common import ValueSet\nfrom clld.scripts.util import Data\nfrom clld.lib.bibtex import EntryType, FIELDS\nfrom clldutils.dsv import reader\nfrom pycldf.dataset import Dataset\nfrom pycldf.util import MD_SUFFIX\nfrom tqdm import tqdm\n\nfrom lexibank.models import (\n LexibankLanguage, Concept, Counterpart, Provider, CounterpartReference,\n LexibankSource, Cognateset, CognatesetCounterpart,\n)\n\n\ndef unique_id(contrib, local_id):\n return '%s-%s' % (contrib.id, local_id)\n\n\ndef cldf2clld(source, contrib, id_):\n name = source.id\n if source.get('author'):\n name = source['author']\n if source.get('year'):\n name += ' %s' % source['year']\n description = source.get('title')\n return LexibankSource(\n id=unique_id(contrib, id_),\n provider=contrib,\n bibtex_type=getattr(EntryType, source.genre, EntryType.misc),\n name=name,\n description=description,\n **{k: v for k, v in source.items() if k in FIELDS and k not in ['institution']})\n\n\ndef import_dataset(ds, contrib, languoids, conceptsets, sources, values):\n data = 
Data()\n concepts = {p.id: p for p in DBSession.query(Concept)}\n langs = {l.id: l for l in DBSession.query(LexibankLanguage)}\n\n for i, row in enumerate(ds.rows):\n if not row['Value'] or not row['Parameter_ID'] or not row['Language_ID']:\n continue\n\n lid = row['Language_ID'].lower()\n if lid == 'none':\n continue\n\n if not row['Parameter_ID'].strip():\n continue\n\n language = langs.get(lid)\n if language is None:\n languoid = languoids.get(lid)\n if not languoid:\n continue\n langs[lid] = language = LexibankLanguage(\n id=lid,\n name=languoid.name,\n level=text_type(languoid.level.name),\n latitude=languoid.latitude if languoid.id != 'plau1238' else -10,\n longitude=languoid.longitude)\n\n concept = concepts.get(row['Parameter_ID'])\n if concept is None:\n cs = conceptsets[row['Parameter_ID']]\n concepts[row['Parameter_ID']] = concept = Concept(\n id=row['Parameter_ID'],\n name=cs.gloss,\n description=cs.definition,\n semanticfield=cs.semanticfield)\n\n vsid = unique_id(contrib, '%s-%s-%s' % (ds.name, language.id, concept.id))\n vid = unique_id(contrib, row['ID'])\n\n vs = data['ValueSet'].get(vsid)\n if vs is None:\n vs = data.add(\n ValueSet, vsid,\n id=vsid,\n parameter=concept,\n language=language,\n contribution=contrib,\n source=None) # FIXME: add sources!\n\n counterpart = values.add(\n Counterpart, row['ID'],\n id=vid,\n valueset=vs,\n name=row['Form'],\n description=row.get('Comment'),\n context=row['Value'],\n variety_name=row.get('Language_name'),\n loan=row.get('Loan', False),\n )\n\n for ref in row.refs:\n CounterpartReference(\n counterpart=counterpart,\n source=sources[ref.source.id],\n description=ref.description)\n\n\ndef import_cldf(srcdir, md, languoids, conceptsets):\n with transaction.manager:\n contrib = Provider(\n id=srcdir.name,\n name=md['dc:title'],\n description=md.get('dc:bibliographicCitation'),\n url=md.get('dc:identifier'),\n license=md.get('dc:license'),\n aboutUrl=md.get('aboutUrl'),\n )\n DBSession.add(contrib)\n sources = {}\n cldfdir = srcdir.joinpath('cldf')\n values = Data()\n for fname in tqdm(list(cldfdir.glob('*' + MD_SUFFIX)), leave=False):\n ds = Dataset.from_metadata(fname)\n for src in ds.sources.items():\n if src.id not in sources:\n sources[src.id] = cldf2clld(src, contrib, len(sources) + 1)\n import_dataset(ds, contrib, languoids, conceptsets, sources, values)\n DBSession.flush()\n # import cognates:\n if cldfdir.joinpath('cognates.csv').exists():\n for csid, cognates in groupby(\n reader(cldfdir.joinpath('cognates.csv'), dicts=True),\n lambda i: i['Cognate_set_ID']):\n cs = Cognateset(id=unique_id(contrib, csid), contribution=contrib)\n for cognate in cognates:\n cp = values['Counterpart'].get(cognate['Word_ID'])\n if cp:\n DBSession.add(CognatesetCounterpart(\n cognateset=cs,\n counterpart=cp,\n cognate_detection_method=cognate['Cognate_detection_method'],\n alignment=cognate['Alignment'],\n alignment_method=cognate['Alignment_method'],\n doubt=cognate['Doubt'] == 
'True'))\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":3760867208592166000,"string":"3,760,867,208,592,166,000"},"line_mean":{"kind":"number","value":35.1793103448,"string":"35.17931"},"line_max":{"kind":"number","value":89,"string":"89"},"alpha_frac":{"kind":"number","value":0.5596645063,"string":"0.559665"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109476,"cells":{"repo_name":{"kind":"string","value":"Syncano/syncano-cli"},"path":{"kind":"string","value":"syncano_cli/parse_to_syncano/processors/klass.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7049"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport json\n\nimport requests\nimport six\nfrom syncano_cli.base.formatters import Formatter\nfrom syncano_cli.parse_to_syncano.migrations.aggregation import ClassAggregate\nfrom syncano_cli.parse_to_syncano.parse.constants import ParseFieldTypeE\n\n\nclass SyncanoSchema(object):\n\n def __init__(self, class_name, schema, relations):\n self.class_name = class_name\n self.schema = schema\n self.relations = relations\n\n def process_relations(self):\n pass\n\n @property\n def has_relations(self):\n return bool(self.relations)\n\n\nclass ClassProcessor(object):\n map = {\n 'Number': 'integer',\n 'Date': 'datetime',\n 'Boolean': 'boolean',\n 'String': 'string',\n 'Array': 'array',\n 'Object': 'object',\n 'Pointer': 'reference',\n 'File': 'file',\n 'GeoPoint': 'geopoint',\n 'Relation': 'relation',\n }\n\n original_datetime_label = 'original_{}'\n\n @classmethod\n def handle_value(cls, value):\n return value\n\n @classmethod\n def handle_json_value(cls, value):\n return json.dumps(value)\n\n @classmethod\n def get_fields(cls, parse_fields):\n fields_to_skip = ['ACL', 'self'] # TODO: handle ACL later on\n\n fields = []\n\n for field in parse_fields:\n if field in fields_to_skip:\n continue\n\n fields.append(field.lower())\n return fields\n\n @classmethod\n def process_object(cls, parse_object, reference_map):\n syncano_fields = ClassProcessor.get_fields(parse_object.keys())\n processed_object = {}\n files = {}\n for key, value in six.iteritems(parse_object):\n if isinstance(value, dict):\n if '__type' in value:\n if value['__type'] == ParseFieldTypeE.RELATION:\n continue # will be handled in RelationProcessor\n cls._process_field_with_type(key, value, processed_object, files, reference_map)\n else: # and 'Object' case\n processed_object[key.lower()] = json.dumps(value)\n elif isinstance(value, list):\n cls._process_array_field(key, value, processed_object)\n\n else:\n cls._process_other_fields(key, value, processed_object, syncano_fields)\n return processed_object, files\n\n @classmethod\n def _process_field_with_type(cls, key, value, processed_object, files, reference_map):\n if value['__type'] == ParseFieldTypeE.DATE:\n processed_object[key.lower()] = value['iso']\n elif value['__type'] == ParseFieldTypeE.POINTER:\n processed_object[key.lower()] = reference_map.get(value['objectId'])\n elif value['__type'] == ParseFieldTypeE.FILE:\n file_data = requests.get(value['url'])\n file_path = '/tmp/{}'.format(value['name'])\n with open(file_path, 'wb+') as file_d:\n file_d.write(file_data.content)\n file_descriptor = open(file_path, 'rb')\n files[key] = file_descriptor\n elif value['__type'] == ParseFieldTypeE.GEO_POINT:\n processed_object[key.lower()] = {'longitude': value['longitude'], 'latitude': value['latitude']}\n\n @classmethod\n def _process_array_field(cls, key, value, 
processed_object):\n for i, item in enumerate(value):\n if isinstance(item, dict):\n if item.get('__type') == ParseFieldTypeE.POINTER:\n Formatter().write('Array of pointers not supported, writing: {}'.format(item.get('objectId')))\n value[i] = item['objectId']\n values_list = json.dumps(value)\n processed_object[key.lower()] = values_list\n\n @classmethod\n def _process_other_fields(cls, key, value, processed_object, syncano_fields):\n if key.lower() in syncano_fields:\n if key in ['createdAt', 'updatedAt']:\n processed_object[cls.original_datetime_label.format(key.lower())] = value\n else:\n processed_object[key.lower()] = value\n\n @classmethod\n def create_schema(cls, parse_schema):\n \"\"\"\n Return syncano schema for a Class;\n :param parse_schema: the schema from parse;\n :return: the Class name and the schema used in Syncano;\n \"\"\"\n\n fields_to_skip = ['ACL'] # TODO: handle ACL later on\n class_name = cls.normalize_class_name(parse_schema['className'])\n schema = []\n relations = []\n for field, field_meta in six.iteritems(parse_schema['fields']):\n if field not in fields_to_skip:\n type = field_meta['type']\n new_type = ClassProcessor.map[type]\n\n if type == 'Relation':\n if class_name == cls.normalize_class_name(field_meta['targetClass']):\n target = 'self'\n else:\n target = cls.normalize_class_name(field_meta['targetClass'])\n schema.append({\n 'name': field.lower(),\n 'type': new_type,\n 'target': target\n })\n relations.append({field: field_meta})\n continue\n\n if field == 'objectId':\n schema.append({\n 'name': field.lower(),\n 'type': new_type,\n 'filter_index': True\n })\n continue\n\n if field in ['updatedAt', 'createdAt']:\n schema.append({\n 'name': cls.original_datetime_label.format(field.lower()),\n 'type': new_type,\n 'filter_index': True,\n 'order_index': True,\n })\n continue\n\n if new_type == 'reference':\n schema.append({\n 'name': field.lower(),\n 'type': new_type,\n 'target': cls.normalize_class_name(field_meta['targetClass'])}\n )\n continue\n\n schema.append({'name': field.lower(), 'type': new_type})\n\n return SyncanoSchema(class_name=class_name, schema=schema, relations=relations)\n\n @classmethod\n def normalize_class_name(cls, class_name):\n name = class_name\n if name.startswith('_'):\n name = 'internal_' + name[1:].lower()\n return name\n\n @classmethod\n def show_class_name(cls, klass):\n \"\"\"\n Displays Class name in click progress bar.\n :param klass: the Class name;\n :return: Formatted Class name;\n \"\"\"\n if klass is not None:\n if isinstance(klass, ClassAggregate):\n return u\"Class: {}\".format(klass.syncano_name)\n elif isinstance(klass, tuple):\n return u\"Class: {}\".format(klass[0])\n return u'Done.'\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-1686309550870439400,"string":"-1,686,309,550,870,439,400"},"line_mean":{"kind":"number","value":34.9642857143,"string":"34.964286"},"line_max":{"kind":"number","value":114,"string":"114"},"alpha_frac":{"kind":"number","value":0.5339764506,"string":"0.533976"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109477,"cells":{"repo_name":{"kind":"string","value":"softak/webfaction_demo"},"path":{"kind":"string","value":"apps/stores/models.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5850"},"content":{"kind":"string","value":"import math\nimport decimal\nfrom django.contrib.gis.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom 
django.db.models import Q\nfrom django.db.models.query import QuerySet\nfrom utils import QuerySetManager\nfrom stores.fields import ColorField\n\n\nclass Category(models.Model):\n name = models.CharField(_('name'),\n max_length=50)\n icon = models.ImageField(_('icon'),\n upload_to='category-icons',\n null=True)\n marker = models.ImageField(_('marker'),\n upload_to='category-markers',\n null=True)\n\n class Meta:\n verbose_name_plural = \"categories\"\n\n def __unicode__(self):\n return self.name\n\n\nclass Store(models.Model):\n user = models.OneToOneField(User,\n related_name='store')\n name = models.CharField(_('business name'),\n max_length=100)\n category = models.ForeignKey(Category,\n related_name='stores')\n location = models.PointField(_('location'),\n srid=4326)\n address = models.CharField(_('address'),\n max_length=1000)\n is_active = models.BooleanField(_('active'),\n default=False)\n window_image = models.ImageField(_('window image'),\n upload_to='store-images',\n null=True,\n blank=True)\n phone = models.CharField(_('phone'),\n max_length=50)\n paypal_email = models.EmailField(_('PayPal e-mail'),\n null=True,\n blank=True,\n max_length=100)\n\n objects = models.GeoManager()\n\n def __unicode__(self):\n return self.name\n\n @models.permalink\n def get_absolute_url(self):\n return ('stores.view_store', [str(self.id)])\n\n def get_buyer_ids(self):\n from cart.models import SocialTag, PersonalTag\n r1 = SocialTag.objects.filter(buy__store=self) \\\n .values_list('user__id', flat=True).distinct()\n r2 = PersonalTag.objects.filter(item__store=self) \\\n .values_list('user__id', flat=True).distinct()\n return list(r1) + list(r2)\n\n def get_buyers(self):\n return User.objects.filter(id__in=self.get_buyer_ids())\n\n\nclass StoreDesign(models.Model):\n store = models.OneToOneField(Store, \n related_name='design')\n background_image = models.ImageField(_('Image'),\n upload_to='store_desings',\n null=True,\n blank=True)\n is_repeated = models.BooleanField(_('Repeat'), \n default=False)\n background_color = ColorField(_('Color'), \n default='#ffffff')\n\n\nclass ItemQuerySet(QuerySet):\n def in_stock(self):\n return self.exclude(is_out_of_stock=True) \\\n .filter(Q(quantity__gt=0) | Q(quantity__isnull=True))\n\n\nclass Item(models.Model):\n store = models.ForeignKey(Store,\n related_name='items')\n name = models.CharField(_('name'),\n max_length=100)\n description = models.CharField(_('description'),\n max_length=1000)\n price = models.DecimalField(_('price'),\n max_digits=10,\n decimal_places=2)\n discount = models.PositiveSmallIntegerField(_('advertised discount'),\n default=0)\n # TODO make NOT NULL\n quantity = models.PositiveIntegerField(_('quantity'),\n null=True,\n blank=True)\n is_out_of_stock = models.BooleanField(_('is out of stock'), \n default=False)\n discount_group = models.ForeignKey('DiscountGroup',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='items')\n\n objects = QuerySetManager(qs_class=ItemQuerySet)\n\n def save(self, *args, **kwargs):\n if not self.discount_group is None:\n if self.store != self.discount_group.discount.store:\n raise ValidationError('Item can\\'t belong to specified discount group!')\n return super(Item, self).save(*args, **kwargs)\n\n def get_default_image(self):\n default_images = self.images.filter(is_default=True)\n if default_images.exists():\n return default_images[0].image\n elif self.images.exists():\n return self.images.all()[0].image\n else:\n return None\n \n @models.permalink\n def get_absolute_url(self):\n 
return ('stores.item', [str(self.id)])\n\n def __unicode__(self):\n return self.name\n\n\nclass DiscountGroup(models.Model): # TODO add store field?\n name = models.CharField(_('discount name'),\n max_length=100)\n discount = models.ForeignKey('Discount',\n related_name='discount_groups')\n\n\nclass Discount(models.Model):\n store = models.ForeignKey(Store,\n related_name='discount_models')\n name = models.CharField(_('discount name'),\n max_length=100)\n for_additional_item = models.DecimalField(_('discount for each additional item'),\n max_digits=4,\n decimal_places=2)\n for_additional_buyer = models.DecimalField(_('discount for each additional buyer'),\n max_digits=4,\n decimal_places=2)\n lower_bound = models.DecimalField(_('lower bound'),\n max_digits=4,\n decimal_places=2)\n \n def __unicode__(self):\n return 'Discount: %s' % self.name\n\n def apply(self, items_number=None, buyers_number=None):\n discount = math.pow((100 - self.for_additional_buyer) / decimal.Decimal(100), buyers_number) * \\\n math.pow((100 - self.for_additional_item) / decimal.Decimal(100), items_number)\n return max(decimal.Decimal(discount), (100 - self.lower_bound) / decimal.Decimal(100))\n\n\nclass ItemImage(models.Model):\n item = models.ForeignKey(Item,\n related_name='images')\n image = models.ImageField(_('image'),\n upload_to='item-images')\n is_default = models.BooleanField(_('is default'),\n default=False)\n \n def __unicode__(self):\n return 'Image of %s' % self.item.name\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-5412624703133597000,"string":"-5,412,624,703,133,597,000"},"line_mean":{"kind":"number","value":31.1428571429,"string":"31.142857"},"line_max":{"kind":"number","value":104,"string":"104"},"alpha_frac":{"kind":"number","value":0.6213675214,"string":"0.621368"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109478,"cells":{"repo_name":{"kind":"string","value":"danzek/nlhbi-malware-extractor"},"path":{"kind":"string","value":"getNLindicators.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3176"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n\"\"\"\nDan O'Day, Purdue University\nCNIT581 Cyber Forensics of Malware\n\nExtract natural language host-based indicators from malware sample objects\n\"\"\"\n\n__author__ = \"Dan O'Day\"\n__credits__ = [\"Dan O'Day\", \"Sam Liles\"]\n__license__ = \"GNU General Public License\"\n__version__ = \"0.1\"\n__maintainer__ = \"Dan O'Day\"\n__email__ = \"doday@purdue.edu\"\n__status__ = \"Development\"\n\n\nimport os\nimport sys\nfrom malware import MalwareSample\nfrom nltk.tokenize import WordPunctTokenizer\nfrom nltk.corpus import wordnet\n\n\ndef enumerate_files(folders):\n \"\"\"\n Iterates through supplied folder(s) for PE32 files (.dll or .exe), creates malware sample objects\n\n :param folders: folder(s) containing PE32 files (.dll or .exe)\n :return: list of (parsed) malware sample objects\n \"\"\"\n msl = [] # list of malware sample objects\n for folder in folders:\n for root, dirs, files in os.walk(folder):\n for fn in files:\n if fn.lower().endswith(\".exe\") or fn.lower().endswith(\".dll\"):\n print \"---------------------------------------------------------------------------\"\n print \"analyzing\", os.path.join(root, fn), \"...\"\n ms = MalwareSample(os.path.join(root, fn))\n msl.append(ms) # add processed malware sample to list (msl)\n return msl\n\n\ndef extract_nl_text(ms):\n \"\"\"\n Extracts and tokenizes text from malware sample 
object\n\n :param ms: MalwareSample object\n :return: list of tokenized strings found in malware sample object's internal strings list\n \"\"\"\n wpt = WordPunctTokenizer()\n all_tokenized_strings_in_ms = []\n inside_xml_privileges = False\n for s in ms.strings:\n if 'requestedPrivileges' in s or 'This program cannot be run in DOS mode' in s:\n continue\n elif inside_xml_privileges:\n continue\n elif '' in s:\n inside_xml_privileges = False\n continue\n\n tokenized_string = []\n tokens = wpt.tokenize(s)\n if tokens:\n for t in tokens:\n if wordnet.synsets(t) and len(t) > 3: # had to use length to eliminate false positives\n tokenized_string.extend(tokens)\n break\n if tokenized_string:\n all_tokenized_strings_in_ms.append(tokenized_string)\n return all_tokenized_strings_in_ms\n\n\ndef process_malware_sample(ms):\n \"\"\"\n Central function for calling other functions used in processing malware sample objects\n\n :param ms: MalwareSample object\n \"\"\"\n tokenized_strings_in_ms = extract_nl_text(ms)\n print 'Strings from', str(ms), tokenized_strings_in_ms\n\n\ndef main():\n args = sys.argv[1:]\n\n if not args:\n print 'No arguments specified.\\nusage: ./malwareNLIndicators.py {folder(s) containing malware sample(s)}\\n'\n sys.exit(1)\n\n malware_samples = enumerate_files(args) # returns list of malware sample objects\n for ms in malware_samples:\n process_malware_sample(ms)\n\n\nif __name__ == '__main__':\n main()"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-2411974937378561500,"string":"-2,411,974,937,378,561,500"},"line_mean":{"kind":"number","value":30.1470588235,"string":"30.147059"},"line_max":{"kind":"number","value":115,"string":"115"},"alpha_frac":{"kind":"number","value":0.6095717884,"string":"0.609572"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109479,"cells":{"repo_name":{"kind":"string","value":"tensorflow/agents"},"path":{"kind":"string","value":"tf_agents/train/interval_trigger.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2315"},"content":{"kind":"string","value":"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Utility that Triggers every n calls.\"\"\"\n\nfrom typing import Callable\n\nfrom absl import logging\n\n\nclass IntervalTrigger(object):\n \"\"\"Triggers on every fixed interval.\n\n Note that as long as the >= `interval` number of steps have passed since the\n last trigger, the event gets triggered. 
The current value is not necessarily\n `interval` steps away from the last triggered value.\n \"\"\"\n\n def __init__(self, interval: int, fn: Callable[[], None], start: int = 0):\n \"\"\"Constructs the IntervalTrigger.\n\n Args:\n interval: The triggering interval.\n fn: callable with no arguments that gets triggered.\n start: An initial value for the trigger.\n \"\"\"\n self._interval = interval\n self._original_start_value = start\n self._last_trigger_value = start\n self._fn = fn\n\n if self._interval <= 0:\n logging.info(\n 'IntervalTrigger will not be triggered because interval is set to %d',\n self._interval)\n\n def __call__(self, value: int, force_trigger: bool = False) -> None:\n \"\"\"Maybe trigger the event based on the interval.\n\n Args:\n value: the value for triggering.\n force_trigger: If True, the trigger will be forced triggered unless the\n last trigger value is equal to `value`.\n \"\"\"\n if self._interval <= 0:\n return\n\n if (force_trigger and value != self._last_trigger_value) or (\n value >= self._last_trigger_value + self._interval):\n self._last_trigger_value = value\n self._fn()\n\n def reset(self) -> None:\n \"\"\"Resets the trigger interval.\"\"\"\n self._last_trigger_value = self._original_start_value\n\n def set_start(self, start: int) -> None:\n self._last_trigger_value = start\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":1041133332050366700,"string":"1,041,133,332,050,366,700"},"line_mean":{"kind":"number","value":31.6056338028,"string":"31.605634"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.6842332613,"string":"0.684233"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109480,"cells":{"repo_name":{"kind":"string","value":"kiip/statsite"},"path":{"kind":"string","value":"tests/helpers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1340"},"content":{"kind":"string","value":"\"\"\"\nContains helper classes and methods for tests.\n\"\"\"\n\nfrom statsite.aggregator import Aggregator\nfrom statsite.collector import Collector\nfrom statsite.metrics_store import MetricsStore\n\nclass DumbAggregator(Aggregator):\n def __init__(self, *args, **kwargs):\n super(DumbAggregator, self).__init__(*args, **kwargs)\n\n self.flushed = False\n self.metrics = []\n\n def add_metrics(self, metrics):\n self.metrics.extend(metrics)\n\n def flush(self):\n self.flushed = True\n\nclass DumbCollector(Collector):\n # Note that the host/port arguments are to avoid exceptions when\n # setting the settings in the \"servers\" funcarg\n def __init__(self, host=None, port=None, aggregator=None):\n super(DumbCollector, self).__init__(aggregator)\n\n pass\n\nclass DumbMetricsStore(MetricsStore):\n # Note that the host/port arguments are to avoid exceptions when\n # setting the settings in the \"servers\" funcarg\n def __init__(self, host=None, port=None, prefix=None):\n self.data = []\n\n def flush(self, data):\n self.data.extend(data)\n\ndef statsite_settings(settings):\n \"\"\"\n Decorator to set the settings for Statsite for the \"servers\"\n funcarg.\n \"\"\"\n def decorator(func):\n func.func_dict[\"statsite_settings\"] = settings\n return func\n\n return 
decorator\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":3805028084313234400,"string":"3,805,028,084,313,234,400"},"line_mean":{"kind":"number","value":26.9166666667,"string":"26.916667"},"line_max":{"kind":"number","value":68,"string":"68"},"alpha_frac":{"kind":"number","value":0.6731343284,"string":"0.673134"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109481,"cells":{"repo_name":{"kind":"string","value":"hainm/ambertools-binary-test"},"path":{"kind":"string","value":"devtools/ci/download_circleci_AmberTools.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4060"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Note: This program is for internal use (developers) and\n# should be never included in release tar file.\n# (It has our circleci private token)\n\n# require: requests (python), wget\n\n# How?\n# - get info without downloading\n# python download_circleci_AmberTools.py --info\n# - download\n# python download_circleci_AmberTools.py\n\n# What does this script do? Will download latest succesful AmberTools build on circleci\n# https://circleci.com/gh/Amber-MD/ambertools-ci/481\n# (Note: If you are in Amber-MD organization on github, you can change settings)\n# Why does this matter? Collaborators (e.g: phenix) can download, untar and just use the binary distribution (dev)\n\nimport os\nimport argparse\nimport requests\nimport json\nimport subprocess\n\n\ndef get_circle_info(url):\n # info: List[Dict]\n print('url', url)\n x = requests.get(url)\n json.loads(x.content.decode())\n info = json.loads(x.content.decode())\n return info\n\n\ndef download_non_conda_install_tarfiles(url_artifact_info,\n dry_run=False, exclude_conda=False):\n # require: wget\n info = get_circle_info(url_artifact_info)\n\n for path_dict in info:\n url = path_dict['url']\n if exclude_conda:\n if 'non-conda-install' in path_dict['pretty_path']:\n print('Downloading ', url)\n if not dry_run:\n subprocess.check_output(['wget', url])\n else:\n print('Downloading ', url)\n if not dry_run:\n subprocess.check_output(['wget', url])\n\ndef get_latest_build_info(url_info):\n info_collection = get_circle_info(url_info)\n\n info = {}\n for info in info_collection:\n if info['status'] == 'success':\n break\n\n keys = [\n 'username',\n 'branch',\n 'author_name',\n 'committer_date',\n 'has_artifacts',\n 'build_url',\n 'vcs_url',\n 'status',\n 'build_num',\n 'all_commit_details',\n ]\n\n for k in keys:\n if k == 'all_commit_details':\n details = info.get(k)[0]\n print(details['body'])\n else:\n print(k, info.get(k))\n print(\"\")\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Download/get-info binary builds from circleci',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '-r',\n '--repo',\n default='ambertools-ci',\n help=\"(default: %(default)s)\")\n parser.add_argument(\n '-b', '--branch', default='nightly', help=\"(default: %(default)s)\")\n parser.add_argument(\n '--build-num', default='latest', help=\"(default: %(default)s)\")\n parser.add_argument(\n '--info',\n action='store_true',\n help='Only show information without downloading')\n parser.add_argument(\n '--exclude-conda',\n action='store_true',\n help='Exclude conda build')\n parser.add_argument('-d', '--dry-run', action='store_true', help='dry run')\n args = parser.parse_args()\n\n repo = args.repo\n branch = args.branch\n build_num = args.build_num\n\n url_info = 
\"https://circleci.com/api/v1.1/project/github/Amber-MD/{repo}/tree/{branch}\".format(\n repo=repo, branch=branch)\n # token was generated by visiting: https://circleci.com/account/api\n # (private env)\n token = os.getenv('AMBERTOOLS_TOKEN')\n base_dir = 'https://circleci.com/api/v1.1/project/github/Amber-MD/'\n my_branch = '{repo}/{build_num}/artifacts?circle-token={token}&branch={branch}&filter=successful'.format(\n token=token, repo=repo, branch=branch, build_num=build_num)\n url_artifact_info = base_dir + my_branch\n\n get_latest_build_info(url_info)\n if args.dry_run:\n print('Dry run')\n if not args.info:\n download_non_conda_install_tarfiles(url_artifact_info,\n dry_run=args.dry_run,\n exclude_conda=args.exclude_conda)\n else:\n print('skip downloading since --info is given')\n\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-244840541640465920,"string":"-244,840,541,640,465,920"},"line_mean":{"kind":"number","value":29.7575757576,"string":"29.757576"},"line_max":{"kind":"number","value":114,"string":"114"},"alpha_frac":{"kind":"number","value":0.6083743842,"string":"0.608374"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109482,"cells":{"repo_name":{"kind":"string","value":"Ecpy/ecpy_hqc_legacy"},"path":{"kind":"string","value":"exopy_hqc_legacy/tasks/tasks/instr/apply_mag_field_task.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3238"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.\n#\n# Distributed under the terms of the BSD license.\n#\n# The full license is in the file LICENCE, distributed with this software.\n# -----------------------------------------------------------------------------\n\"\"\"Task to apply a magnetic field.\n\n\"\"\"\nfrom time import sleep\nimport numbers\n\nfrom atom.api import (Unicode, Float, Bool, set_default)\n\nfrom exopy.tasks.api import InstrumentTask, validators\n\n\nclass ApplyMagFieldTask(InstrumentTask):\n \"\"\"Use a supraconducting magnet to apply a magnetic field. 
Parallel task.\n\n \"\"\"\n # Target magnetic field (dynamically evaluated)\n field = Unicode().tag(pref=True,\n feval=validators.SkipLoop(types=numbers.Real))\n\n # Rate at which to sweep the field.\n rate = Float(0.01).tag(pref=True)\n\n # Whether to stop the switch heater after setting the field.\n auto_stop_heater = Bool(True).tag(pref=True)\n\n # Time to wait before bringing the field to zero after closing the switch\n # heater.\n post_switch_wait = Float(30.0).tag(pref=True)\n\n parallel = set_default({'activated': True, 'pool': 'instr'})\n database_entries = set_default({'field': 0.01})\n\n def check_for_interruption(self):\n \"\"\"Check if the user required an interruption.\n\n \"\"\"\n return self.root.should_stop.is_set()\n\n def perform(self, target_value=None):\n \"\"\"Apply the specified magnetic field.\n\n \"\"\"\n # make ready\n if (self.driver.owner != self.name or\n not self.driver.check_connection()):\n self.driver.owner = self.name\n\n if target_value is None:\n target_value = self.format_and_eval_string(self.field)\n\n driver = self.driver\n normal_end = True\n if (abs(driver.read_persistent_field() - target_value) >\n driver.output_fluctuations):\n job = driver.sweep_to_persistent_field()\n if job.wait_for_completion(self.check_for_interruption,\n timeout=60, refresh_time=1):\n driver.heater_state = 'On'\n else:\n return False\n\n # set the magnetic field\n job = driver.sweep_to_field(target_value, self.rate)\n normal_end = job.wait_for_completion(self.check_for_interruption,\n timeout=60,\n refresh_time=10)\n\n # Always close the switch heater when the ramp was interrupted.\n if not normal_end:\n job.cancel()\n driver.heater_state = 'Off'\n self.write_in_database('field', driver.read_persistent_field())\n return False\n\n # turn off heater\n if self.auto_stop_heater:\n driver.heater_state = 'Off'\n sleep(self.post_switch_wait)\n job = driver.sweep_to_field(0)\n job.wait_for_completion(self.check_for_interruption,\n timeout=60, refresh_time=1)\n\n self.write_in_database('field', target_value)\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-6838290255176113000,"string":"-6,838,290,255,176,113,000"},"line_mean":{"kind":"number","value":34.5824175824,"string":"34.582418"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5642371834,"string":"0.564237"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109483,"cells":{"repo_name":{"kind":"string","value":"subuk/xtnews"},"path":{"kind":"string","value":"xtnews/views.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2322"},"content":{"kind":"string","value":"\nimport transaction\nfrom pyramid.view import view_config\nfrom pyramid.exceptions import NotFound\nfrom pyramid.httpexceptions import HTTPNoContent, HTTPCreated\nfrom forms import NewsForm\nfrom xtnews import models\n\nclass BaseView(object):\n\n def __init__(self, request):\n self.request = request\n self.request.response.content_type = 'application/xml'\n self.db = models.DBSession()\n\n\nclass Collection(BaseView):\n\n def get_object_list(self):\n return self.db.query(models.News).all()\n\n @view_config(route_name='list', renderer='object_list.xml.jinja2', request_method='GET')\n def get(self):\n return {\n 'object_list': (x.as_dict() for x in self.get_object_list())\n }\n\n @view_config(route_name='list', renderer='object_form_errors.xml.jinja2', request_method='POST')\n def post(self):\n form = NewsForm(**self.request.POST)\n if 
not form.validate():\n self.request.response.status = 400\n return {'form': form}\n obj = models.News()\n form.populate_obj(obj)\n self.db.add(obj)\n transaction.commit()\n return HTTPCreated()\n\n\nclass Item(BaseView):\n\n def get_object(self, object_id):\n obj = self.db.query(models.News).get(object_id)\n if not obj:\n raise NotFound(\"Object with id %s not found\" % object_id)\n return obj\n\n @view_config(route_name='item', renderer='object.xml.jinja2', request_method='GET')\n def get(self):\n obj = self.get_object(self.request.matchdict['id'])\n return {\n 'object': obj.as_dict(),\n }\n\n @view_config(route_name='item', renderer='object.xml.jinja2', request_method='DELETE')\n def delete(self):\n obj = self.get_object(self.request.matchdict['id'])\n self.db.delete(obj)\n transaction.commit()\n return HTTPNoContent()\n\n @view_config(route_name='item', renderer='object_form_errors.xml.jinja2', request_method='PUT')\n def put(self):\n form = NewsForm(**self.request.POST)\n if not form.validate():\n self.request.response.status = 400\n return {'form': form}\n obj = self.get_object(self.request.matchdict['id'])\n form.populate_obj(obj)\n self.db.add(obj)\n transaction.commit()\n return HTTPNoContent()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-6245548743591391000,"string":"-6,245,548,743,591,391,000"},"line_mean":{"kind":"number","value":30.8082191781,"string":"30.808219"},"line_max":{"kind":"number","value":100,"string":"100"},"alpha_frac":{"kind":"number","value":0.624461671,"string":"0.624462"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109484,"cells":{"repo_name":{"kind":"string","value":"akshayparopkari/phylotoast"},"path":{"kind":"string","value":"bin/filter_ambiguity.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"3638"},"content":{"kind":"string","value":"'''\nCreated on Dec 6, 2012\n\nAuthor: Shareef M. Dabdoub\n'''\nimport sys\ntry:\n from Bio import SeqIO\nexcept ImportError as ie:\n sys.exit('Import Error. 
Please install missing module: {}'.format(ie))\nimport argparse\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\n\n\ndef filter_ambiguity(records, percent=0.5): # , repeats=6)\n \"\"\"\n Filters out sequences with too much ambiguity as defined by the method\n parameters.\n\n :type records: list\n :param records: A list of sequences\n :type repeats: int\n :param repeats: Defines the number of repeated N that trigger truncating a\n sequence.\n :type percent: float\n :param percent: Defines the overall percentage of N in a sequence that\n will cause the sequence to be filtered out.\n \"\"\"\n seqs = []\n # Ns = ''.join(['N' for _ in range(repeats)])\n count = 0\n for record in records:\n if record.seq.count('N')/float(len(record)) < percent:\n# pos = record.seq.find(Ns)\n# if pos >= 0:\n# record.seq = Seq(str(record.seq)[:pos])\n seqs.append(record)\n count += 1\n\n return seqs, count\n\n\ndef handle_program_options():\n \"\"\"\n Uses the built-in argparse module to handle command-line options for the\n program.\n\n :return: The gathered command-line options specified by the user\n :rtype: argparse.ArgumentParser\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Filter an input \\\n FASTA-formatted file to \\\n remove or truncate sequences\\\n based on ambiguous base (N) \\\n content.\")\n parser.add_argument('--version', action='version',\n version='Sequence Ambiguity Filter v0.1')\n parser.add_argument('fasta_input', help=\"QIIME-formatted mapping file \\\n linking Sample IDs with barcodes \\\n and primers.\")\n parser.add_argument('-o', '--output', default='output.fna',\n help='The name of the file to output the set of \\\n filtered sequences. Default: \\'output.fna\\'.')\n# parser.add_argument('-r', '--repeats', type=int, default=6,\n# help='Truncates a sequence when a string of ambiguous \\\n# bases (N) of REPEATS or longer is found. \\\n# Default: REPEATS=6.')\n parser.add_argument('-p', '--percent', type=int, default=5,\n help='Removes any sequence containing the specified \\\n percentage (or greater) of ambiguous bases (N).\\\n Default: PERCENT=5')\n parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()\n\n\ndef main():\n args = handle_program_options()\n\n try:\n with open(args.fasta_input):\n pass\n except IOError as ioe:\n sys.exit('\\nError with QIIME formatted mapping file:{}\\n'.format(ioe))\n\n with open(args.fasta_input, 'rU') as inF:\n in_records = SeqIO.parse(inF, 'fasta')\n records, count = filter_ambiguity(in_records, args.percent/100.0) # , args.repeats)\n\n SeqIO.write(records, args.output, \"fasta\")\n\n if args.verbose:\n print '%i sequences found.' % count\n print '%i sequences kept.' 
% len(records)\n print\n print 'Output written to: %s' % args.output\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-8652287533092176000,"string":"-8,652,287,533,092,176,000"},"line_mean":{"kind":"number","value":35.38,"string":"35.38"},"line_max":{"kind":"number","value":92,"string":"92"},"alpha_frac":{"kind":"number","value":0.5511269929,"string":"0.551127"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109485,"cells":{"repo_name":{"kind":"string","value":"8devices/IoTPy"},"path":{"kind":"string","value":"IoTPy/transport.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1376"},"content":{"kind":"string","value":"import socket\nfrom IoTPy.detect_sfp_serial import detect_sfp_serial\nfrom IoTPy.errors import IoTPy_IOError\n\n\nclass SocketTransport(object):\n def __init__(self, host='127.0.0.1', port=7777):\n self.host = host\n self.port = port\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(self.host, self.port)\n self.socket.connect((self.host, self.port))\n\n def read(self):\n return self.socket.recv(1024)\n\n def write(self, data):\n self.socket.send(data)\n\n def close(self):\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n\n\nclass SerialTransport(object):\n def __init__(self, serial_port=None, uid=None):\n self.serial_port = serial_port\n if not self.serial_port:\n self.serial_port = detect_sfp_serial(uid)\n\n def read(self):\n try:\n data = self.serial_port.read(1) # read one, blocking\n n = self.serial_port.inWaiting() # look if there is more\n if n:\n data = data + self.serial_port.read(n) # and get as much as possible\n return data\n except IOError:\n pass\n except TypeError:\n pass\n\n return None\n\n def write(self, data):\n self.serial_port.write(data)\n\n def close(self):\n self.serial_port.close()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":7456966032519541000,"string":"7,456,966,032,519,541,000"},"line_mean":{"kind":"number","value":27.0816326531,"string":"27.081633"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.5850290698,"string":"0.585029"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109486,"cells":{"repo_name":{"kind":"string","value":"faddai/newfies-dialer"},"path":{"kind":"string","value":"newfies/survey/urls.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1222"},"content":{"kind":"string","value":"#\n# Newfies-Dialer License\n# http://www.newfies-dialer.org\n#\n# This Source Code Form is subject to the terms of the Mozilla Public \n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Copyright (C) 2011-2012 Star2Billing S.L.\n# \n# The Initial Developer of the Original Code is\n# Arezqui Belaid \n#\n\nfrom django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom survey.views import *\n\n\nurlpatterns = patterns('',\n # Survey urls\n (r'^survey/$', 'survey.views.survey_list'),\n (r'^survey_grid/$', 'survey.views.survey_grid'),\n (r'^survey/add/$', 'survey.views.survey_add'),\n (r'^survey/del/(.+)/$', 'survey.views.survey_del'),\n (r'^survey/(.+)/$', 'survey.views.survey_change'),\n (r'^survey_finestatemachine/$', 'survey.views.survey_finestatemachine'),\n\n (r'^survey_report/$', 'survey.views.survey_report'),\n\n\n # Audio urls\n (r'^audio/$', 'survey.views.audio_list'),\n (r'^audio_grid/$', 'survey.views.audio_grid'),\n (r'^audio/add/$', 'survey.views.audio_add'),\n (r'^audio/del/(.+)/$', 'survey.views.audio_del'),\n (r'^audio/(.+)/$', 'survey.views.audio_change'),\n)\n\n"},"license":{"kind":"string","value":"mpl-2.0"},"hash":{"kind":"number","value":3954223356471408000,"string":"3,954,223,356,471,408,000"},"line_mean":{"kind":"number","value":30.3333333333,"string":"30.333333"},"line_max":{"kind":"number","value":76,"string":"76"},"alpha_frac":{"kind":"number","value":0.647299509,"string":"0.6473"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109487,"cells":{"repo_name":{"kind":"string","value":"analphagamma/SorosTracker9000"},"path":{"kind":"string","value":"SCrawler.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"4913"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport re\nimport json\nimport requests\nfrom datetime import date\nfrom bs4 import BeautifulSoup\n\nclass SorosCrawler(object):\n '''This object is a crawler that\n -opens the website\n -gets all links from it\n -picks out the links that have \"soros\" in them\n\n To instantiate the object provide a name (i.e. 
the website name) and an URL\n Example: obj = SorosCrawler('Best News Site', 'http://www.bestnewssite.com')'''\n\n\n def __init__(self, source_name, url):\n self.source_name = source_name\n self.url = url\n\n def get_links(self):\n '''opens the website and gets all links from the html source\n Returns a set of the links to prevent duplication'''\n\n html = requests.get(self.url).content\n soup = BeautifulSoup(html, 'html.parser')\n\n all_links = []\n for link in soup.find_all('a'):\n all_links.append(link.get('href'))\n\n return set(all_links) #all the hyperlinks from the website + duplicates removed\n\n def keyword_filter(self, keyword, href_link):\n '''uses regex to find the keyword in the link\n then transform the link to a valid hyperlink\n returns an url in string format'''\n\n if re.search(keyword, href_link.lower()) and not re.search('cth', href_link.lower()):\n if href_link[0] == '/':\n #no double slashes\n href_link = href_link.strip('/')\n if href_link[:3] == 'www':\n href_link = 'http://' + href_link\n if not re.search(self.url, href_link):\n #sometimes the href doesn't have the full, absolute path\n href_link = self.url + href_link\n \n return href_link\n else:\n return None\n\n def parse_links(self):\n '''Picks all links that have 'soros' in them\n If the href is incomplete it adds 'http:' to the front'''\n\n links = []\n for link in self.get_links():\n try:\n re.search('soros', link)\n except TypeError:\n pass\n else:\n #search for the word soros in link\n link = self.keyword_filter('soros', link)\n if link != None:\n print(link)\n try:\n html = requests.get(link).content\n except:\n print('Requests encountered an error with the link:\\n', link)\n else:\n soup = BeautifulSoup(html, 'html.parser')\n links.append((soup.title.string.strip('\\n').strip(), link))\n\n return links #list of tuples (article title, article link)\n\ndef simple_log(source_website, links):\n '''updates the JSON log with today's articles'''\n\n with open('tweet_log.json', 'r+') as f: tweet_log = json.load(f)\n try:\n #if there's no entry for today it creates key\n tweet_log[str(date.today())]\n except KeyError:\n tweet_log[str(date.today())] = {}\n\n tweet_log[str(date.today())][source_website] = links\n\n with open('tweet_log.json', 'w+') as f: json.dump(tweet_log, f)\n print('Links for {} logged for {}'.format(source_website, date.today()))\n f.close()\n\n\ndef crawl_websites(websites):\n '''collects all the articles from all sources\n then picks out the sources that has the most articles\n\n [In] -> takes a dictionary as an argument {source name: link to source's main page}\n [Out] -> dict {source: list of links}'''\n\n todays_articles = {}\n\n for name, url in websites.items():\n '''Iterating through all the news sources\n Scraping links\n Logging links to date'''\n\n obj = SorosCrawler(name, url)\n\n print('\\nGetting news from {}'.format(name))\n todays_links = obj.parse_links()\n print('Number of articles found: ', len(todays_links))\n\n #logging links\n with open('links.txt', 'r+') as f: linkdb = f.read().split('\\n')\n rejected_links = []\n for link in todays_links:\n print(link)\n #removing the ones that are already in the db\n if link[1] in linkdb:\n rejected_links.append(link)\n print('Link already in list')\n else:\n print('New article found.')\n \n for link in rejected_links:\n todays_links.remove(link)\n\n simple_log(name, todays_links)\n todays_articles[name] = todays_links\n\n return todays_articles\n\nif __name__ == '__main__':\n NEWS_SOURCES = {'Magyar Hírlap': 
'http://magyarhirlap.hu/',\n 'Hirado.hu': 'http://www.hirado.hu/',\n 'Magyar Idők': 'http://magyaridok.hu/',\n 'Origo.hu': 'http://www.origo.hu/',\n '888.hu': 'http://888.hu/'}\n print(crawl_websites(NEWS_SOURCES))\n"},"license":{"kind":"string","value":"bsd-2-clause"},"hash":{"kind":"number","value":-5828904765428295000,"string":"-5,828,904,765,428,295,000"},"line_mean":{"kind":"number","value":33.3426573427,"string":"33.342657"},"line_max":{"kind":"number","value":93,"string":"93"},"alpha_frac":{"kind":"number","value":0.5563021788,"string":"0.556302"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109488,"cells":{"repo_name":{"kind":"string","value":"lungsi/cerebellum-unit"},"path":{"kind":"string","value":"cerebunit/validation_tests/cells/PurkinjeCell/test_for_quasilinear_behavior.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"15037"},"content":{"kind":"string","value":"# ============================================================================\n# test_for_quasilinear_behavior.py\n#\n# created 06 September 2017 Lungsi\n# modified 09 October 2017 Lungsi\n#\n# ============================================================================\n#\nimport sciunit\nimport quantities as pq\nfrom elephant.statistics import mean_firing_rate as mfr\n#\nfrom cerebunit.capabilities.cells.response import ProducesSpikeTrain\nfrom cerebunit.score_manager import BinaryScore, OverallBinaryScore\n#\n#\nclass QuasiLinearTest(sciunit.Test, BinaryScore, OverallBinaryScore):\n '''\n The QuasiLinear Test is a test where the model is injected with currents.\n First the model is injected with increasing currents in steps.\n This is followed by decreasing currents (same amplitudes).\n For each respective amplitude current injection the mean spiking frequencies\n are compared. 
The Binary score is 1 if the frequencies are different.\n The OverallBinary score is 1 if this is the case for all the amplitudes.\n '''\n required_capabilities = (ProducesSpikeTrain,)\n score_type = OverallBinaryScore\n#\n#\n def generate_prediction(self, model, verbose=False):\n '''\n Generates spike train from \"vm_soma\", cell region.\n The function is automatically called by sciunit.Test whic this test\n is a child of.\n Therefore as part of sciunit generate_prediction is mandatory.\n '''\n # ============Ramp Up and then Down Step Currents (nA)==============\n self.ramp_up_down_currents = \\\n { #\"current1\": {\"amp\": 0.6, \"dur\": 100.0, \"delay\": 100.0},\n #\"current2\": {\"amp\": 0.8, \"dur\": 100.0, \"delay\": 200.0},\n #\"current3\": {\"amp\": 1.0, \"dur\": 100.0, \"delay\": 300.0},\n #\"current4\": {\"amp\": 0.8, \"dur\": 100.0, \"delay\": 400.0},\n #\"current5\": {\"amp\": 0.6, \"dur\": 100.0, \"delay\": 500.0},\n \"current1\": {\"amp\": 0.4, \"dur\": 250.0, \"delay\": 250.0},\n \"current2\": {\"amp\": 0.8, \"dur\": 250.0, \"delay\": 500.0},\n \"current3\": {\"amp\": 1.2, \"dur\": 250.0, \"delay\": 750.0},\n \"current4\": {\"amp\": 1.6, \"dur\": 250.0, \"delay\": 1000.0},\n \"current5\": {\"amp\": 1.2, \"dur\": 250.0, \"delay\": 1250.0},\n \"current6\": {\"amp\": 0.8, \"dur\": 250.0, \"delay\": 1500.0},\n \"current7\": {\"amp\": 0.4, \"dur\": 250.0, \"delay\": 1750.0}\n }\n stimulus = \\\n model.set_stimulation_properties( self.ramp_up_down_currents )\n # below line is necessary for the simulation to run \"correctly\"\n [ stimulus[i].loc(0.5, sec=model.cell.soma) \\\n for i in range(len(stimulus)) ]\n # =============================================================\n self.setup_parameters = { \"dt\": 0.025, \"celsius\": 37,\n \"tstop\": 2250, \"v_init\": -65 }\n #\"tstop\": 600, \"v_init\": -65 }\n model.set_simulation_properties(self.setup_parameters)\n # =============================================================\n model.produce_spike_train()\n return model\n#\n#\n def get_spike_train_for_each_current(self, model):\n '''\n The model.produce_spike_train() results in spike train for all\n the current inject in one place\n model.predictions[\"spike_train\"][\"vm_soma\"]\n This function slices the spike train for ramp-up current phase\n and ramp-down current phases.\n And for each ramp spike trains for each respective current\n amplitude is stored in a dictionary such that\n ramp_currents = {\"currentid\": sliced_spike_train, ... 
}\n =======================Use Case================================\n ramp_up_train, ramp_down_train = \\\n self.get_spike_train_for_each_current(model)\n ===============================================================\n This function is called by process_prediction\n '''\n last_I_id = len(self.ramp_up_down_currents)\n # get all the spike train for desired cell region\n cell_region = \"vm_soma\"\n response_type = \"spike_train\"\n all_spike_train = model.predictions[response_type][cell_region]\n #\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # +++++++Spike trains for current0 => no current injection++++++\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n #\n # ====================Setup For Ramp-Up=========================\n # set the time boundaries\n spike_start = 0.0\n spike_stop = self.ramp_up_down_currents[\"current1\"][\"delay\"]\n # get the spike train for the time boundaries\n ramp_up_spike_train_for = \\\n { \"current0\":\n all_spike_train.time_slice(spike_start, spike_stop) }\n #\n # ====================Setup For Ramp-Down======================\n # set the time boundaries\n spike_start = \\\n self.ramp_up_down_currents[\"current\"+str(last_I_id)][\"delay\"] \\\n + self.ramp_up_down_currents[\"current\"+str(last_I_id)][\"dur\"]\n spike_stop = self.setup_parameters[\"tstop\"]\n # get the spike train for the time boundaries\n ramp_down_spike_train_for = \\\n { \"current0\":\n all_spike_train.time_slice(spike_start, spike_stop) }\n #\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n # ++Spike trains for currenti for each ith current injections++\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n #\n # ==========Setup indices for Ramp-Up and Ramp-Down============\n # total number of injections\n no_of_Iclamps = len(self.ramp_up_down_currents)\n ramp_up_start_idx = 1 # first current is current1\n ramp_down_stop_idx = no_of_Iclamps # last current is currentN\n if no_of_Iclamps % 2 == 0:\n # there is no largest current in the middle of ramp-up/down\n # that is, all the currents are repeated in ramp-down\n # so the middle current is the last current in ramp-up\n ramp_up_stop_idx = no_of_Iclamps / 2\n # so ramp-down current starts from ramp-up last current + 1\n ramp_down_start_idx = ramp_up_stop_idx + 1\n else:\n # the largest current is the middle of ramp-up & ramp-down\n # so the last ramp-up current is the one before the largest\n ramp_up_stop_idx = (no_of_Iclamps - 1) / 2\n # so ramp-down current starts from ramp-up last current + 1\n ramp_down_start_idx = ramp_up_stop_idx + 2\n #\n # create list of current indices from current1 for both ramps\n ramp_down_indices = \\\n [ k for k in range(ramp_down_stop_idx+1)\n if k >= ramp_down_start_idx\n and k <= ramp_down_stop_idx ]\n #\n # Note: ramp_down_stop_idx is not the last currentID in ramp-down\n # The last currentID in ramp-down = first currentID in ramp-up\n ramp_down_indices.reverse()\n # This is done as follows:\n # ============Loop through each current injection==============\n no_of_I_per_ramp = len(ramp_down_indices) # ramp-up = ramp-down\n for i in range(no_of_Iclamps):\n # currentID in self.ramp_up_down_currents start from current1\n idx = i+1\n # get current stimulation parameters for currenti\n inj_times = self.ramp_up_down_currents[\"current\"+str(idx)]\n # lower bound of the time boundary\n spike_start = inj_times[\"delay\"]\n # upper bound of the time boundary\n spike_stop = spike_start + inj_times[\"dur\"]\n # if the current stimulation is during 
ramp-up phase\n # i.e idx in ramp_up_indices\n if idx <= ramp_up_stop_idx:\n # slice the spike train from total spike train into a\n # dictionary with respective currenti tag\n spike_train = \\\n { \"current\"+str(idx):\n all_spike_train.time_slice(spike_start, spike_stop) }\n # add the dictionary into the dictionary for ramp-up trains\n ramp_up_spike_train_for.update(spike_train)\n # on the other hand if the stimulation is during ramp-down\n # do the above and add the dictionary inot ramp-down trains\n elif idx in ramp_down_indices:\n dwn_idx = ramp_down_indices.index(idx)+1\n spike_train = \\\n { \"current\"+str(dwn_idx): # 0 is reserved for no injection\n all_spike_train.time_slice(spike_start, spike_stop) }\n ramp_down_spike_train_for.update(spike_train)\n # ============================================================\n # return the dictionaries for both ramp-up and ramp-down phases\n return ramp_up_spike_train_for, ramp_down_spike_train_for\n#\n#\n def get_prediction_for_each_current(self, ramp_spike_train):\n '''\n For a given ramp (up or down) dictionary of spike trains tagged\n with respective current id their mean frequencies are calculated\n and its magnitude is stored in a dictionary of the form\n {currentid: {mean_freq: magnitude}}\n for all the currents in A ramp.\n ========================Use Case===============================\n ramp_up_mean_spike_freq = \\\n self.get_prediction_for_each_current(ramp_up_spike_train)\n ===============================================================\n This function is called by process_prediction\n '''\n ramp_mean_spike_freq = {}\n for current_id, spike_array in ramp_spike_train.iteritems():\n x = mfr(spike_array)\n y = {current_id: {\"mean_freq\": x.rescale(pq.Hz).item()} } # just the magnitude\n ramp_mean_spike_freq.update(y)\n return ramp_mean_spike_freq\n#\n#\n def process_prediction(self, model):\n '''\n Once the model has run, this function can be used to process the\n spike_train prediction to get the prediction of interest,\n mean firing rate.\n =======================Use Case===============================\n ramp_up_freq, ramp_down_freq = process_prediction(model)\n ==============================================================\n This function is called by compute_score\n '''\n # First,\n # get spike trains for respective currents during both\n # ramp Up and ramp Down stages\n ramp_up_spike_train, ramp_down_spike_train = \\\n self.get_spike_train_for_each_current(model)\n #\n # Now for each ramps get the spike frequencies\n # For Ramp-Up stage\n # compute and store mean firing rate for each spike train\n ramp_up_mean_spike_freq = \\\n self.get_prediction_for_each_current(ramp_up_spike_train)\n # For Ramp-Down stage\n # compute and store mean firing rate for each spike train\n ramp_down_mean_spike_freq = \\\n self.get_prediction_for_each_current(ramp_down_spike_train)\n # For both Ramp-Up and Ramp-Down\n # Return the mean firing rates (respective currents)\n return ramp_up_mean_spike_freq, ramp_down_mean_spike_freq\n#\n#\n def compute_score(self, observation, model, verbose=False):\n '''\n This function is like generate_prediction. 
It is therefore\n called automatically by sciunit which this test is a child of.\n This function with the same name compute_score is also therefore\n mandatory.\n '''\n # Since the model has already run, call process_prediction\n # to get the spike freqs for ramp up and ramp down phases\n ramp_up_mean_spike_freq_for, ramp_down_mean_spike_freq_for = \\\n self.process_prediction(model)\n score_breakdown = {} # store here the score breakdowns\n list_of_scores = [] # store here the list of scores\n # =======Loop through each current id in ramp up phase======\n # Note: this includes current0, no injection\n for current_id in ramp_up_mean_spike_freq_for.keys():\n # take corresponding freq at ramp up as observation\n raw_observation = ramp_up_mean_spike_freq_for[current_id]\n observation = \\\n { \"inequality\":\n \"!= \" + str(raw_observation[\"mean_freq\"]) }\n # if this current id is also in ramp down phase\n if current_id in ramp_down_mean_spike_freq_for.keys():\n # take corresponding freq at ramp down as prediction\n a_prediction = ramp_down_mean_spike_freq_for[current_id]\n # get their Binary score\n x = BinaryScore.compute( observation, a_prediction )\n y = BinaryScore(x)\n # Create details to be added in score_breakdown dict\n step_up_freq = \\\n \"stepUp = \"+str(raw_observation[\"mean_freq\"])+\" Hz\"\n step_down_freq = \\\n \"stepDown = \"+str(a_prediction[\"mean_freq\"])+\" Hz\"\n if current_id==\"current0\":\n score_detail = { current_id: [ \"0 nA\",\n step_up_freq,\n step_down_freq,\n y ] }\n else:\n amp = \\\n self.ramp_up_down_currents[current_id][\"amp\"]\n score_detail = { current_id: [ str(amp)+\" nA\",\n step_up_freq,\n step_down_freq,\n y ] }\n # For the respective current id\n # Store the score breakdown in the dictionary\n score_breakdown.update(score_detail)\n # Store the score in the list\n list_of_scores.append(y.score)\n # Send all the scores and its breakdown to get OverallBinary score\n x2 = OverallBinaryScore.compute( list_of_scores, score_breakdown )\n score = OverallBinaryScore(x2)\n if score.score==1:\n score.description = \"The model \" + model.name + \" passed the \" + self.__class__.__name__ + \". The mean spike frequencies of a given amplitude of injection during ramp-up phase is different from those during ramp-down phase.\"\n else:\n score.description = \"The model \" + model.name + \" failed the \" + self.__class__.__name__ + \". 
The mean spike frequencies of an (or many) amplitude of injection are similar for ramp-up phase versus ramp-down phase.\"\n print score.description\n return score\n\n\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":1648019135874678500,"string":"1,648,019,135,874,678,500"},"line_mean":{"kind":"number","value":50.6735395189,"string":"50.67354"},"line_max":{"kind":"number","value":236,"string":"236"},"alpha_frac":{"kind":"number","value":0.5253042495,"string":"0.525304"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109489,"cells":{"repo_name":{"kind":"string","value":"lneuhaus/pyrpl"},"path":{"kind":"string","value":"pyrpl/test/test_registers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"7911"},"content":{"kind":"string","value":"import logging\nlogger = logging.getLogger(name=__name__)\nfrom pyrpl.modules import Module\nfrom pyrpl.attributes import *\nfrom .test_redpitaya import TestRedpitaya\n\n\nclass TestRegisters(TestRedpitaya):\n \"\"\" This test verifies that all registers behave as expected.\n\n The test is not only useful to test the python interface,\n but also checks that the fpga is not behaving stragely,\n i.e. loosing data or writing the wrong data. Thus, it is the\n principal test to execute on new fpga designs. \"\"\"\n def test_generator(self):\n if self.r is None:\n assert False\n for modulekey, module in self.r.__dict__.items():\n if isinstance(module, Module):\n logger.info(\"Scanning module %s...\", modulekey)\n for regkey, regclass in type(module).__dict__.items():\n if isinstance(regclass, BaseRegister):\n logger.info(\"Scanning register %s...\", regkey)\n yield self.register_validation, module, modulekey, \\\n regclass, regkey\n\n def register_validation(self, module, modulekey, reg, regkey):\n logger.debug(\"%s %s\", modulekey, regkey)\n if type(reg) is BaseRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents an int\n if not isinstance(value, int):\n assert False, 'wrong type: int != %s' % str(type(value))\n # write back to it to test setter\n module.__setattr__(regkey, value)\n newvalue = module.__getattribute__(regkey)\n assert value == newvalue, \\\n \"Mismatch: value=\" + str(value) + \" new value = \" + str(\n newvalue)\n if type(reg) is LongRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents an int\n if not isinstance(value, int) and not isinstance(value, long):\n assert False, 'wrong type: int/long != %s' % str(type(value))\n # write back to it to test setter\n module.__setattr__(regkey, value)\n newvalue = module.__getattribute__(regkey)\n if regkey not in [\"current_timestamp\"]:\n assert value == newvalue, \"Mismatch: value=\" + str(value) \\\n + \" new value = \" + str(newvalue)\n if type(reg) is BoolRegister or type(reg) is IORegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents an int\n if type(value) != bool:\n assert False\n # exclude read-only registers\n if regkey in ['_reset_writestate_machine',\n '_trigger_armed',\n '_trigger_delay_running',\n 'pretrig_ok',\n 'armed',\n 'on']:\n return\n # write opposite value and confirm it has changed\n module.__setattr__(regkey, not value)\n if value == module.__getattribute__(regkey):\n assert False\n # write back original value and check for equality\n module.__setattr__(regkey, value)\n if value != module.__getattribute__(regkey):\n assert False\n if type(reg) is 
FloatRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents a float\n if not isinstance(value, float):\n assert False\n # exclude read-only registers\n if regkey in ['pfd_integral',\n 'ch1_firstpoint',\n 'ch2_firstpoint',\n 'voltage_out1',\n 'voltage_out2',\n 'voltage_in1',\n 'voltage_in2',\n 'firstpoint',\n 'lastpoint'\n ] or modulekey == 'sampler':\n return\n # write something different and confirm change\n if value == 0:\n write = 1e10\n else:\n write = 0\n module.__setattr__(regkey, write)\n if value == module.__getattribute__(regkey):\n assert False\n # write sth negative\n write = -1e10\n module.__setattr__(regkey, write)\n if module.__getattribute__(regkey) >= 0:\n if reg.signed:\n assert False\n else:\n # unsigned registers should use absolute value and\n # therefore not be zero when assigned large negative values\n if module.__getattribute__(regkey) == 0:\n assert False\n # set back original value\n module.__setattr__(regkey, value)\n if value != module.__getattribute__(regkey):\n assert False\n if type(reg) is PhaseRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents a float\n if not isinstance(value, float):\n assert False\n # make sure any random phase has an error below 1e-6 degrees !\n if regkey not in ['scopetriggerphase']:\n for phase in np.linspace(-1234, 5678, 90):\n module.__setattr__(regkey, phase)\n diff = abs(module.__getattribute__(regkey) - (phase % 360))\n bits = getattr(module.__class__, regkey).bits\n thr = 360.0/2**bits/2 # factor 2 because rounding is used\n if diff > thr:\n assert False, \\\n \"at phase \" + str(phase) + \": diff = \" + str(diff)\n # set back original value\n module.__setattr__(regkey, value)\n if value != module.__getattribute__(regkey):\n assert False\n if type(reg) is FrequencyRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents a float\n if not isinstance(value, float):\n assert False\n # make sure any frequency has an error below 100 mHz!\n if regkey not in []:\n for freq in [0, 1, 10, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7,\n 125e6 / 2]: # FrequencyRegisters are now limited.\n module.__setattr__(regkey, freq)\n diff = abs(module.__getattribute__(regkey) - freq)\n if diff > 0.1:\n assert False, \\\n \"at freq \" + str(freq) + \": diff = \" + str(diff)\n # set back original value\n module.__setattr__(regkey, value)\n if value != module.__getattribute__(regkey):\n assert False\n if type(reg) is SelectRegister:\n # try to read\n value = module.__getattribute__(regkey)\n # make sure Register represents an int\n if not isinstance((sorted(reg.options(module))[0]), type(value)):\n assert False\n # exclude read-only registers\n if regkey in [\"id\"]:\n return\n # try all options and confirm change that they are saved\n for option in sorted(reg.options(module)):\n module.__setattr__(regkey, option)\n if option != module.__getattribute__(regkey):\n assert False\n # set back original value\n module.__setattr__(regkey, value)\n if value != module.__getattribute__(regkey):\n assert False\n 
return"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-5935855645894125000,"string":"-5,935,855,645,894,125,000"},"line_mean":{"kind":"number","value":44.7341040462,"string":"44.734104"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5025913285,"string":"0.502591"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109490,"cells":{"repo_name":{"kind":"string","value":"davidhawkes11/p3w"},"path":{"kind":"string","value":"p3w_06.0d.3.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1072"},"content":{"kind":"string","value":"# Code to ask for user's two favourite numbers, to operate on them and to display the output to the screen.\r\n# Input: two numbers, integer variables\r\n# Output: the result of a mathematical operation on the two numbers\r\n# The above text is commentary. The actual program starts below:\r\n\r\nprint (\"\\n\\n\\nPython 3.0 Workbook\\nStudent Work Booklet\\nStudent Activity p3w_06.0d.3\\n\\n\")\r\nprint (\"A program to demonstrate string selection and to display the output to the screen.\\n\\n\" )\r\n\r\n\r\nnumber1 = int(input(\"What is your first favourite number? \"))\r\nnumber2 = int(input(\"What is your second favourite number? \"))\r\n\r\nmenu = \"What would you like to do with these two numbers? Enter 1, 2, 3, 4 or 9:\\n\\\r\n 1. Add +\\n\\\r\n 2. Subtract -\\n\\\r\n 3. Multiply *\\n\\\r\n 4. Divide /\\n\\\r\n 9. Quit\\n\"\r\n\r\nx = int(input(menu))\r\nif x == 1:\r\n print(\"\", number1 + number2)\r\nelif x == 2:\r\n print(\"\", number1- number2)\r\nelif x == 3:\r\n print(\"\", number1 * number2)\r\nelif x == 4:\r\n print(\"\", number1 / number2)\r\nelif x == 9:\r\n print(\"Goodbye!\")\r\n\r\n\r\n\r\n\r\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":1125062174275446800,"string":"1,125,062,174,275,446,800"},"line_mean":{"kind":"number","value":29.5294117647,"string":"29.529412"},"line_max":{"kind":"number","value":107,"string":"107"},"alpha_frac":{"kind":"number","value":0.6324626866,"string":"0.632463"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109491,"cells":{"repo_name":{"kind":"string","value":"orf/websocket_stdout_example"},"path":{"kind":"string","value":"runner.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3081"},"content":{"kind":"string","value":"from twisted.internet import reactor, protocol\nfrom autobahn.websocket import WebSocketServerFactory, \\\n WebSocketServerProtocol, \\\n listenWS\nfrom twisted.python.log import startLogging, msg\nimport sys\nstartLogging(sys.stdout)\n\n# Examples:\n# runner.py /bin/sh -c \"tail -f /var/log/nginx/access.log | grep -v secret_admin_page\" --line-buffered | awk '{\\$1=\\\"\\\"; print}'\"\n# runner.py tail tail -F /var/log/nginx/access.log\n\nCOMMAND_NAME = sys.argv[1]\nCOMMAND_ARGS = sys.argv[1:]\nLOCAL_ONLY = False\nDEBUG = True\n\n\nclass ProcessProtocol(protocol.ProcessProtocol):\n \"\"\" I handle a child process launched via reactor.spawnProcess.\n I just buffer the output into a list and call WebSocketProcessOutputterThingFactory.broadcast when\n any new output is read\n \"\"\"\n def __init__(self, websocket_factory):\n self.ws = websocket_factory\n self.buffer = []\n \n def outReceived(self, message):\n self.ws.broadcast(message)\n self.buffer.append(message)\n self.buffer = self.buffer[-10:] # Last 10 messages please\n\n def errReceived(self, data):\n print \"Error: %s\" % data\n\n\n# 
http://autobahn.ws/python\nclass WebSocketProcessOutputterThing(WebSocketServerProtocol):\n \"\"\" I handle a single connected client. We don't need to do much here, simply call the register and un-register\n functions when needed.\n \"\"\"\n def onOpen(self):\n self.factory.register(self)\n for line in self.factory.process.buffer:\n self.sendMessage(line)\n\n def connectionLost(self, reason):\n WebSocketServerProtocol.connectionLost(self, reason)\n #super(WebSocketProcessOutputterThing, self).connectionLost(self, reason)\n self.factory.unregister(self)\n\n\nclass WebSocketProcessOutputterThingFactory(WebSocketServerFactory):\n \"\"\" I maintain a list of connected clients and provide a method for pushing a single message to all of them.\n \"\"\"\n protocol = WebSocketProcessOutputterThing\n \n def __init__(self, *args, **kwargs):\n WebSocketServerFactory.__init__(self, *args, **kwargs)\n #super(WebSocketProcessOutputterThingFactory, self).__init__(self, *args, **kwargs)\n self.clients = []\n self.process = ProcessProtocol(self)\n reactor.spawnProcess(self.process,COMMAND_NAME, COMMAND_ARGS, {}, usePTY=True)\n\n def register(self, client):\n msg(\"Registered client %s\" % client)\n if not client in self.clients:\n self.clients.append(client)\n\n def unregister(self, client):\n msg(\"Unregistered client %s\" % client)\n if client in self.clients:\n self.clients.remove(client)\n\n def broadcast(self, message):\n for client in self.clients:\n client.sendMessage(message)\n\n\nif __name__ == \"__main__\":\n print \"Running process %s with args %s\" % (COMMAND_NAME, COMMAND_ARGS)\n factory = WebSocketProcessOutputterThingFactory(\"ws://%s:9000\" % (\"localhost\" if LOCAL_ONLY else \"0.0.0.0\"), debug=False)\n listenWS(factory)\n reactor.run()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2425071918070515700,"string":"2,425,071,918,070,515,700"},"line_mean":{"kind":"number","value":35.6785714286,"string":"35.678571"},"line_max":{"kind":"number","value":129,"string":"129"},"alpha_frac":{"kind":"number","value":0.6689386563,"string":"0.668939"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109492,"cells":{"repo_name":{"kind":"string","value":"googleapis/googleapis-gen"},"path":{"kind":"string","value":"google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/ad_group_bid_modifier_service/client.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"23635"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom collections import OrderedDict\nfrom distutils import util\nimport os\nimport re\nfrom typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union\nimport pkg_resources\n\nfrom google.api_core import client_options as client_options_lib # type: ignore\nfrom google.api_core import exceptions as core_exceptions # type: ignore\nfrom google.api_core import gapic_v1 # type: ignore\nfrom 
google.api_core import retry as retries # type: ignore\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.auth.transport import mtls # type: ignore\nfrom google.auth.transport.grpc import SslCredentials # type: ignore\nfrom google.auth.exceptions import MutualTLSChannelError # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\nfrom google.ads.googleads.v8.common.types import criteria\nfrom google.ads.googleads.v8.enums.types import bid_modifier_source\nfrom google.ads.googleads.v8.resources.types import ad_group_bid_modifier\nfrom google.ads.googleads.v8.services.types import ad_group_bid_modifier_service\nfrom google.rpc import status_pb2 # type: ignore\nfrom .transports.base import AdGroupBidModifierServiceTransport, DEFAULT_CLIENT_INFO\nfrom .transports.grpc import AdGroupBidModifierServiceGrpcTransport\n\n\nclass AdGroupBidModifierServiceClientMeta(type):\n \"\"\"Metaclass for the AdGroupBidModifierService client.\n\n This provides class-level methods for building and retrieving\n support objects (e.g. transport) without polluting the client instance\n objects.\n \"\"\"\n _transport_registry = OrderedDict() # type: Dict[str, Type[AdGroupBidModifierServiceTransport]]\n _transport_registry['grpc'] = AdGroupBidModifierServiceGrpcTransport\n\n def get_transport_class(cls,\n label: str = None,\n ) -> Type[AdGroupBidModifierServiceTransport]:\n \"\"\"Return an appropriate transport class.\n\n Args:\n label: The name of the desired transport. If none is\n provided, then the first transport in the registry is used.\n\n Returns:\n The transport class to use.\n \"\"\"\n # If a specific transport is requested, return that one.\n if label:\n return cls._transport_registry[label]\n\n # No transport is requested; return the default (that is, the first one\n # in the dictionary).\n return next(iter(cls._transport_registry.values()))\n\n\nclass AdGroupBidModifierServiceClient(metaclass=AdGroupBidModifierServiceClientMeta):\n \"\"\"Service to manage ad group bid modifiers.\"\"\"\n\n @staticmethod\n def _get_default_mtls_endpoint(api_endpoint):\n \"\"\"Convert api endpoint to mTLS endpoint.\n Convert \"*.sandbox.googleapis.com\" and \"*.googleapis.com\" to\n \"*.mtls.sandbox.googleapis.com\" and \"*.mtls.googleapis.com\" respectively.\n Args:\n api_endpoint (Optional[str]): the api endpoint to convert.\n Returns:\n str: converted mTLS api endpoint.\n \"\"\"\n if not api_endpoint:\n return api_endpoint\n\n mtls_endpoint_re = re.compile(\n r\"(?P[^.]+)(?P\\.mtls)?(?P\\.sandbox)?(?P\\.googleapis\\.com)?\"\n )\n\n m = mtls_endpoint_re.match(api_endpoint)\n name, mtls, sandbox, googledomain = m.groups()\n if mtls or not googledomain:\n return api_endpoint\n\n if sandbox:\n return api_endpoint.replace(\n \"sandbox.googleapis.com\", \"mtls.sandbox.googleapis.com\"\n )\n\n return api_endpoint.replace(\".googleapis.com\", \".mtls.googleapis.com\")\n\n DEFAULT_ENDPOINT = 'googleads.googleapis.com'\n DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore\n DEFAULT_ENDPOINT\n )\n\n @classmethod\n def from_service_account_info(cls, info: dict, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials info.\n\n Args:\n info (dict): The service account private key info.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n AdGroupBidModifierServiceClient: The constructed client.\n \"\"\"\n credentials = 
service_account.Credentials.from_service_account_info(info)\n kwargs[\"credentials\"] = credentials\n return cls(*args, **kwargs)\n\n @classmethod\n def from_service_account_file(cls, filename: str, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n AdGroupBidModifierServiceClient: The constructed client.\n \"\"\"\n credentials = service_account.Credentials.from_service_account_file(\n filename)\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n from_service_account_json = from_service_account_file\n\n @property\n def transport(self) -> AdGroupBidModifierServiceTransport:\n \"\"\"Return the transport used by the client instance.\n\n Returns:\n AdGroupBidModifierServiceTransport: The transport used by the client instance.\n \"\"\"\n return self._transport\n\n @staticmethod\n def ad_group_path(customer_id: str,ad_group_id: str,) -> str:\n \"\"\"Return a fully-qualified ad_group string.\"\"\"\n return \"customers/{customer_id}/adGroups/{ad_group_id}\".format(customer_id=customer_id, ad_group_id=ad_group_id, )\n\n @staticmethod\n def parse_ad_group_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a ad_group path into its component segments.\"\"\"\n m = re.match(r\"^customers/(?P.+?)/adGroups/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n @staticmethod\n def ad_group_bid_modifier_path(customer_id: str,ad_group_id: str,criterion_id: str,) -> str:\n \"\"\"Return a fully-qualified ad_group_bid_modifier string.\"\"\"\n return \"customers/{customer_id}/adGroupBidModifiers/{ad_group_id}~{criterion_id}\".format(customer_id=customer_id, ad_group_id=ad_group_id, criterion_id=criterion_id, )\n\n @staticmethod\n def parse_ad_group_bid_modifier_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a ad_group_bid_modifier path into its component segments.\"\"\"\n m = re.match(r\"^customers/(?P.+?)/adGroupBidModifiers/(?P.+?)~(?P.+?)$\", path)\n return m.groupdict() if m else {}\n @staticmethod\n def common_billing_account_path(billing_account: str, ) -> str:\n \"\"\"Return a fully-qualified billing_account string.\"\"\"\n return \"billingAccounts/{billing_account}\".format(billing_account=billing_account, )\n\n @staticmethod\n def parse_common_billing_account_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a billing_account path into its component segments.\"\"\"\n m = re.match(r\"^billingAccounts/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_folder_path(folder: str, ) -> str:\n \"\"\"Return a fully-qualified folder string.\"\"\"\n return \"folders/{folder}\".format(folder=folder, )\n\n @staticmethod\n def parse_common_folder_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a folder path into its component segments.\"\"\"\n m = re.match(r\"^folders/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_organization_path(organization: str, ) -> str:\n \"\"\"Return a fully-qualified organization string.\"\"\"\n return \"organizations/{organization}\".format(organization=organization, )\n\n @staticmethod\n def parse_common_organization_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a organization path into its component segments.\"\"\"\n m = re.match(r\"^organizations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def 
common_project_path(project: str, ) -> str:\n \"\"\"Return a fully-qualified project string.\"\"\"\n return \"projects/{project}\".format(project=project, )\n\n @staticmethod\n def parse_common_project_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a project path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n @staticmethod\n def common_location_path(project: str, location: str, ) -> str:\n \"\"\"Return a fully-qualified location string.\"\"\"\n return \"projects/{project}/locations/{location}\".format(project=project, location=location, )\n\n @staticmethod\n def parse_common_location_path(path: str) -> Dict[str,str]:\n \"\"\"Parse a location path into its component segments.\"\"\"\n m = re.match(r\"^projects/(?P.+?)/locations/(?P.+?)$\", path)\n return m.groupdict() if m else {}\n\n def __init__(self, *,\n credentials: Optional[ga_credentials.Credentials] = None,\n transport: Union[str, AdGroupBidModifierServiceTransport, None] = None,\n client_options: Optional[client_options_lib.ClientOptions] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiate the ad group bid modifier service client.\n\n Args:\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n transport (Union[str, ~.AdGroupBidModifierServiceTransport]): The\n transport to use. If set to None, a transport is chosen\n automatically.\n client_options (google.api_core.client_options.ClientOptions): Custom options for the\n client. It won't take effect if a ``transport`` instance is provided.\n (1) The ``api_endpoint`` property can be used to override the\n default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT\n environment variable can also be used to override the endpoint:\n \"always\" (always use the default mTLS endpoint), \"never\" (always\n use the default regular endpoint) and \"auto\" (auto switch to the\n default mTLS endpoint if client certificate is present, this is\n the default value). However, the ``api_endpoint`` property takes\n precedence if provided.\n (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable\n is \"true\", then the ``client_cert_source`` property can be used\n to provide client certificate for mutual TLS transport. If\n not provided, the default SSL client certificate will be used if\n present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is \"false\" or not\n set, no client certificate will be used.\n client_info (google.api_core.gapic_v1.client_info.ClientInfo):\n The client info used to send a user-agent string along with\n API requests. 
If ``None``, then default info will be used.\n Generally, you only need to set this if you're developing\n your own client library.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport\n creation failed for any reason.\n \"\"\"\n if isinstance(client_options, dict):\n client_options = client_options_lib.from_dict(client_options)\n if client_options is None:\n client_options = client_options_lib.ClientOptions()\n\n # Create SSL credentials for mutual TLS if needed.\n use_client_cert = bool(util.strtobool(os.getenv(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"false\")))\n\n ssl_credentials = None\n is_mtls = False\n if use_client_cert:\n if client_options.client_cert_source:\n import grpc # type: ignore\n\n cert, key = client_options.client_cert_source()\n ssl_credentials = grpc.ssl_channel_credentials(\n certificate_chain=cert, private_key=key\n )\n is_mtls = True\n else:\n creds = SslCredentials()\n is_mtls = creds.is_mtls\n ssl_credentials = creds.ssl_credentials if is_mtls else None\n\n # Figure out which api endpoint to use.\n if client_options.api_endpoint is not None:\n api_endpoint = client_options.api_endpoint\n else:\n use_mtls_env = os.getenv(\"GOOGLE_API_USE_MTLS_ENDPOINT\", \"auto\")\n if use_mtls_env == \"never\":\n api_endpoint = self.DEFAULT_ENDPOINT\n elif use_mtls_env == \"always\":\n api_endpoint = self.DEFAULT_MTLS_ENDPOINT\n elif use_mtls_env == \"auto\":\n api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT\n else:\n raise MutualTLSChannelError(\n \"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always\"\n )\n\n # Save or instantiate the transport.\n # Ordinarily, we provide the transport, but allowing a custom transport\n # instance provides an extensibility point for unusual situations.\n if isinstance(transport, AdGroupBidModifierServiceTransport):\n # transport is a AdGroupBidModifierServiceTransport instance.\n if credentials:\n raise ValueError('When providing a transport instance, '\n 'provide its credentials directly.')\n self._transport = transport\n elif isinstance(transport, str):\n Transport = type(self).get_transport_class(transport)\n self._transport = Transport(\n credentials=credentials, host=self.DEFAULT_ENDPOINT\n )\n else:\n self._transport = AdGroupBidModifierServiceGrpcTransport(\n credentials=credentials,\n host=api_endpoint,\n ssl_channel_credentials=ssl_credentials,\n client_info=client_info,\n )\n\n def get_ad_group_bid_modifier(self,\n request: ad_group_bid_modifier_service.GetAdGroupBidModifierRequest = None,\n *,\n resource_name: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> ad_group_bid_modifier.AdGroupBidModifier:\n r\"\"\"Returns the requested ad group bid modifier in full detail.\n\n List of thrown errors: `AuthenticationError <>`__\n `AuthorizationError <>`__ `HeaderError <>`__\n `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__\n\n Args:\n request (:class:`google.ads.googleads.v8.services.types.GetAdGroupBidModifierRequest`):\n The request object. Request message for\n [AdGroupBidModifierService.GetAdGroupBidModifier][google.ads.googleads.v8.services.AdGroupBidModifierService.GetAdGroupBidModifier].\n resource_name (:class:`str`):\n Required. 
The resource name of the ad\n group bid modifier to fetch.\n\n This corresponds to the ``resource_name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.ads.googleads.v8.resources.types.AdGroupBidModifier:\n Represents an ad group bid modifier.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n if request is not None and any([resource_name]):\n raise ValueError('If the `request` argument is set, then none of '\n 'the individual field arguments should be set.')\n\n # Minor optimization to avoid making a copy if the user passes\n # in a ad_group_bid_modifier_service.GetAdGroupBidModifierRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, ad_group_bid_modifier_service.GetAdGroupBidModifierRequest):\n request = ad_group_bid_modifier_service.GetAdGroupBidModifierRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if resource_name is not None:\n request.resource_name = resource_name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.get_ad_group_bid_modifier]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n ('resource_name', request.resource_name),\n )),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def mutate_ad_group_bid_modifiers(self,\n request: ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest = None,\n *,\n customer_id: str = None,\n operations: Sequence[ad_group_bid_modifier_service.AdGroupBidModifierOperation] = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> ad_group_bid_modifier_service.MutateAdGroupBidModifiersResponse:\n r\"\"\"Creates, updates, or removes ad group bid modifiers. Operation\n statuses are returned.\n\n List of thrown errors: `AdGroupBidModifierError <>`__\n `AuthenticationError <>`__ `AuthorizationError <>`__\n `ContextError <>`__ `CriterionError <>`__ `DatabaseError <>`__\n `DistinctError <>`__ `FieldError <>`__ `FieldMaskError <>`__\n `HeaderError <>`__ `IdError <>`__ `InternalError <>`__\n `MutateError <>`__ `NewResourceCreationError <>`__\n `NotEmptyError <>`__ `OperatorError <>`__ `QuotaError <>`__\n `RangeError <>`__ `RequestError <>`__\n `ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__\n `StringFormatError <>`__ `StringLengthError <>`__\n\n Args:\n request (:class:`google.ads.googleads.v8.services.types.MutateAdGroupBidModifiersRequest`):\n The request object. Request message for\n [AdGroupBidModifierService.MutateAdGroupBidModifiers][google.ads.googleads.v8.services.AdGroupBidModifierService.MutateAdGroupBidModifiers].\n customer_id (:class:`str`):\n Required. 
ID of the customer whose ad\n group bid modifiers are being modified.\n\n This corresponds to the ``customer_id`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdGroupBidModifierOperation]`):\n Required. The list of operations to\n perform on individual ad group bid\n modifiers.\n\n This corresponds to the ``operations`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.ads.googleads.v8.services.types.MutateAdGroupBidModifiersResponse:\n Response message for ad group bid\n modifiers mutate.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n if request is not None and any([customer_id, operations]):\n raise ValueError('If the `request` argument is set, then none of '\n 'the individual field arguments should be set.')\n\n # Minor optimization to avoid making a copy if the user passes\n # in a ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest.\n # There's no risk of modifying the input as we've already verified\n # there are no flattened fields.\n if not isinstance(request, ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest):\n request = ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest(request)\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if customer_id is not None:\n request.customer_id = customer_id\n if operations is not None:\n request.operations = operations\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = self._transport._wrapped_methods[self._transport.mutate_ad_group_bid_modifiers]\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata((\n ('customer_id', request.customer_id),\n )),\n )\n\n # Send the request.\n response = rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n\n__all__ = (\n 'AdGroupBidModifierServiceClient',\n)\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-5977166958312141000,"string":"-5,977,166,958,312,141,000"},"line_mean":{"kind":"number","value":45.162109375,"string":"45.162109"},"line_max":{"kind":"number","value":175,"string":"175"},"alpha_frac":{"kind":"number","value":0.6292363021,"string":"0.629236"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109493,"cells":{"repo_name":{"kind":"string","value":"JohnVillalovos/hardlinkpy"},"path":{"kind":"string","value":"hardlinkpy/hardlink.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"24410"},"content":{"kind":"string","value":"#!/usr/bin/python3 -ttu\n\n# hardlink - Goes through a directory structure and creates hardlinks for\n# files which are identical.\n#\n# Copyright (C) 2003 - 2019 John L. 
Villalovos, Hillsboro, Oregon\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc., 59\n# Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n#\n#\n# ------------------------------------------------------------------------\n# John Villalovos\n# email: john@sodarock.com\n# http://www.sodarock.com/\n#\n# Inspiration for this program came from the hardlink.c code. I liked what it\n# did but did not like the code itself, to me it was very unmaintainable. So I\n# rewrote in C++ and then I rewrote it in python. In reality this code is\n# nothing like the original hardlink.c, since I do things quite differently.\n# Even though this code is written in python the performance of the python\n# version is much faster than the hardlink.c code, in my limited testing. This\n# is mainly due to use of different algorithms.\n#\n# Original inspirational hardlink.c code was written by: Jakub Jelinek\n# \n#\n# ------------------------------------------------------------------------\n#\n# TODO(jlvillal):\n# * Thinking it might make sense to walk the entire tree first and collect\n# up all the file information before starting to do comparisons. Thought\n# here is we could find all the files which are hardlinked to each other\n# and then do a comparison. If they are identical then hardlink\n# everything at once.\n\nimport argparse\nimport logging\nimport os\nimport re\nimport stat\nimport sys\nimport time\nfrom typing import Dict, List, NamedTuple, Optional, Tuple\n\n\nclass FileInfo(NamedTuple):\n filename: str\n stat_info: os.stat_result\n\n\n# MAX_HASHES must be a power of 2, so that MAX_HASHES - 1 will be a value with\n# all bits set to 1\nMAX_HASHES = 2 ** 17\nassert (MAX_HASHES & (MAX_HASHES - 1)) == 0, \"MAX_HASHES must be a power of 2\"\nMAX_HASHES_MINUS_1 = MAX_HASHES - 1\n\n\n# Hash functions\n# Create a hash from a file's size and time values\ndef hash_size_time(*, size: int, time: float) -> int:\n return (size ^ int(time)) & (MAX_HASHES_MINUS_1)\n\n\ndef hash_size(size: int) -> int:\n return (size) & (MAX_HASHES_MINUS_1)\n\n\ndef hash_value(*, size: int, time: float, notimestamp: bool) -> int:\n if notimestamp:\n return hash_size(size)\n else:\n return hash_size_time(size=size, time=time)\n\n\n# If two files have the same inode and are on the same device then they are\n# already hardlinked.\ndef is_already_hardlinked(*, st1: os.stat_result, st2: os.stat_result) -> bool:\n result = (st1.st_ino == st2.st_ino) and (st1.st_dev == st2.st_dev)\n return result\n\n\n# Determine if a file is eligibile for hardlinking. 
Files will only be\n# considered for hardlinking if this function returns true.\ndef eligible_for_hardlink(\n *, st1: os.stat_result, st2: os.stat_result, args: argparse.Namespace\n) -> bool:\n\n # Must meet the following\n # criteria:\n # * NOT already hard linked to each other\n # * sizes are equal\n # * size is greater than or equal to args.min_size\n # * file modes are equal OR we are comparing content only\n # * owner user ids are equal OR we are comparing content only\n # * owner group ids are equal OR we are comparing content only\n # * modified times are equal OR date hashing is off OR we are comparing\n # content only\n # * device is the same\n\n # * sizes are equal\n if not (st1.st_size == st2.st_size):\n return False\n\n # * size is greater than or equal to args.min_size\n # The size should always be greater than or equal to the min size as the\n # caller should ensure that, but to be safe we check anyway.\n if st1.st_size < args.min_size:\n return False\n\n if not args.content_only:\n # * file modes are equal\n if not (st1.st_mode == st2.st_mode):\n return False\n\n # * owner user ids are equal\n if not (st1.st_uid == st2.st_uid):\n return False\n\n # * owner group ids are equal\n if not (st1.st_gid == st2.st_gid):\n return False\n\n if not args.content_only and not args.notimestamp:\n # * modified times are equal\n if not (st1.st_mtime == st2.st_mtime):\n return False\n\n # * device is the same\n if not (st1.st_dev == st2.st_dev):\n return False\n\n # * NOT already hard linked to each other\n # The files should not be hardlinked to each other as the caller should\n # ensure that, but to be safe we check anyway.\n if is_already_hardlinked(st1=st1, st2=st2):\n return False\n\n return True\n\n\ndef are_file_contents_equal(\n *, filename1: str, filename2: str, args: argparse.Namespace\n) -> bool:\n \"\"\"Determine if the contents of two files are equal.\n\n **!! 
This function assumes that the file sizes of the two files are\n equal.\n \"\"\"\n\n try:\n # Open our two files\n with open(filename1, \"rb\") as file1:\n with open(filename2, \"rb\") as file2:\n gStats.did_comparison()\n if args.show_progress:\n print(f\"Comparing: {filename1}\")\n print(f\" to : {filename2}\")\n buffer_size = 1024 * 1024\n while True:\n buffer1 = file1.read(buffer_size)\n buffer2 = file2.read(buffer_size)\n if buffer1 != buffer2:\n return False\n\n if not buffer1:\n return True\n except (OSError, PermissionError) as exc:\n print(\"Error opening file in are_file_contents_equal()\")\n print(\"Was attempting to open:\")\n print(f\"file1: {filename1}\")\n print(f\"file2: {filename2}\")\n print(\"When an exception occurred: {}\".format(exc))\n return False\n\n\n# Determines if two files should be hard linked together.\ndef are_files_hardlinkable(\n *, file_info_1: FileInfo, file_info_2: FileInfo, args: argparse.Namespace\n) -> bool:\n\n # See if the files are eligible for hardlinking\n if not eligible_for_hardlink(\n st1=file_info_1.stat_info, st2=file_info_2.stat_info, args=args\n ):\n return False\n\n if args.samename:\n # Check if the base filenames are the same\n basename1 = os.path.basename(file_info_1.filename)\n basename2 = os.path.basename(file_info_2.filename)\n if basename1 != basename2:\n return False\n\n return are_file_contents_equal(\n filename1=file_info_1.filename, filename2=file_info_2.filename, args=args\n )\n\n\n# Hardlink two files together\ndef hardlink_files(\n *,\n sourcefile: str,\n destfile: str,\n stat_info: os.stat_result,\n args: argparse.Namespace,\n) -> bool:\n # rename the destination file to save it\n temp_name = destfile + \".$$$___cleanit___$$$\"\n try:\n if not args.dry_run:\n os.rename(destfile, temp_name)\n except OSError as error:\n print(f\"Failed to rename: {destfile} to {temp_name}\")\n print(error)\n result = False\n else:\n # Now link the sourcefile to the destination file\n try:\n if not args.dry_run:\n os.link(sourcefile, destfile)\n except: # noqa TODO(fix this bare except)\n logging.exception(f\"Failed to hardlink: {sourcefile} to {destfile}\")\n # Try to recover\n try:\n os.rename(temp_name, destfile)\n except: # noqa TODO(fix this bare except)\n logging.exception(\n \"BAD BAD - failed to rename back {} to {}\".format(\n temp_name, destfile\n )\n )\n result = False\n else:\n # hard link succeeded\n # Delete the renamed version since we don't need it.\n if not args.dry_run:\n try:\n os.unlink(temp_name)\n except FileNotFoundError:\n # If our temporary file disappears under us, ignore it.\n # Probably an rsync is running and deleted it.\n logging.warning(f\"Temporary file vanished: {temp_name}\")\n pass\n # update our stats\n gStats.did_hardlink(sourcefile, destfile, stat_info)\n if args.show_progress:\n if args.dry_run:\n print(\"Did NOT link. Dry run\")\n size = stat_info.st_size\n print(f\"Linked: {sourcefile}\")\n print(f\" to: {destfile}, saved {size}\")\n result = True\n return result\n\n\ndef hardlink_identical_files(\n *, dir_entry: os.DirEntry, args: argparse.Namespace\n) -> None:\n \"\"\"hardlink identical files\n\n The purpose of this function is to hardlink files together if the files are\n the same. To be considered the same they must be equal in the following\n criteria:\n * file size\n * file contents\n * file mode (default)\n * owner user id (default)\n * owner group id (default)\n * modified time (default)\n\n Also, files will only be hardlinked if they are on the same device. 
This\n is because hardlink does not allow you to hardlink across file systems.\n\n The basic idea on how this is done is as follows:\n\n Walk the directory tree building up a list of the files.\n\n For each file, generate a simple hash based on the size and modified time.\n\n For any other files which share this hash make sure that they are not\n identical to this file. If they are identical then hardlink the files.\n\n Add the file info to the list of files that have the same hash value.\n \"\"\"\n\n for exclude in args.excludes:\n if re.search(exclude, dir_entry.path):\n return\n\n stat_info = dir_entry.stat(follow_symlinks=False)\n # Is it a regular file?\n if stat.S_ISREG(stat_info.st_mode):\n # Create the hash for the file.\n file_hash = hash_value(\n size=stat_info.st_size,\n time=stat_info.st_mtime,\n notimestamp=(args.notimestamp or args.content_only),\n )\n # Bump statistics count of regular files found.\n gStats.found_regular_file()\n if args.verbose >= 2:\n print(f\"File: {dir_entry.path}\")\n work_file_info = (dir_entry.path, stat_info)\n work_file_info = FileInfo(filename=dir_entry.path, stat_info=stat_info)\n if file_hash in file_hashes:\n # We have file(s) that have the same hash as our current file.\n # Let's go through the list of files with the same hash and see if\n # we are already hardlinked to any of them.\n for temp_file_info in file_hashes[file_hash]:\n if is_already_hardlinked(st1=stat_info, st2=temp_file_info.stat_info):\n gStats.found_hardlink(\n temp_file_info.filename,\n dir_entry.path,\n temp_file_info.stat_info,\n )\n break\n else:\n # We did not find this file as hardlinked to any other file\n # yet. So now lets see if our file should be hardlinked to any\n # of the other files with the same hash.\n for temp_file_info in file_hashes[file_hash]:\n if are_files_hardlinkable(\n file_info_1=work_file_info,\n # file_info_2=(temp_filename, temp_stat_info),\n file_info_2=temp_file_info,\n args=args,\n ):\n hardlink_files(\n sourcefile=temp_file_info.filename,\n destfile=dir_entry.path,\n stat_info=temp_file_info.stat_info,\n args=args,\n )\n break\n else:\n # The file should NOT be hardlinked to any of the other\n # files with the same hash. 
So we will add it to the list\n # of files.\n file_hashes[file_hash].append(work_file_info)\n else:\n # There weren't any other files with the same hash value so we will\n # create a new entry and store our file.\n file_hashes[file_hash] = [work_file_info]\n\n\nclass cStatistics(object):\n def __init__(self) -> None:\n self.dircount = 0 # how many directories we find\n self.regularfiles = 0 # how many regular files we find\n self.comparisons = 0 # how many file content comparisons\n self.hardlinked_thisrun = 0 # hardlinks done this run\n self.hardlinked_previously = 0\n # hardlinks that are already existing\n self.bytes_saved_thisrun = 0 # bytes saved by hardlinking this run\n self.bytes_saved_previously = 0 # bytes saved by previous hardlinks\n self.hardlinkstats: List[\n Tuple[str, str]\n ] = [] # list of files hardlinked this run\n self.starttime = time.time() # track how long it takes\n self.previouslyhardlinked: Dict[\n str, Tuple[os.stat_result, List[str]]\n ] = {} # list of files hardlinked previously\n\n def found_directory(self) -> None:\n self.dircount = self.dircount + 1\n\n def found_regular_file(self) -> None:\n self.regularfiles = self.regularfiles + 1\n\n def did_comparison(self) -> None:\n self.comparisons = self.comparisons + 1\n\n def found_hardlink(\n self, sourcefile: str, destfile: str, stat_info: os.stat_result\n ) -> None:\n filesize = stat_info.st_size\n self.hardlinked_previously = self.hardlinked_previously + 1\n self.bytes_saved_previously = self.bytes_saved_previously + filesize\n if sourcefile not in self.previouslyhardlinked:\n self.previouslyhardlinked[sourcefile] = (stat_info, [destfile])\n else:\n self.previouslyhardlinked[sourcefile][1].append(destfile)\n\n def did_hardlink(\n self, sourcefile: str, destfile: str, stat_info: os.stat_result\n ) -> None:\n filesize = stat_info.st_size\n self.hardlinked_thisrun = self.hardlinked_thisrun + 1\n self.bytes_saved_thisrun = self.bytes_saved_thisrun + filesize\n self.hardlinkstats.append((sourcefile, destfile))\n\n def print_stats(self, args: argparse.Namespace) -> None:\n if args.show_progress:\n print(\"\")\n print(\"Hard linking Statistics:\")\n # Print out the stats for the files we hardlinked, if any\n if self.previouslyhardlinked and args.printprevious:\n keys = self.previouslyhardlinked.keys()\n print(\"Files Previously Hardlinked:\")\n for key in sorted(keys):\n stat_info, file_list = self.previouslyhardlinked[key]\n size = stat_info.st_size\n print(f\"Hardlinked together: {key}\")\n for filename in file_list:\n print(f\" : {filename}\")\n print(\n \"Size per file: {} Total saved: {}\".format(\n size, size * len(file_list)\n )\n )\n print()\n if self.hardlinkstats:\n if args.dry_run:\n print(\"Statistics reflect what would have happened if not a dry run\")\n print(\"Files Hardlinked this run:\")\n for (source, dest) in self.hardlinkstats:\n print(f\"Hardlinked: {source}\")\n print(f\" to: {dest}\")\n print()\n print(f\"Directories : {self.dircount:,}\")\n print(f\"Regular files : {self.regularfiles:,}\")\n print(f\"Comparisons : {self.comparisons:,}\")\n print(f\"Hardlinked this run : {self.hardlinked_thisrun:,}\")\n print(\n \"Total hardlinks : {:,}\".format(\n self.hardlinked_previously + self.hardlinked_thisrun\n )\n )\n print(\n \"Bytes saved this run : {:,} ({})\".format(\n self.bytes_saved_thisrun, humanize_number(self.bytes_saved_thisrun)\n )\n )\n totalbytes = self.bytes_saved_thisrun + self.bytes_saved_previously\n print(\n \"Total bytes saved : {:,} ({})\".format(\n totalbytes, 
humanize_number(totalbytes)\n )\n )\n run_time = time.time() - self.starttime\n print(\n \"Total run time : {:,.2f} seconds ({})\".format(\n run_time, humanize_time(run_time)\n )\n )\n\n\ndef humanize_time(seconds: float) -> str:\n if seconds > 3600: # 3600 seconds = 1 hour\n return \"{:0.2f} hours\".format(seconds / 3600.0)\n if seconds > 60:\n return \"{:0.2f} minutes\".format(seconds / 60.0)\n return f\"{seconds:,.2f} seconds\"\n\n\ndef humanize_number(number: int) -> str:\n if number > 1024 ** 4:\n return \"{:0.3f} tibibytes\".format(number / (1024.0 ** 4))\n if number > 1024 ** 3:\n return \"{:0.3f} gibibytes\".format(number / (1024.0 ** 3))\n if number > 1024 ** 2:\n return \"{:0.3f} mebibytes\".format(number / (1024.0 ** 2))\n if number > 1024:\n return \"{:0.3f} kibibytes\".format(number / 1024.0)\n return f\"{number} bytes\"\n\n\ndef parse_args(passed_args: Optional[List[str]] = None) -> argparse.Namespace:\n parser = argparse.ArgumentParser() # usage=usage)\n parser.add_argument(\n \"directories\", nargs=\"+\", metavar=\"DIRECTORY\", help=\"Directory name\"\n )\n parser.add_argument(\"--version\", action=\"version\", version=VERSION)\n parser.add_argument(\n \"-f\",\n \"--filenames-equal\",\n help=\"Filenames have to be identical\",\n action=\"store_true\",\n dest=\"samename\",\n )\n\n parser.add_argument(\n \"-n\", \"--dry-run\", help=\"Do NOT actually hardlink files\", action=\"store_true\"\n )\n\n parser.add_argument(\n \"-p\",\n \"--print-previous\",\n help=\"Print previously created hardlinks\",\n action=\"store_true\",\n dest=\"printprevious\",\n )\n\n parser.add_argument(\n \"--no-progress\",\n help=\"Don't print progress information during execution\",\n action=\"store_false\",\n dest=\"show_progress\",\n )\n\n parser.add_argument(\n \"-q\",\n \"--no-stats\",\n help=\"Do not print the final statistics\",\n action=\"store_false\",\n dest=\"printstats\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--timestamp-ignore\",\n \"--ignore-timestamp\",\n help=\"File modification times do NOT have to be identical\",\n action=\"store_true\",\n dest=\"notimestamp\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--content-only\",\n help=\"Only file contents have to match\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--min-size\",\n help=\"Minimum file size to perform a hard link. Must be 1 or greater\",\n type=int,\n default=1,\n )\n\n parser.add_argument(\n \"-x\",\n \"--exclude\",\n help=(\n \"Regular expression used to exclude files/dirs (may specify multiple \"\n \"times\"\n ),\n metavar=\"REGEX\",\n action=\"append\",\n dest=\"excludes\",\n default=[],\n )\n\n verbosity_group = parser.add_mutually_exclusive_group()\n verbosity_group.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Verbosity level. 
Can be used multiple times.\",\n action=\"count\",\n default=1,\n )\n\n verbosity_group.add_argument(\n \"--quiet\", help=\"Minimizes output\", action=\"store_true\"\n )\n\n args = parser.parse_args(args=passed_args)\n if args.quiet:\n args.verbose = 0\n args.show_progress = False\n args.printstats = False\n if args.min_size < 1:\n parser.error(\"-s/--min-size must be 1 or greater\")\n args.directories = [\n os.path.abspath(os.path.expanduser(dirname)) for dirname in args.directories\n ]\n for dirname in args.directories:\n if not os.path.isdir(dirname):\n parser.print_help()\n print()\n print(f\"Error: {dirname} is NOT a directory\")\n sys.exit(1)\n return args\n\n\ndef check_python_version() -> None:\n # Make sure we have the minimum required Python version\n if sys.version_info < (3, 6, 0):\n sys.exit(\"ERROR: This program requires Python 3.6 or higher to run\")\n\n\ndef setup_logger(verbose_level: int) -> None:\n log_level = logging.INFO\n if verbose_level >= 1:\n log_level = logging.DEBUG\n # Setup logging format.\n logging.basicConfig(\n format=\"%(levelname)s:%(filename)s:%(funcName)s():L%(lineno)d %(message)s\",\n level=log_level,\n )\n\n\n# Start of global declarations\ndebug = None\ndebug1 = None\n\ngStats = cStatistics()\n\nfile_hashes: Dict[int, List[FileInfo]] = {}\n\nVERSION = \"0.7.0 - 2020-05-13 (13-May-2020)\"\n\n\ndef main(passed_args: Optional[List[str]] = None) -> int:\n check_python_version()\n\n # Parse our argument list and get our list of directories\n args = parse_args(passed_args=passed_args)\n # Compile up our regexes ahead of time\n MIRROR_PL_REGEX = re.compile(r\"^\\.in\\.\")\n RSYNC_TEMP_REGEX = re.compile((r\"^\\..*\\.\\?{6,6}$\"))\n # Now go through all the directories that have been added.\n # NOTE: hardlink_identical_files() will add more directories to the\n # directories list as it finds them.\n directories = args.directories.copy()\n while directories:\n # Get the last directory in the list\n directory = directories.pop() + \"/\"\n if not os.path.isdir(directory):\n print(f\"{directory} is NOT a directory!\")\n else:\n gStats.found_directory()\n # Loop through all the files in the directory\n try:\n dir_entries = os.scandir(directory)\n except (OSError, PermissionError) as exc:\n print(\n f\"Error: Unable to do an os.scandir on: {directory} Skipping...\",\n exc,\n )\n continue\n directories_found = []\n for dir_entry in sorted(dir_entries, key=lambda x: x.name):\n pathname = dir_entry.path\n # Look at files/dirs beginning with \".\"\n if dir_entry.name.startswith(\".\"):\n # Ignore any mirror.pl files. These are the files that\n # start with \".in.\"\n if MIRROR_PL_REGEX.match(dir_entry.name):\n continue\n # Ignore any RSYNC files. These are files that have the\n # format .FILENAME.??????\n if RSYNC_TEMP_REGEX.match(dir_entry.name):\n continue\n if dir_entry.is_symlink():\n if debug1:\n print(f\"{pathname}: is a symbolic link, ignoring\")\n continue\n\n if dir_entry.is_dir():\n directories_found.append(pathname)\n continue\n\n if dir_entry.stat(follow_symlinks=False).st_size < args.min_size:\n if debug1:\n print(f\"{pathname}: Size is not large enough, ignoring\")\n continue\n hardlink_identical_files(dir_entry=dir_entry, args=args)\n # Add our found directories in reverse order because we pop them\n # off the end. 
Goal is to go through our directories in\n # alphabetical order.\n directories.extend(reversed(directories_found))\n if args.printstats:\n gStats.print_stats(args)\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":8132395972646693000,"string":"8,132,395,972,646,693,000"},"line_mean":{"kind":"number","value":34.7393850659,"string":"34.739385"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.5781646866,"string":"0.578165"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109494,"cells":{"repo_name":{"kind":"string","value":"cynnyx/doormat"},"path":{"kind":"string","value":"utils/measurement_convergence.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1267"},"content":{"kind":"string","value":"#/usr/bin/python\nfrom __future__ import division\nimport fileinput\nimport sys\nimport math\n\ndef parse_measurement_line(line):\n (name, value) = line.split(':')\n return name, float(value)\n\nall_values = {} \naverages = {}\nm2 = {}\nsamples = {}\nacceptancy = 0.95\n\nfor line in sys.stdin.readlines():\n\t(name, value) = parse_measurement_line(line)\n\tif name in all_values:\n\t\tall_values[name].append(value)\n\t\tsamples[name] += 1\n\t\toldavg = averages[name]\n\t\tdelta = value - oldavg\n\t\taverages[name] += delta/samples[name]\n\t\tm2[name] += delta * (value - averages[name])\n\telse:\n\t\tall_values[name] = [value]\n\t\taverages[name] = value\n\t\tsamples[name] = 1\n\t\tm2[name] = 0\n\nfor name, values_list in all_values.items():\n\tretained = 0\n\tfor v in values_list:\n\t\tif abs(v - averages[name]) < 3 * math.sqrt((m2[name]/samples[name])):\n\t\t\tretained += 1\n\tif retained/samples[name] < acceptancy:\n\t\taccepted = float(retained/samples[name])\n\t\tprint name, \"has not reached convergency. 
Only \", retained, \" samples were valid over \", samples[name], \"[\", accepted*100, \"%]\"\n\t\texit(0)\n\noutput_file = open(sys.argv[1], 'w+')\nfor name, value in averages.items():\n\tst = name\n\tst += \"\\t\" + str(value) + \"\\t\" + str(math.sqrt(m2[name]/samples[name]))+\"\\n\"\n\toutput_file.write(st)\noutput_file.close()\n\nexit(1)"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2172345804804676400,"string":"2,172,345,804,804,676,400"},"line_mean":{"kind":"number","value":24.8775510204,"string":"24.877551"},"line_max":{"kind":"number","value":129,"string":"129"},"alpha_frac":{"kind":"number","value":0.6582478295,"string":"0.658248"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109495,"cells":{"repo_name":{"kind":"string","value":"mozilla/ichnaea"},"path":{"kind":"string","value":"ichnaea/api/tests.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8473"},"content":{"kind":"string","value":"import json\nimport time\nfrom unittest import mock\n\nimport colander\nimport pytest\nfrom pyramid.request import Request\n\nfrom ichnaea.api.key import get_key, Key\nfrom ichnaea.api import exceptions as api_exceptions\nfrom ichnaea.api.rate_limit import rate_limit_exceeded\nfrom ichnaea.api.schema import RenamingMapping\nfrom ichnaea.tests.factories import ApiKeyFactory, KeyFactory\n\n\nclass TestKey(object):\n def test_empty(self, session_tracker):\n key = Key()\n assert isinstance(key, Key)\n assert key.valid_key is None\n session_tracker(0)\n\n def test_get(self, session, session_tracker):\n api_key = ApiKeyFactory()\n session.flush()\n session_tracker(1)\n\n result = get_key(session, api_key.valid_key)\n assert isinstance(result, Key)\n session_tracker(2)\n\n # Test get cache\n result2 = get_key(session, api_key.valid_key)\n assert isinstance(result2, Key)\n session_tracker(2)\n\n def test_get_miss(self, session, session_tracker):\n result = get_key(session, \"unknown\")\n assert result is None\n session_tracker(1)\n\n # Test get cache\n result2 = get_key(session, \"unknown\")\n assert result2 is None\n session_tracker(1)\n\n def test_allowed(self):\n def one(**kw):\n return KeyFactory(**kw)\n\n key = one(allow_locate=True, allow_region=True)\n assert key.allowed(\"locate\")\n assert key.allowed(\"region\")\n assert key.allowed(\"submit\")\n assert key.allowed(\"unknown\") is None\n\n assert not one(allow_locate=None).allowed(\"locate\")\n assert not one(allow_locate=False).allowed(\"locate\")\n assert not one(allow_region=None).allowed(\"region\")\n assert not one(allow_region=False).allowed(\"region\")\n\n def test_store_sample(self):\n key = KeyFactory(store_sample_locate=None, store_sample_submit=None)\n assert key.store_sample(\"locate\") is False\n assert key.store_sample(\"submit\") is False\n assert key.store_sample(\"region\") is False\n\n key = KeyFactory(store_sample_locate=0, store_sample_submit=100)\n assert key.store_sample(\"locate\") is False\n assert key.store_sample(\"submit\") is True\n\n # A global_locate_sample_rate can turn off samples\n assert key.store_sample(\"locate\", global_locate_sample_rate=0.0) is False\n\n # And can raise a sample rate\n key = KeyFactory(store_sample_locate=50, store_sample_submit=None)\n assert key.store_sample(\"locate\", global_locate_sample_rate=200.0) is True\n\n @mock.patch(\"ichnaea.api.key.random\")\n def test_store_sample_mock_random(self, mock_random):\n key = KeyFactory(store_sample_locate=50)\n mock_random.return_value = 0.1\n assert 
key.store_sample(\"locate\") is True\n mock_random.return_value = 0.5\n assert key.store_sample(\"locate\") is True\n mock_random.return_value = 0.51\n assert key.store_sample(\"locate\") is False\n mock_random.return_value = 0.9\n assert key.store_sample(\"locate\") is False\n\n @pytest.mark.parametrize(\n \"global_rate, q1, q2, q3, q4\",\n [\n (100.0, 0.1, 0.5, 0.501, 0.7),\n (50.0, 0.1, 0.25, 0.251, 0.5),\n (1.0, 0.004, 0.005, 0.006, 1.0),\n ],\n )\n @mock.patch(\"ichnaea.api.key.random\")\n def test_store_sample_mock_random_with_global_rate(\n self, mock_random, global_rate, q1, q2, q3, q4\n ):\n assert 0.0 < (q3 - q2) < 0.1\n key = KeyFactory(store_sample_locate=50)\n mock_random.return_value = q1\n assert key.store_sample(\"locate\", global_rate) is True\n mock_random.return_value = q2\n assert key.store_sample(\"locate\", global_rate) is True\n mock_random.return_value = q3\n assert key.store_sample(\"locate\", global_rate) is False\n mock_random.return_value = q4\n assert key.store_sample(\"locate\", global_rate) is False\n\n def test_can_fallback(self):\n def one(**kw):\n return KeyFactory(**kw)\n\n assert one(allow_fallback=True).can_fallback()\n assert not one(allow_fallback=False).can_fallback()\n assert not one(allow_fallback=None).can_fallback()\n assert not (one(allow_fallback=True, fallback_name=None).can_fallback())\n assert not (one(allow_fallback=True, fallback_url=None).can_fallback())\n assert not (one(allow_fallback=True, fallback_ratelimit=None).can_fallback())\n assert one(allow_fallback=True, fallback_ratelimit=0).can_fallback()\n assert not (\n one(allow_fallback=True, fallback_ratelimit_interval=None).can_fallback()\n )\n assert not (\n one(allow_fallback=True, fallback_ratelimit_interval=0).can_fallback()\n )\n assert one(allow_fallback=True, fallback_cache_expire=None).can_fallback()\n assert one(allow_fallback=True, fallback_cache_expire=0).can_fallback()\n\n\nclass TestRenamingMapping(object):\n def test_to_name(self):\n class SampleSchema(colander.MappingSchema):\n schema_type = RenamingMapping\n\n input_name = colander.SchemaNode(colander.String(), to_name=\"output_name\")\n name = colander.SchemaNode(colander.String())\n\n def __init__(self, *args, **kwargs):\n super(SampleSchema, self).__init__(*args, **kwargs)\n\n input_data = {\"input_name\": \"foo\", \"name\": \"bar\"}\n\n output_data = SampleSchema().deserialize(input_data)\n assert output_data[\"output_name\"] == \"foo\"\n assert output_data[\"name\"] == \"bar\"\n assert \"input_name\" not in output_data\n\n\nclass TestExceptions(object):\n def _check(self, error, status, json=True, content_type=\"application/json\"):\n response = Request.blank(\"/\").get_response(error)\n if content_type:\n assert response.content_type == content_type\n assert response.status_code == status\n if json:\n assert response.json == error.json_body()\n return response\n\n def test_str(self):\n error = api_exceptions.LocationNotFound()\n assert str(error) == \": 404\"\n\n def test_daily_limit(self):\n error = api_exceptions.DailyLimitExceeded()\n response = self._check(error, 403)\n assert b\"dailyLimitExceeded\" in response.body\n\n def test_invalid_apikey(self):\n error = api_exceptions.InvalidAPIKey()\n response = self._check(error, 400)\n assert b\"keyInvalid\" in response.body\n\n def test_location_not_found(self):\n error = api_exceptions.LocationNotFound()\n response = self._check(error, 404)\n assert b\"notFound\" in response.body\n\n def test_parse_error(self):\n error = api_exceptions.ParseError()\n response = 
self._check(error, 400)\n assert b\"parseError\" in response.body\n\n def test_parse_error_details(self):\n error = api_exceptions.ParseError(details=[\"Details of Error\"])\n response = self._check(error, 400, json=False)\n assert b\"parseError\" in response.body\n content = json.loads(response.body.decode())\n assert content[\"details\"] == [\"Details of Error\"]\n\n def test_upload_success(self):\n error = api_exceptions.UploadSuccess()\n response = self._check(error, 200)\n assert response.body == b\"{}\"\n\n def test_upload_success_v0(self):\n error = api_exceptions.UploadSuccessV0()\n response = self._check(error, 204, json=False, content_type=None)\n assert response.body == b\"\"\n\n\nclass TestLimiter(object):\n def test_maxrequests(self, redis):\n rate_key = \"apilimit:key_a:v1.geolocate:20150101\"\n maxreq = 5\n expire = 1\n for i in range(maxreq):\n assert not rate_limit_exceeded(\n redis, rate_key, maxreq=maxreq, expire=expire\n )\n assert rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)\n\n def test_expiry(self, redis):\n rate_key = \"apilimit:key_a:v1.geolocate:20150101\"\n maxreq = 100\n expire = 1\n assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)\n time.sleep(1.0)\n assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)\n\n def test_no_limit(self):\n rate_key = \"apilimit:key_a:v1.geolocate:20150101\"\n broken_redis = None\n assert not rate_limit_exceeded(broken_redis, rate_key, maxreq=0, expire=1)\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-4846981050123046000,"string":"-4,846,981,050,123,046,000"},"line_mean":{"kind":"number","value":36.1622807018,"string":"36.162281"},"line_max":{"kind":"number","value":86,"string":"86"},"alpha_frac":{"kind":"number","value":0.6340139266,"string":"0.634014"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109496,"cells":{"repo_name":{"kind":"string","value":"openstack/storlets"},"path":{"kind":"string","value":"tests/functional/java/test_thumbnail_storlet.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5625"},"content":{"kind":"string","value":"# Copyright IBM Corp. 
2015, 2015 All Rights Reserved\n# Copyright (c) 2010-2016 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom swiftclient import client as c\nfrom tests.functional.java import StorletJavaFunctionalTest\nimport unittest\nfrom six.moves.urllib.request import Request, urlopen\n\n\nclass TestThumbnailStorlet(StorletJavaFunctionalTest):\n def setUp(self):\n self.storlet_log = None\n self.additional_headers = {}\n main_class = 'org.openstack.storlet.thumbnail.ThumbnailStorlet'\n super(TestThumbnailStorlet, self).setUp('ThumbnailStorlet',\n 'thumbnail-1.0.jar',\n main_class,\n 'sample.jpg')\n\n def invoke_storlet_on_get(self):\n headers = {'X-Run-Storlet': self.storlet_name}\n headers.update(self.additional_headers)\n resp = dict()\n resp_headers, gf = c.get_object(self.url, self.token,\n self.container,\n self.storlet_file,\n response_dict=resp,\n headers=headers)\n with open('/tmp/sample.jpg', 'wb') as f:\n f.write(gf)\n\n self.assertIn(resp['status'], [200, 202])\n\n def invoke_storlet_on_put(self):\n headers = {'X-Run-Storlet': self.storlet_name,\n 'x-object-meta-name': 'thumbnail'}\n headers.update(self.additional_headers)\n resp = dict()\n source_file = '%s/%s' % (self.path_to_bundle, self.storlet_file)\n with open(source_file, 'rb') as f:\n c.put_object(self.url, self.token,\n self.container, 'gen_thumb_on_put.jpg', f,\n headers=headers,\n response_dict=resp)\n\n status = resp.get('status')\n self.assertIn(status, [201, 202])\n\n headers = c.head_object(self.url, self.token,\n self.container, 'gen_thumb_on_put.jpg')\n self.assertLess(int(headers['content-length']), 1087318)\n self.assertEqual('thumbnail', headers['x-object-meta-name'])\n\n def invoke_storlet_on_copy_from(self):\n headers = {'X-Run-Storlet': self.storlet_name,\n 'X-Object-Meta-Name': 'thumbnail',\n 'X-Copy-From': '%s/%s' %\n (self.container, self.storlet_file)}\n headers.update(self.additional_headers)\n resp = dict()\n c.put_object(self.url, self.token,\n self.container, 'gen_thumb_on_copy.jpg', '',\n headers=headers,\n response_dict=resp)\n\n status = resp.get('status')\n self.assertIn(status, [201, 202])\n rh = resp['headers']\n self.assertEqual(rh['x-storlet-generated-from'],\n '%s/%s' %\n (self.container, self.storlet_file))\n self.assertEqual(rh['x-storlet-generated-from-account'],\n self.acct)\n self.assertIn('x-storlet-generated-from-last-modified', rh)\n\n headers = c.head_object(self.url, self.token,\n self.container, 'gen_thumb_on_copy.jpg')\n self.assertLess(int(headers['content-length']), 1087318)\n self.assertEqual('thumbnail', headers['x-object-meta-name'])\n self.assertTrue('x-object-meta-x-timestamp' not in headers)\n self.assertTrue('x-timestamp' in headers)\n\n def invoke_storlet_on_copy_dest(self):\n # No COPY in swiftclient. 
Using urllib instead...\n url = '%s/%s/%s' % (self.url, self.container, self.storlet_file)\n headers = {'X-Auth-Token': self.token,\n 'X-Run-Storlet': self.storlet_name,\n 'X-Object-Meta-Name': 'thumbnail',\n 'Destination': '%s/gen_thumb_on_copy_.jpg' % self.container}\n headers.update(self.additional_headers)\n req = Request(url, headers=headers)\n req.get_method = lambda: 'COPY'\n conn = urlopen(req, timeout=10)\n status = conn.getcode()\n self.assertIn(status, [201, 202])\n\n headers = c.head_object(self.url, self.token,\n self.container, 'gen_thumb_on_copy_.jpg')\n self.assertLess(int(headers['content-length']), 1087318)\n self.assertEqual('thumbnail', headers['x-object-meta-name'])\n self.assertTrue('x-object-meta-x-timestamp' not in headers)\n self.assertTrue('x-timestamp' in headers)\n\n def test_get(self):\n self.invoke_storlet_on_get()\n\n def test_put(self):\n self.invoke_storlet_on_put()\n\n def test_copy_put(self):\n self.invoke_storlet_on_copy_from()\n\n def test_copy(self):\n self.invoke_storlet_on_copy_dest()\n\n\nclass TestThumbnailStorletOnProxy(TestThumbnailStorlet):\n def setUp(self):\n super(TestThumbnailStorletOnProxy, self).setUp()\n self.additional_headers = {'X-Storlet-Run-On-Proxy': ''}\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":8418980704809856000,"string":"8,418,980,704,809,856,000"},"line_mean":{"kind":"number","value":40.0583941606,"string":"40.058394"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5772444444,"string":"0.577244"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109497,"cells":{"repo_name":{"kind":"string","value":"kcsaff/CA"},"path":{"kind":"string","value":"src/algorithms/rivers.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"5824"},"content":{"kind":"string","value":"# Copyright (C) 2010 by Kevin Saff\r\n\r\n# This file is part of the CA scanner.\r\n\r\n# The CA scanner is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n\r\n# The CA scanner is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n\r\n# You should have received a copy of the GNU General Public License\r\n# along with the CA scanner. 
If not, see .\r\n\r\n\r\n\"\"\"\r\nThis algorithm handles double-precision states.\r\nRules are defined using 10 weights, 2 modifiers, 2 limits, and 2 setpoints.\r\n\r\nThe first 9 weights describe the amount of each of the neighbors to\r\nuse to generate the new value:\r\n[ 0][ 1][ 2]\r\n[ 3][*4][ 5]\r\n[ 6][ 7][ 8]\r\nThe tenth weight is the amount to weight the cell's second-to-last value.\r\n\r\nOne modifier is the dampener; the total will be multiplied by this.\r\nThe other modifier is the exciter; this will be added to the total.\r\n\r\nThe limits identify what the minimum and maximum allowed values are.\r\n\r\nFinally, the two setpoints indicate what to do if the total goes out of range.\r\nOne indicates what to set the value to if it goes below the min, the other\r\nif it goes above the max.\r\n\"\"\"\r\n\r\nimport generate\r\n\r\ndef rivers_evolve(input, output, lookup):\r\n \"\"\"Evolve it.\"\"\"\r\n return generate.inline(\"\"\"\r\n \r\n#define ABS(X) ((X) < 0 ? -(X) : +(X))\r\n#define POS(X) ((X) > 0 ? (X) : 0)\r\n#define NEG(X) ((X) < 0 ? (X) : 0)\r\n#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))\r\n#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))\r\n#define VS 0.001\r\n#define WS 0.2\r\n#define WIN(V0, W0, V1, W1) (POS(MIN((W1), ((V1)+(W1))-((V0)+(W0)) )))\r\n#define WOUT(V0, W0, V1, W1) (WIN((V1), (W1), (V0), (W0)))\r\n#define THRESH 0.2\r\n// These define soil/water when we transform\r\n#define TRANSERODE 1.0\r\n#define TRANSDEPOSIT 1.0\r\n// These define rate of erosion/deposition per velocity unit\r\n#define RATEERODE 0.01\r\n#define RATEDEPOSIT 0.001\r\n \r\n PyArrayObject *input;\r\n PyArrayObject *output;\r\n PyArrayObject *lookup;\r\n char h = 0;\r\n unsigned xstride, ystride, zstride;\r\n unsigned xa, x0, x1, xM;\r\n unsigned ya, y0, y1, yM;\r\n\r\n double *ind, *oud, *look;\r\n \r\n double v00, va0, v10, v0a, v01;\r\n double w00, wa0, w10, w0a, w01;\r\n double win, wout;\r\n double wvel, Dw, Dv;\r\n\r\n if (!PyArg_ParseTuple(args, \"O!O!O!\",\r\n &PyArray_Type, &input,\r\n &PyArray_Type, &output,\r\n &PyArray_Type, &lookup\r\n ))\r\n return NULL;\r\n xstride = input-> strides[0] >> 3;\r\n ystride = input-> strides[1] >> 3;\r\n zstride = input-> strides[2] >> 3;\r\n\r\n xM = (input-> dimensions[0] - 1) * xstride;\r\n yM = (input-> dimensions[1] - 1) * ystride;\r\n //zM = (input-> dimensions[2] - 1) * zstride;\r\n\r\n ind = (double*)(input-> data);\r\n oud = (double*)(output-> data);\r\n look = (double*)(lookup-> data);\r\n\r\n for (x0 = xstride; x0 < xM; x0 += xstride)\r\n {\r\n xa = x0 - xstride;\r\n x1 = x0 + xstride;\r\n for (y0 = ystride; y0 < yM; y0 += ystride)\r\n {\r\n ya = y0 - ystride;\r\n y1 = y0 + ystride;\r\n \r\n // v00 is sediment, w00 is water.\r\n v00 = ind[x0 + y0 + 0*zstride];\r\n va0 = ind[xa + y0 + 0*zstride];\r\n v10 = ind[x1 + y0 + 0*zstride];\r\n v0a = ind[x0 + ya + 0*zstride];\r\n v01 = ind[x0 + y1 + 0*zstride];\r\n \r\n w00 = ind[x0 + y0 + 1*zstride];\r\n wa0 = ind[xa + y0 + 1*zstride];\r\n w10 = ind[x1 + y0 + 1*zstride];\r\n w0a = ind[x0 + ya + 1*zstride];\r\n w01 = ind[x0 + y1 + 1*zstride];\r\n \r\n // Determine water flow in.\r\n \r\n win = 0;\r\n win += WIN(v00, w00, va0, wa0);\r\n win += WIN(v00, w00, v10, w10);\r\n win += WIN(v00, w00, v0a, w0a);\r\n win += WIN(v00, w00, v01, w01);\r\n win *= WS;\r\n \r\n // Determine water flow out.\r\n \r\n wout = 0;\r\n wout += WOUT(v00, w00, va0, wa0);\r\n wout += WOUT(v00, w00, v10, w10);\r\n wout += WOUT(v00, w00, v0a, w0a);\r\n wout += WOUT(v00, w00, v01, w01);\r\n wout *= WS;\r\n \r\n w00 = POS(w00 + win - 
wout);\r\n \r\n Dw = Dv = 0;\r\n // Determine water velocity.\r\n if (w00 > 0) {\r\n wvel = win / w00;\r\n // Perform erosion.\r\n if (wvel > THRESH) { // Erosion\r\n Dw = (wvel - THRESH) * RATEERODE;\r\n Dv = -Dw * TRANSERODE;\r\n } else { // Deposition\r\n Dw = -MIN(w00, (THRESH - wvel) * RATEDEPOSIT);\r\n Dv = -Dw * TRANSDEPOSIT;\r\n }\r\n }\r\n \r\n // Determine sediment shift.\r\n \r\n Dv += VS * (va0 + v10 + v0a + v01 - 4 * v00);\r\n \r\n // Write results.\r\n \r\n oud[x0 + y0 + 0*zstride] = v00 + Dv;\r\n oud[x0 + y0 + 1*zstride] = w00 + Dw;\r\n }\r\n }\r\n return PyFloat_FromDouble(1.0);\r\n \"\"\")(input, output, lookup)\r\n\r\nimport numpy\r\n\r\nfrom _util import register\r\nfrom _algorithm import algorithm\r\n\r\n@register('compile_rule', type='rivers', quality=1.0)\r\ndef _rivers(X):\r\n lookup = list()\r\n return algorithm('floatscan', \r\n planes=3,\r\n evolve=rivers_evolve,\r\n table=numpy.asarray(lookup, dtype = numpy.float))\r\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":1988516181111120400,"string":"1,988,516,181,111,120,400"},"line_mean":{"kind":"number","value":31.0909090909,"string":"31.090909"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.5192307692,"string":"0.519231"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109498,"cells":{"repo_name":{"kind":"string","value":"IsmaelRLG/simpbot"},"path":{"kind":"string","value":"simpbot/commands/requires.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"10605"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Simple Bot (SimpBot)\n# Copyright 2016-2017, Ismael Lugo (kwargs)\n\nimport logging\nfrom simpbot.bottools import irc as irctools\nfrom simpbot import localedata\n\n\nrequerimentls = {}\nlogging = logging.getLogger('simpbot')\nfailed = True\ni18n = localedata.get()\n\n\ndef get(requeriment):\n \"\"\"get('admin:simple')\"\"\"\n args = []\n if isinstance(requeriment, tuple) or isinstance(requeriment, list):\n return requeriment\n elif ':' in requeriment:\n requeriment = requeriment.split(':', 1)\n if requeriment[1] != '':\n if ',' in requeriment[1]:\n args.extend(requeriment[1].split(','))\n else:\n args.append(requeriment[1])\n requeriment = requeriment[0]\n\n if requeriment in requerimentls:\n return requerimentls[requeriment], args\n\n\ndef req_nickserv(vars):\n irc = vars['self'].irc\n user = vars['user']\n target = vars['target']\n args = vars['watchdog'][1]\n if user.account is None and not 'ninja' in args:\n irc.error(target, localedata.get(vars['lang'])['not logged'])\n return failed\nrequerimentls['requires nickserv'] = req_nickserv\n\n\ndef only(vars):\n irc = vars['self'].irc\n args = vars['watchdog'][1]\n target = vars['user'].nick\n if len(args) == 0:\n logging.error(vars['msg'] % i18n['without params'] % 'only')\n return failed\n\n if args[0] == 'private':\n if vars['privbot']:\n return\n elif len(args) == 1:\n irc.error(target, localedata.get(vars['lang'])['only private'])\n return failed\n elif args[0] == 'channel':\n if not vars['privbot']:\n return\n elif len(args) == 1:\n irc.error(target, localedata.get(vars['lang'])['only channel'])\n return failed\nrequerimentls['only'] = only\n\n\ndef chan_register(vars):\n irc = vars['self'].irc\n msg = vars['msg']\n dbstore = irc.dbstore\n channel = vars['channel']\n privbot = vars['privbot']\n target = vars['target']\n locale = localedata.get(vars['lang'])\n result = vars['result']\n tgroup = {}\n groupnames = 
vars['watchdog'][1]\n non_channel = (len(groupnames) > 0 and groupnames[0] == 'non-channel')\n for group in groupnames:\n if not '=' in group:\n continue\n\n try:\n v, group = group.split('=', 1)\n except ValueError:\n continue\n tgroup[v] = group.split()\n\n if privbot and not non_channel:\n\n if len(groupnames) == 0:\n logging.error(msg % i18n['without params'] % 'chan_register')\n return failed\n\n for group in (tgroup['private'] if 'private' in tgroup else groupnames):\n try:\n channel = result.group(group)\n except (IndexError, KeyError):\n logging.error(msg % i18n['invalid params'] % 'chan_register')\n return failed\n else:\n if channel is None:\n continue\n if not irctools.ischannel(channel, irc=irc):\n irc.error(target, locale['invalid channel'] % channel)\n return failed\n if dbstore.get_chan(channel) is None:\n irc.error(target, locale['unregistered channel'] % channel)\n return failed\n vars['channel'] = channel\n return\n irc.error(target, locale['channel needed'])\n return failed\n elif len(groupnames) > 0:\n try:\n if 'channel' in tgroup:\n if tgroup['channel'][0] == 'non-channel':\n assert False\n else:\n groupname = tgroup['channel'][0]\n else:\n groupname = groupnames[0]\n\n if not non_channel:\n channel = vars['result'].group(groupname)\n except AssertionError:\n pass\n except (IndexError, KeyError):\n irc.verbose('error', msg % i18n['invalid params'] % 'chan_register')\n return failed\n\n vars['channel'] = channel\n if dbstore.get_chan(channel) is None:\n if not non_channel:\n irc.error(target, locale['unregistered channel'] % channel)\n return failed\n elif dbstore.get_chan(channel) is None:\n if not non_channel:\n irc.error(target, locale['unregistered channel'] % channel)\n return failed\n\nrequerimentls['registered chan'] = chan_register\n\n\ndef unregistered_chan(vars):\n irc = vars['self'].irc\n msg = vars['msg']\n dbstore = irc.dbstore\n channel1 = vars['channel']\n channel2 = vars['watchdog'][1]\n privbot = vars['privbot']\n target = vars['target']\n locale = localedata.get(vars['lang'])\n if privbot:\n if len(channel2) == 0:\n logging.error(msg % i18n['without params'] % 'unregistered_chan')\n return failed\n for group in channel2:\n try:\n channel = vars['result'].group(group)\n except (IndexError, KeyError):\n logging.error(msg % i18n['invalid params'] % 'unregistered_chan')\n return failed\n else:\n if channel is None:\n continue\n if not irctools.ischannel(channel, irc=irc):\n irc.error(target, locale['invalid channel'] % channel)\n return failed\n if dbstore.get_chan(channel) is not None:\n irc.error(target, locale['registered channel'] % channel)\n return failed\n vars['channel'] = channel\n return\n irc.error(target, )\n return failed\n elif len(channel2) > 0:\n\n try:\n channel = vars['result'].group(channel2[0])\n except (IndexError, KeyError):\n logging.error(msg % i18n['invalid params'] % 'unregistered_chan')\n return failed\n\n vars['channel'] = channel\n if dbstore.get_chan(channel) is not None:\n irc.error(vars['target'], locale['registered channel'] % channel)\n return failed\n elif dbstore.get_chan(channel1) is not None:\n irc.error(vars['target'], locale['registered channel'] % channel1)\n return failed\nrequerimentls['unregistered chan'] = unregistered_chan\n\n\ndef user_register(vars):\n irc = vars['self'].irc\n msg = vars['msg']\n user = vars['user']\n args = vars['watchdog'][1]\n target = user.nick\n locale = localedata.get(vars['lang'])\n dbstore = irc.dbstore\n if len(args) == 0 or args[0] == 'ninja':\n if user.account is None:\n if not 
'ninja' in args:\n irc.error(target, locale['not logged'])\n return failed\n\n if dbstore.get_user(user.account) is None:\n if not 'ninja' in args:\n irc.error(target, locale['you are not registered'])\n return failed\n return\n\n try:\n usr = vars['result'].group(args[0])\n except (IndexError, KeyError):\n logging.error(msg % i18n['invalid params'] % 'user_register')\n return failed\n\n if usr is None:\n if 'optional' in args:\n return\n else:\n return failed\n\n if '*' in usr or '@' in usr or '!' in usr:\n irc.error(target, locale['invalid user'] % usr)\n return failed\n\n if dbstore.get_user(usr) is None:\n irc.error(target, locale['user no registered'] % usr)\nrequerimentls['registered user'] = user_register\n\n\ndef unregistered_user(vars):\n irc = vars['self'].irc\n user = vars['user']\n target = user.nick\n dbstore = irc.dbstore\n if dbstore.get_user(user.account) is not None:\n irc.error(target, localedata.get(vars['lang'])['already registered'])\n return failed\n return\nrequerimentls['unregistered user'] = unregistered_user\n\n\ndef flags(vars):\n irc = vars['self'].irc\n msg = vars['msg']\n user = vars['user']\n args = vars['watchdog'][1]\n channel = vars['channel']\n target = user.nick\n dbstore = irc.dbstore\n\n if len(args) == 0:\n logging.error(msg % i18n['without params'] % 'flags')\n return failed\n if not args[0].isalnum():\n logging.error(msg % i18n['invalid params'] % 'flags')\n return failed\n\n chan = dbstore.get_chan(channel)\n flag = chan.get_flags(dbstore.get_user(user.account))\n error = False\n if flag is None:\n flag = chan.get_flags(user.mask)\n if flag is None or not args[0] in flag:\n error = True\n elif not args[0] in flag:\n error = True\n\n if error:\n locale = localedata.get(vars['lang'])\n irc.error(target, locale['permission denied'] % args[0])\n return failed\nrequerimentls['flags'] = flags\n\n\ndef admin(vars):\n irc = vars['self'].irc\n msg = vars['msg']\n user = vars['user']\n args = vars['watchdog'][1]\n tar = user.nick\n dbstore = irc.dbstore\n locale = localedata.get(vars['lang'])\n\n usr = dbstore.get_user(user.account)\n if not usr.isadmin():\n # this line has been commented for security reasons, please\n # uncomment this line if you are sure of what makes\n #irc.error(tar, locale['only admins'])\n return failed\n\n if len(args) == 0:\n return\n\n for capab in args:\n if not usr.admin.has_capab(capab):\n irc.error(tar, locale['no capabs'])\n\n _ = vars['_']\n _['usr'] = usr\n irc.verbose('fail use', msg % _(locale['fail use']))\n return failed\nrequerimentls['admin'] = admin\n\n\ndef channel_status(vars):\n chan_name = vars['channel']\n irc = vars['irc']\n target = vars['target']\n mode_req = vars['watchdog'][1]\n locale = localedata.get(vars['lang'])\n\n channel = irc.request.get_chan(chan_name)\n if channel is None:\n irc.error(target, locale['not on the channel'] % chan_name)\n return failed\n\n status_bot = channel.get_user(irc.nickname).get_status(chan_name)\n if status_bot == '':\n irc.error(target, locale['mode required'] % '|+'.join(mode_req))\n return failed\n\n if not hasattr(irc.features, 'modeprefix'):\n irc.features.modeprefix = {}\n for char, mode in irc.features.prefix.items():\n irc.features.modeprefix[mode] = char\n\n prefix = irc.features.modeprefix\n for mode in mode_req:\n if not mode in prefix:\n continue\n char = prefix[mode]\n if char in status_bot:\n return\n\n irc.error(target, locale['mode required'] % '|+'.join(mode_req))\n return failed\nrequerimentls['channel_status'] = 
channel_status"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-3391833250695264000,"string":"-3,391,833,250,695,264,000"},"line_mean":{"kind":"number","value":29.8313953488,"string":"29.831395"},"line_max":{"kind":"number","value":81,"string":"81"},"alpha_frac":{"kind":"number","value":0.5679396511,"string":"0.56794"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":109499,"cells":{"repo_name":{"kind":"string","value":"shiki0711/cmock"},"path":{"kind":"string","value":"include/generator/generator.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"14573"},"content":{"kind":"string","value":"\nimport re\n\n\nTOKEN_TYPE_TEXT = 0\nTOKEN_TYPE_VARIABLE = 1\nTOKEN_TYPE_BLOCK = 2\nTOKEN_TYPE_BLOCK_IF = 3\nTOKEN_TYPE_BLOCK_ELSE = 4\nTOKEN_TYPE_BLOCK_ENDIF = 5\nTOKEN_TYPE_BLOCK_LOOP = 6\nTOKEN_TYPE_BLOCK_ENDLOOP = 7\nTOKEN_TYPE_BLOCK_INCLUDE = 8\n\nPARSE_STATUS_ROOT = 0\nPARSE_STATUS_IF = 1\nPARSE_STATUS_ELSE = 2\nPARSE_STATUS_LOOP = 3\n\n\nclass ParseError(RuntimeError):\n def __init__(self, arg):\n self.args = arg\n\n\nclass Token(object):\n def __init__(self, token_string, token_type):\n self.token_string = token_string\n self.token_type = token_type\n\n\nBLOCK_TAG_START = '{% '\nBLOCK_TAG_END = ' %}'\nVARIABLE_TAG_START = '{{ '\nVARIABLE_TAG_END = ' }}'\n\nclass Lexer(object):\n def __init__(self, template_string):\n self.template_string = template_string\n\n def tokenize(self):\n result = []\n s = 0\n status = 'TEXT'\n for index in range(0, len(self.template_string)):\n if self.template_string[index:].startswith(VARIABLE_TAG_START):\n if status == 'TEXT':\n status = 'VAR_START'\n result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT))\n s = index\n else:\n errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...'\n raise ParseError(errmsg)\n elif self.template_string[index:].startswith(VARIABLE_TAG_END):\n if status == 'VAR_START':\n status = 'TEXT'\n result.append(Token(self.template_string[s:index+3], TOKEN_TYPE_VARIABLE))\n s = index+3\n else:\n errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...'\n raise ParseError(errmsg)\n elif self.template_string[index:].startswith(BLOCK_TAG_START):\n if status == 'TEXT':\n status = 'BLOCK_START'\n result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT))\n s = index\n else:\n errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...'\n raise ParseError(errmsg)\n elif self.template_string[index:].startswith(BLOCK_TAG_END):\n if status == 'BLOCK_START':\n status = 'TEXT'\n result.append(Token(self.template_string[s:index+3], TOKEN_TYPE_BLOCK))\n s = index+3\n else:\n errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...'\n raise ParseError(errmsg)\n else:\n pass\n result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT))\n #for item in result:\n # print 'token: '+item.token_string+' type: '+str(item.token_type)\n return result\n\n\nclass NodeList(object):\n def __init__(self):\n self.container = []\n \n def append(self, node):\n self.container.append(node)\n\nclass Node(object):\n def __init__(self):\n self.parent = None\n\n def render(self, context):\n pass\n \n def resolve(self, variable):\n node = self\n while node:\n if node.context.has_key(variable):\n return node.context[variable]\n else:\n node = node.parent\n errmsg = 'resolve not 
found: '+variable\n raise ParseError(errmsg)\n return None\n\nclass RootNode(Node):\n def __init__(self):\n Node.__init__(self)\n self.child_nodelist = NodeList()\n self.context = {}\n \n def addNode(self, node):\n self.child_nodelist.append(node)\n node.parent = self\n\n def render(self, context):\n self.context = context\n result = ''\n for node in self.child_nodelist.container:\n r = node.render(context)\n result += node.render({})\n return result\n \n\n\nclass TextNode(Node):\n def __init__(self, s):\n Node.__init__(self)\n self.s = s\n\n def render(self, context):\n self.context = context\n return self.s\n\n\nclass VariableNode(Node):\n def __init__(self, var_string):\n Node.__init__(self)\n self.var_string = var_string\n\n def render(self, context):\n self.context = context\n resolved_var = self.resolve(self.var_string)\n if resolved_var:\n return resolved_var\n else:\n return ''\n\n\nclass ConditionNode(Node):\n def __init__(self, condition_var_string):\n Node.__init__(self)\n self.condition_var_string = condition_var_string\n self.true_nodelist = NodeList()\n self.false_nodelist = NodeList()\n\n def render(self, context):\n self.context = context\n resolved_var = self.resolve(self.condition_var_string)\n if resolved_var:\n self.context[self.condition_var_string] = resolved_var\n if int(resolved_var) > 0:\n condition = True\n else:\n condition = False\n else:\n condition = False\n if condition:\n nodelist = self.true_nodelist.container\n else:\n nodelist = self.false_nodelist.container\n result = ''\n for node in nodelist:\n result += node.render({})\n return result\n\n def addTrueNode(self, node):\n self.true_nodelist.append(node)\n node.parent = self\n\n def addFalseNode(self, node):\n self.false_nodelist.append(node)\n node.parent = self\n\n\nclass LoopNode(Node):\n def __init__(self, loop_vars_list):\n Node.__init__(self)\n self.loop_vars_list = loop_vars_list\n self.loop_nodelist = NodeList()\n\n def render(self, context):\n self.context = context\n result = ''\n separator = ''\n resolved_list = self.resolve(self.loop_vars_list[1])\n if resolved_list:\n for item in resolved_list:\n result += separator\n if self.loop_vars_list[0] == '_':\n for (k,v) in item.items():\n self.context[k] = v\n else:\n self.context[self.loop_vars_list[0]] = item\n for node in self.loop_nodelist.container:\n result += node.render({})\n separator = self.loop_vars_list[2]\n return result\n\n def addLoopNode(self, node):\n self.loop_nodelist.container.append(node)\n node.parent = self\n\n\nclass Parser(object):\n def __init__(self, tokens):\n self.tokens = tokens\n self.parse_stack = []\n self.root = RootNode()\n self.parse_stack_init(self.root)\n\n def parse(self):\n while self.tokens:\n token = self.next_token()\n if token.token_type == TOKEN_TYPE_TEXT:\n self.append(token.token_string, TOKEN_TYPE_TEXT)\n elif token.token_type == TOKEN_TYPE_VARIABLE:\n if token.token_string:\n regex_req = ur\"\\{\\{\\s([a-zA-Z_][a-zA-Z_0-9]*)\\s\\}\\}\"\n m = re.match(regex_req, token.token_string)\n if m:\n var_string = m.group(1)\n self.append(var_string, TOKEN_TYPE_VARIABLE)\n else:\n errmsg = 'parse error: ' + token.token_string\n raise ParseError(errmsg)\n elif token.token_type == TOKEN_TYPE_BLOCK:\n if token.token_string:\n regex_req_if = ur\"\\{%\\sif\\s([a-zA-Z_][a-zA-Z_0-9]*)\\s%\\}\"\n regex_req_else = ur\"\\{%\\selse\\s%\\}\"\n regex_req_endif = ur\"\\{%\\sendif\\s%\\}\"\n regex_req_loop = ur\"\\{%\\sfor\\s([a-zA-Z_][a-zA-Z_0-9]*)\\sin\\s([a-zA-Z_][a-zA-Z_0-9]*)\\s%\\}\"\n regex_req_loop_with = 
ur\"\\{%\\sfor\\s([a-zA-Z_][a-zA-Z_0-9]*)\\sin\\s([a-zA-Z_][a-zA-Z_0-9]*)\\swith\\s([^\\s]+)\\s%\\}\"\n regex_req_endloop = ur\"\\{%\\sendfor\\s%\\}\"\n regex_req_include = ur\"\\{%\\sinclude\\s([^\\s]+)\\s%\\}\"\n m = re.match(regex_req_if, token.token_string)\n if m:\n if_var_string = m.group(1)\n self.append(if_var_string, TOKEN_TYPE_BLOCK_IF)\n continue\n m = re.match(regex_req_else, token.token_string)\n if m:\n self.append('', TOKEN_TYPE_BLOCK_ELSE)\n continue\n m = re.match(regex_req_endif, token.token_string)\n if m:\n self.append('', TOKEN_TYPE_BLOCK_ENDIF)\n continue\n m = re.match(regex_req_loop, token.token_string)\n if m:\n loop_item_string = m.group(1)\n loop_list_string = m.group(2)\n self.append([loop_item_string, loop_list_string, ''], TOKEN_TYPE_BLOCK_LOOP)\n continue\n m = re.match(regex_req_loop_with, token.token_string)\n if m:\n loop_item_string = m.group(1)\n loop_list_string = m.group(2)\n loop_with_string = m.group(3)\n self.append([loop_item_string, loop_list_string, loop_with_string], TOKEN_TYPE_BLOCK_LOOP)\n continue\n m = re.match(regex_req_endloop, token.token_string)\n if m:\n self.append('', TOKEN_TYPE_BLOCK_ENDLOOP)\n continue\n m = re.match(regex_req_include, token.token_string)\n if m:\n sub_template_file = m.group(1)\n self.append(sub_template_file, TOKEN_TYPE_BLOCK_INCLUDE)\n continue\n errmsg = 'parse error: ' + token.token_string\n raise ParseError(errmsg)\n else:\n errmsg = 'parse error: ' + token.token_string\n raise ParseError(errmsg)\n return self.root\n \n def append(self, parsed_content, type):\n current = self.parse_stack[-1]\n current_node = current['node']\n current_status = current['status']\n if type == TOKEN_TYPE_TEXT or type == TOKEN_TYPE_VARIABLE:\n if type == TOKEN_TYPE_TEXT:\n node = TextNode(parsed_content)\n else:\n node = VariableNode(parsed_content)\n self.insert_node(current_node, current_status, node)\n elif type == TOKEN_TYPE_BLOCK_IF:\n node = ConditionNode(parsed_content)\n self.insert_node(current_node, current_status, node)\n self.parse_stack.append({'node':node, 'status':PARSE_STATUS_IF})\n elif type == TOKEN_TYPE_BLOCK_ELSE:\n if current_status != PARSE_STATUS_IF:\n raise ParseError(parsed_content)\n else:\n current['status'] = PARSE_STATUS_ELSE\n elif type == TOKEN_TYPE_BLOCK_ENDIF:\n if current_status != PARSE_STATUS_IF and current_status != PARSE_STATUS_ELSE:\n self.print_parse_stack()\n raise ParseError(parsed_content)\n else:\n self.parse_stack.pop()\n elif type == TOKEN_TYPE_BLOCK_LOOP:\n node = LoopNode(parsed_content)\n self.insert_node(current_node, current_status, node)\n self.parse_stack.append({'node':node, 'status':PARSE_STATUS_LOOP})\n elif type == TOKEN_TYPE_BLOCK_ENDLOOP:\n if current_status != PARSE_STATUS_LOOP:\n raise ParseError(parsed_content)\n else:\n self.parse_stack.pop()\n elif type == TOKEN_TYPE_BLOCK_INCLUDE:\n t = Templete(TEMPLATE_FROM_FILE, parsed_content)\n node = t.compile()\n self.insert_node(current_node, current_status, node)\n else:\n raise ParseError(parsed_content)\n \n \n def parse_stack_init(self, root):\n self.parse_stack = []\n self.parse_stack.append({'node':root, 'status':PARSE_STATUS_ROOT})\n \n def insert_node(self, parent_node, parent_status, node):\n if parent_status == PARSE_STATUS_IF:\n parent_node.addTrueNode(node)\n elif parent_status == PARSE_STATUS_ELSE:\n parent_node.addFalseNode(node)\n elif parent_status == PARSE_STATUS_LOOP:\n parent_node.addLoopNode(node)\n else:\n parent_node.addNode(node)\n\n def next_token(self):\n return self.tokens.pop(0)\n \n def 
print_parse_stack(self):\n for item in self.parse_stack:\n print 'status: '+str(item['status'])\n\nTEMPLATE_FROM_FILE = 0\nTEMPLATE_FROM_STRING = 1\nclass Templete(object):\n def __init__(self, source, content):\n self.parse_node = None\n self.generated_string = None\n if source == TEMPLATE_FROM_FILE:\n try:\n fp = open(content, 'r')\n except Exception:\n errmsg = 'Open template file '+str(content) + ' error!'\n raise ParseError(errmsg)\n self.template_string = fp.read()\n fp.close()\n else:\n self.template_string = content\n \n def compile(self):\n lex = Lexer(self.template_string)\n tokens = lex.tokenize()\n \n parser = Parser(tokens)\n self.parse_node = parser.parse()\n return self.parse_node \n \n def render(self, context):\n self.generated_string = self.parse_node.render(context)\n return self.generated_string\n \n def to_file(self, filepath):\n try:\n fp = open(filepath, 'w')\n except Exception:\n errmsg = 'Open output file '+str(filepath) + ' error!'\n raise ParseError(errmsg)\n fp.write(self.generated_string)\n fp.close()\n\n\ndef main():\n t = Templete(TEMPLATE_FROM_FILE, 'cmock_mock.template')\n t.compile()\n function_settings = []\n for i in range(0, 20):\n argrange = range(1, i+1)\n argrange_str = map(lambda x: str(x), argrange)\n function_settings.append({'noret':1, 'args':str(i), 'argrange':argrange_str})\n function_settings.append({'noret':0, 'args':str(i), 'argrange':argrange_str})\n t.render({'function_settings':function_settings})\n t.to_file('../cmock_mock.h')\n\nif __name__ == '__main__':\n main()\n print 'done.'\n \n \n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":1245056475781457700,"string":"1,245,056,475,781,457,700"},"line_mean":{"kind":"number","value":34.4574209246,"string":"34.457421"},"line_max":{"kind":"number","value":131,"string":"131"},"alpha_frac":{"kind":"number","value":0.5276195704,"string":"0.52762"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1094,"numItemsPerPage":100,"numTotalItems":110960,"offset":109400,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODAyMDkwMiwic3ViIjoiL2RhdGFzZXRzL2NvZGVwYXJyb3QvY29kZXBhcnJvdC12YWxpZC1uZWFyLWRlZHVwbGljYXRpb24iLCJleHAiOjE3NTgwMjQ1MDIsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.T8-MZYLmiTbZTvjkPmKFbQlCxO2GM6RHwld5EfYO-jdoPDVxkgrRW3pWEZAlYhX3efJHv_viMXcL6QR5fe_xDw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns (name, type, and value range as reported by the viewer):

| Column        | Type    | Range / values                                         |
|---------------|---------|--------------------------------------------------------|
| repo_name     | string  | lengths 5–92                                           |
| path          | string  | lengths 4–232                                          |
| copies        | string  | 22 distinct values                                     |
| size          | string  | lengths 4–7                                            |
| content       | string  | lengths 626–1.05M                                      |
| license       | string  | 15 distinct values                                     |
| hash          | int64   | -9,223,277,421,539,062,000 to 9,223,102,107B           |
| line_mean     | float64 | 5.21–99.9                                              |
| line_max      | int64   | 12–999                                                 |
| alpha_frac    | float64 | 0.25–0.96                                              |
| autogenerated | bool    | 1 class                                                |
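Since each row is one source file plus quality statistics, a short sketch of how a dataset with this schema could be consumed may be useful. This is a minimal example, assuming the Hugging Face `datasets` library; the dataset identifier `username/code-dataset`, the split name, and the filter thresholds are placeholders, not values taken from this page.

```python
# Minimal sketch of streaming a dataset with the columns listed above.
# ASSUMPTION: "username/code-dataset" is a placeholder id; the real
# repository id is not shown in this excerpt.
from datasets import load_dataset

# Stream the split so the large `content` strings are not loaded into memory at once.
ds = load_dataset("username/code-dataset", split="train", streaming=True)

for row in ds:
    # Drop files flagged as autogenerated and keep files whose statistics
    # look "code-like" (thresholds here are illustrative only).
    if row["autogenerated"]:
        continue
    if row["alpha_frac"] < 0.3 or row["line_max"] > 500:
        continue
    print(row["repo_name"], row["path"], row["license"], row["size"])
```

The sample rows below illustrate the records these columns describe.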
Row 1
repo_name: all-of-us/raw-data-repository
path: rdr_service/tools/csv_importer.py
copies: 1
size: 6013
content:
"""Imports entities into the database based on a CSV file. """ import csv import logging class CsvImporter(object): """Importer for database entities from CSV input. Subclasses indicate in the constructor the name of the entity (for logging purposes), the DAO used to save the entity, the name of the primary key database ID field, the name of the external ID (referenced in the CSV file), and columns that must be populated with some value in the CSV. They then define _entity_from_row(row) to parse an entity out of a row dictionary. """ def __init__(self, entity_name, dao, id_field, external_id_field, required_columns): self.entity_name = entity_name self.dao = dao self.id_field = id_field self.external_id_field = external_id_field self.required_columns = required_columns self.errors = list() self.deletion_count = 0 def run(self, filename, dry_run): """Imports entities from the CSV file with the specified name. When dry_run flag is true, entities are not updated; instead logging indicates what would be updated.""" skip_count = 0 new_count = 0 updated_count = 0 matched_count = 0 row_list = [] logging.info("Importing %ss from %r.", self.entity_name, filename) with open(filename, "r") as csv_file: #reader = UnicodeDictReader(csv_file) reader = csv.DictReader(csv_file) existing_map = {getattr(entity, self.external_id_field): entity for entity in self.dao.get_all()} with self.dao.session() as session: for row in reader: # Strip leading and trailing whitespace row = {k.strip(): v.strip() for k, v in row.items()} missing_fields = [] for column in self.required_columns: value = row.get(column) if value is None or value == "": missing_fields.append(column) if missing_fields: logging.info("Skipping %s with missing columns: %s", self.entity_name, missing_fields) skip_count += 1 continue entity = self._entity_from_row(row) if entity is None: skip_count += 1 continue existing_entity = existing_map.get(getattr(entity, self.external_id_field)) row_list.append(row) if existing_entity: changed, skipped = self._update_entity(entity, existing_entity, session, dry_run) if changed: updated_count += 1 elif skipped: skip_count += 1 else: matched_count += 1 else: entity = self._insert_entity(entity, existing_map, session, dry_run) if not entity: skip_count += 1 else: new_count += 1 self._cleanup_old_entities(session, row_list, dry_run) if self.errors: for err in self.errors: logging.warn(err) logging.info( "Done importing %ss%s: %d skipped, %d new, %d updated, %d not changed, " "%d deleted, %d errors.", self.entity_name, " (dry run)" if dry_run else "", skip_count, new_count, updated_count, matched_count, self.deletion_count, len(self.errors), ) def _entity_from_row(self, row): # pylint: disable=unused-argument raise Exception("Subclasses must implement _entity_from_row") @staticmethod def _maybe_convert_unicode(s): if not isinstance(s, str): return s return s.encode("utf-8") @staticmethod def _diff_dicts(old_dict, new_dict): changes = {} keys = set(list(old_dict.keys()) + list(new_dict.keys())) for k in keys: old = CsvImporter._maybe_convert_unicode(old_dict.get(k)) new = CsvImporter._maybe_convert_unicode(new_dict.get(k)) if old != new: changes[k] = "{} -> {}".format(old, new) return changes def _update_entity(self, entity, existing_entity, session, dry_run): new_dict = entity.asdict() new_dict[self.id_field] = None existing_dict = existing_entity.asdict() existing_dict[self.id_field] = None if existing_dict == new_dict: logging.info("Not updating %s.", new_dict[self.external_id_field]) return False, False else: 
changes = CsvImporter._diff_dicts(existing_dict, new_dict) log_prefix = "(dry run) " if dry_run else "" logging.info( log_prefix + 'Updating %s "%s": changes = %s', self.entity_name, new_dict[self.external_id_field], changes, ) if not dry_run: self._do_update(session, entity, existing_entity) return True, False def _do_update(self, session, entity, existing_entity): for k, v in entity.asdict().items(): if k != self.external_id_field and k != self.id_field: setattr(existing_entity, k, v) self.dao.update_with_session(session, existing_entity) def _insert_entity(self, entity, existing_map, session, dry_run): # pylint: disable=unused-argument log_prefix = "(dry run) " if dry_run else "" logging.info(log_prefix + "Inserting %s: %s", self.entity_name, entity.asdict()) if not dry_run: self.dao.insert_with_session(session, entity) return True
license: bsd-3-clause
hash: 6,735,288,725,375,239,000
line_mean: 39.355705
line_max: 110
alpha_frac: 0.536837
autogenerated: false
Row 2
repo_name: shaurz/ome
path: ome/symbol.py
copies: 1
size: 2732
content:
# ome - Object Message Expressions # Copyright (c) 2015-2016 Luke McCarthy <[email protected]> import re re_symbol_part = re.compile(r'([~]?[a-zA-Z][a-zA-Z0-9]*(?:-[a-zA-Z0-9]+)*)(:,*)?') re_hyphen_or_tilde = re.compile(r'[~-]') operator_labels = { '+' : '__ADD', '-' : '__SUB', '*' : '__MUL', '/' : '__DIV', '==': '__EQ', '!=': '__NE', '<' : '__LT', '<=': '__LE', '>' : '__GT', '>=': '__GE', } operator_aliases = { '×' : '*', '÷' : '/', '≠' : '!=', '≤' : '<=', '≥' : '>=', } def symbol_to_label(symbol): """ Encodes a symbol into a form that can be used for a label. >>> symbol_to_label('foo') 'foo__0' >>> symbol_to_label('foo:') 'foo__1' >>> symbol_to_label('foo-bar-baz') 'foo_bar_baz__0' >>> symbol_to_label('foo:,,') 'foo__3' >>> symbol_to_label('foo4:,,bar5:,') 'foo4__3bar5__2' >>> symbol_to_label('~foo:') '_foo__1' >>> symbol_to_label('≠') '__NE' >>> symbol_to_label('!=') '__NE' >>> symbol_to_label('') Traceback (most recent call last): ... ValueError: Invalid symbol '' >>> symbol_to_label('~') Traceback (most recent call last): ... ValueError: Invalid symbol '~' >>> symbol_to_label(':foo') Traceback (most recent call last): ... ValueError: Invalid symbol ':foo' >>> symbol_to_label('foo::') Traceback (most recent call last): ... ValueError: Invalid symbol 'foo::' >>> symbol_to_label('foo-') Traceback (most recent call last): ... ValueError: Invalid symbol 'foo-' """ op = operator_aliases.get(symbol, symbol) if op in operator_labels: return operator_labels[op] m = None pos = 0 label = [] for m in re_symbol_part.finditer(symbol): if m.start() != pos: raise ValueError('Invalid symbol {}'.format(repr(symbol))) pos = m.end() name, args = m.groups() label.append(re_hyphen_or_tilde.sub('_', name) + '__' + str(len(args or ()))) if not m or m.end() != len(symbol): raise ValueError('Invalid symbol {}'.format(repr(symbol))) return ''.join(label) def symbol_arity(symbol): """ >>> symbol_arity('foo') 1 >>> symbol_arity('foo:') 2 >>> symbol_arity('foo:bar:') 3 >>> symbol_arity('foo:,,bar:') 5 >>> symbol_arity('*') 2 """ if symbol in operator_labels: return 2 return symbol.count(':') + symbol.count(',') + 1 def is_private_symbol(name): return name.startswith('~') __all__ = [ 'symbol_to_label', 'symbol_arity', 'is_private_symbol', ] if __name__ == '__main__': import doctest doctest.testmod()
license: mit
hash: -4,885,103,565,144,054,000
line_mean: 22.669565
line_max: 85
alpha_frac: 0.503306
autogenerated: false
Row 3
repo_name: maxiyommi/primeros-pasos-BBB
path: CODIGOS/python/audio/audioplay.py
copies: 1
size: 1170
content:
# -*- coding: utf-8 -*-
# Method for playing audio with PyAudio.
# We create a callback to generate audio in real time;
# with this method it is possible to process audio while it is being recorded.

import pyaudio
import wave
import time

awav = wave.open('record.wav')

# Create a PyAudio instance.
p = pyaudio.PyAudio()

# Define the callback; it returns the audio data and a flag
# indicating that the stream is active.
def callback(in_data, frame_count, time_info, status):
    data = awav.readframes(frame_count)
    return (data, pyaudio.paContinue)

# Open the stream, also indicating that we use the callback.
stream = p.open(format=p.get_format_from_width(awav.getsampwidth()),
                channels=awav.getnchannels(),
                rate=awav.getframerate(),
                output=True,
                stream_callback=callback)

# Then start it.
stream.start_stream()

# Wait for the stream to finish: every 0.1 s the program checks
# (via the callback's flag) whether the stream is still active.
while stream.is_active():
    time.sleep(0.1)

# Stop and close everything.
stream.stop_stream()
stream.close()
awav.close()
p.terminate()
license: gpl-3.0
hash: 1,116,448,062,652,420,200
line_mean: 26.761905
line_max: 72
alpha_frac: 0.709013
autogenerated: false
Row 4
repo_name: hammerlab/cohorts
path: cohorts/survival.py
copies: 1
size: 14564
content:
# -*- coding: utf-8 -*- # Copyright (c) 2017. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from lifelines import KaplanMeierFitter, CoxPHFitter from lifelines.statistics import logrank_test import logging import matplotlib.colors as colors from matplotlib import pyplot as plt import numbers import numpy as np import seaborn as sb import patsy from .rounding import float_str from .utils import get_logger logger = get_logger(__name__, level=logging.INFO) def _plot_kmf_single(df, condition_col, survival_col, censor_col, threshold, title, xlabel, ylabel, ax, with_condition_color, no_condition_color, with_condition_label, no_condition_label, color_map, label_map, color_palette, ci_show, print_as_title): """ Helper function to produce a single KM survival plot, among observations in df by groups defined by condition_col. All inputs are required - this function is intended to be called by `plot_kmf`. """ # make color inputs consistent hex format if colors.is_color_like(with_condition_color): with_condition_color = colors.to_hex(with_condition_color) if colors.is_color_like(no_condition_color): no_condition_color = colors.to_hex(no_condition_color) ## prepare data to be plotted; producing 3 outputs: # - `condition`, series containing category labels to be plotted # - `label_map` (mapping condition values to plot labels) # - `color_map` (mapping condition values to plotted colors) if threshold is not None: is_median = threshold == "median" if is_median: threshold = df[condition_col].median() label_suffix = float_str(threshold) condition = df[condition_col] > threshold default_label_no_condition = "%s ≤ %s" % (condition_col, label_suffix) if is_median: label_suffix += " (median)" default_label_with_condition = "%s > %s" % (condition_col, label_suffix) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: color_map = {False: no_condition_color, True: with_condition_color} elif df[condition_col].dtype == 'O' or df[condition_col].dtype.name == "category": condition = df[condition_col].astype("category") if not label_map: label_map = dict() [label_map.update({condition_value: '{} = {}'.format(condition_col, condition_value)}) for condition_value in condition.unique()] if not color_map: rgb_values = sb.color_palette(color_palette, len(label_map.keys())) hex_values = [colors.to_hex(col) for col in rgb_values] color_map = dict(zip(label_map.keys(), hex_values)) elif df[condition_col].dtype == 'bool': condition = df[condition_col] default_label_with_condition = "= {}".format(condition_col) default_label_no_condition = "¬ {}".format(condition_col) with_condition_label = with_condition_label or default_label_with_condition no_condition_label = no_condition_label or default_label_no_condition if not label_map: label_map = {False: no_condition_label, True: with_condition_label} if not color_map: 
color_map = {False: no_condition_color, True: with_condition_color} else: raise ValueError('Don\'t know how to plot data of type\ {}'.format(df[condition_col].dtype)) # produce kmf plot for each category (group) identified above kmf = KaplanMeierFitter() grp_desc = list() grp_survival_data = dict() grp_event_data = dict() grp_names = list(condition.unique()) for grp_name, grp_df in df.groupby(condition): grp_survival = grp_df[survival_col] grp_event = (grp_df[censor_col].astype(bool)) grp_label = label_map[grp_name] grp_color = color_map[grp_name] kmf.fit(grp_survival, grp_event, label=grp_label) desc_str = "# {}: {}".format(grp_label, len(grp_survival)) grp_desc.append(desc_str) grp_survival_data[grp_name] = grp_survival grp_event_data[grp_name] = grp_event if ax: ax = kmf.plot(ax=ax, show_censors=True, ci_show=ci_show, color=grp_color) else: ax = kmf.plot(show_censors=True, ci_show=ci_show, color=grp_color) ## format the plot # Set the y-axis to range 0 to 1 ax.set_ylim(0, 1) y_tick_vals = ax.get_yticks() ax.set_yticklabels(["%d" % int(y_tick_val * 100) for y_tick_val in y_tick_vals]) # plot title if title: ax.set_title(title) elif print_as_title: ax.set_title(' | '.join(grp_desc)) else: [print(desc) for desc in grp_desc] # axis labels if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) ## summarize analytical version of results ## again using same groups as are plotted if len(grp_names) == 2: # use log-rank test for 2 groups results = logrank_test(grp_survival_data[grp_names[0]], grp_survival_data[grp_names[1]], event_observed_A=grp_event_data[grp_names[0]], event_observed_B=grp_event_data[grp_names[1]]) elif len(grp_names) == 1: # no analytical result for 1 or 0 groups results = NullSurvivalResults() else: # cox PH fitter for >2 groups cf = CoxPHFitter() cox_df = patsy.dmatrix('+'.join([condition_col, survival_col, censor_col]), df, return_type='dataframe') del cox_df['Intercept'] results = cf.fit(cox_df, survival_col, event_col=censor_col) results.print_summary() # add metadata to results object so caller can print them results.survival_data_series = grp_survival_data results.event_data_series = grp_event_data results.desc = grp_desc return results def plot_kmf(df, condition_col, censor_col, survival_col, strata_col=None, threshold=None, title=None, xlabel=None, ylabel=None, ax=None, with_condition_color="#B38600", no_condition_color="#A941AC", with_condition_label=None, no_condition_label=None, color_map=None, label_map=None, color_palette="Set1", ci_show=False, print_as_title=False): """ Plot survival curves by splitting the dataset into two groups based on condition_col. Report results for a log-rank test (if two groups are plotted) or CoxPH survival analysis (if >2 groups) for association with survival. Regarding definition of groups: If condition_col is numeric, values are split into 2 groups. - if threshold is defined, the groups are split on being > or < condition_col - if threshold == 'median', the threshold is set to the median of condition_col If condition_col is categorical or string, results are plotted for each unique value in the dataset. If condition_col is None, results are plotted for all observations Currently, if `strata_col` is given, the results are repeated among each stratum of the df. A truly "stratified" analysis is not yet supported by may be soon. 
Parameters ---------- df: dataframe condition_col: string, column which contains the condition to split on survival_col: string, column which contains the survival time censor_col: string, strata_col: optional string, denoting column containing data to stratify by (default: None) threshold: int or string, if int, condition_col is thresholded at int, if 'median', condition_col thresholded at its median if 'median-per-strata', & if stratified analysis then condition_col thresholded by strata title: Title for the plot, default None ax: an existing matplotlib ax, optional, default None note: not currently supported when `strata_col` is not None with_condition_color: str, hex code color for the with-condition curve no_condition_color: str, hex code color for the no-condition curve with_condition_label: str, optional, label for True condition case no_condition_label: str, optional, label for False condition case color_map: dict, optional, mapping of hex-values to condition text in the form of {value_name: color_hex_code}. defaults to `sb.color_palette` using `default_color_palette` name, or *_condition_color options in case of boolean operators. label_map: dict, optional, mapping of labels to condition text. defaults to "condition_name = condition_value", or *_condition_label options in case of boolean operators. color_palette: str, optional, name of sb.color_palette to use if color_map not provided. print_as_title: bool, optional, whether or not to print text within the plot's title vs. stdout, default False """ # set reasonable default threshold value depending on type of condition_col if threshold is None: if df[condition_col].dtype != "bool" and \ np.issubdtype(df[condition_col].dtype, np.number): threshold = "median" # check inputs for threshold for validity elif isinstance(threshold, numbers.Number): logger.debug("threshold value is numeric") elif threshold not in ("median", "median-per-strata"): raise ValueError("invalid input for threshold. Must be numeric, None, 'median', or 'median-per-strata'.") elif threshold == "median-per-strata" and strata_col is None: raise ValueError("threshold given was 'median-per-strata' and yet `strata_col` was None. Did you mean 'median'?") # construct kwarg dict to pass to _plot_kmf_single. 
# start with args that do not vary according to strata_col arglist = dict( condition_col=condition_col, survival_col=survival_col, censor_col=censor_col, threshold=threshold, with_condition_color=with_condition_color, no_condition_color=no_condition_color, with_condition_label=with_condition_label, no_condition_label=no_condition_label, color_map=color_map, label_map=label_map, xlabel=xlabel, ylabel=ylabel, ci_show=ci_show, color_palette=color_palette, print_as_title=print_as_title) # if strata_col is None, pass all parameters to _plot_kmf_single if strata_col is None: arglist.update(dict( df=df, title=title, ax=ax)) return _plot_kmf_single(**arglist) else: # prepare for stratified analysis if threshold == "median": # by default, "median" threshold should be intra-strata median arglist["threshold"] = df[condition_col].dropna().median() elif threshold == "median-per-strata": arglist["threshold"] = "median" # create axis / subplots for stratified results if ax is not None: raise ValueError("ax not supported with stratified analysis.") n_strata = len(df[strata_col].unique()) f, ax = plt.subplots(n_strata, sharex=True) # create results dict to hold per-strata results results = dict() # call _plot_kmf_single for each of the strata for i, (strat_name, strat_df) in enumerate(df.groupby(strata_col)): if n_strata == 1: arglist["ax"] = ax else: arglist["ax"] = ax[i] subtitle = "{}: {}".format(strata_col, strat_name) arglist["title"] = subtitle arglist["df"] = strat_df results[subtitle] = plot_kmf(**arglist) [print(desc) for desc in results[subtitle].desc] if title: f.suptitle(title) return results class NullSurvivalResults(object): def __repr__(self): return "No model fit." def logrank(df, condition_col, censor_col, survival_col, threshold=None): if threshold is not None: if threshold == "median": threshold = df[condition_col].median() condition = df[condition_col] > threshold else: condition = df[condition_col] df_with_condition = df[condition] df_no_condition = df[~condition] survival_no_condition = df_no_condition[survival_col] survival_with_condition = df_with_condition[survival_col] event_no_condition = (df_no_condition[censor_col].astype(bool)) event_with_condition = (df_with_condition[censor_col].astype(bool)) return logrank_test(survival_no_condition, survival_with_condition, event_observed_A=event_no_condition, event_observed_B=event_with_condition)
license: apache-2.0
hash: -4,546,109,413,557,982,700
line_mean: 42.465672
line_max: 121
alpha_frac: 0.59989
autogenerated: false
Row 5
repo_name: DickJC123/mxnet
path: tests/python/mkl/test_mkldnn.py
copies: 1
size: 27949
content:
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ MKL-DNN related test cases """ import sys import os import numpy as np import mxnet as mx import pytest from mxnet.test_utils import rand_ndarray, assert_almost_equal from mxnet import gluon, context from mxnet.gluon import nn from mxnet.test_utils import * curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.append(os.path.join(curr_path, '../unittest/')) import itertools @pytest.mark.seed(1234) def test_mkldnn_ndarray_slice(): ctx = mx.cpu() net = gluon.nn.HybridSequential() net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None)) net.initialize(ctx=ctx) x = mx.nd.array(np.ones([32, 3, 224, 224]), ctx) y = net(x) # trigger computation on ndarray slice assert_almost_equal(y[0].asnumpy()[0, 0, 0], np.array(0.056331709)) @pytest.mark.seed(1234) def test_mkldnn_engine_threading(): net = gluon.nn.HybridSequential() net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None)) net.initialize(ctx=mx.cpu()) class Dummy(gluon.data.Dataset): def __len__(self): return 2 def __getitem__(self, key): return key, np.ones((3, 224, 224)), np.ones((10, )) loader = gluon.data.DataLoader(Dummy(), batch_size=2, num_workers=1) X = (32, 3, 32, 32) # trigger mkldnn execution thread y = net(mx.nd.array(np.ones(X))).asnumpy() # Use Gluon dataloader to trigger different thread. 
# below line triggers different execution thread for _ in loader: y = net(mx.nd.array(np.ones(X))).asnumpy() # output should be 056331709 (non-mkldnn mode output) assert_almost_equal(y[0, 0, 0, 0], np.array(0.056331709)) break def test_mkldnn_reshape(): def test_reshape_after_conv(dst_shape): shape = (1,1,4,4) data = mx.symbol.Variable('data') conv = mx.symbol.Convolution(data=data, num_filter=16, kernel=(1, 1), pad=(0, 0), stride=(1, 1)) res = mx.symbol.reshape(data=conv, shape=dst_shape) exe = res._simple_bind(mx.cpu(), data=shape, grad_req='null') val1 = np.random.uniform(-1, 1, shape) val2 = np.random.uniform(-1, 1, (16, 1, 1, 1)) val3 = np.random.uniform(-1 ,1, (1)) exe.arg_arrays[0][:] = val1 exe.arg_arrays[1][:] = val2 exe.arg_arrays[2][:] = val3 outputs = exe.forward(is_train=False)[0].asnumpy() conv_exe = conv._simple_bind(mx.cpu(), data=shape, grad_req='null') conv_exe.arg_arrays[0][:] = val1 conv_exe.arg_arrays[1][:] = val2 conv_exe.arg_arrays[2][:] = val3 data_npy = conv_exe.forward(is_train=False)[0].asnumpy() assert_almost_equal(outputs, data_npy.reshape(dst_shape)) # Test mkldnn reshape (Using shape) test_cases = [(256), (16, 16), (4, 4, 16), (4, 4, 4, 4)] for test_case in test_cases: test_reshape_after_conv(test_case) def test_reshape_before_conv(): class Net(gluon.HybridBlock): """ test Net """ def __init__(self, **kwargs): super(Net, self).__init__(**kwargs) self.conv0 = nn.Conv2D(10, (3, 3)) self.conv1 = nn.Conv2D(5, (3, 3)) def hybrid_forward(self, F, x, *args, **kwargs): x_reshape = x.reshape((0, 0, 20, 5)) y = self.conv0(x_reshape) y_reshape = y.reshape((0, 0, 9, 6)) out = self.conv1(y_reshape) return out x = mx.nd.random.uniform(shape=(2, 4, 10, 10)) x.attach_grad() net = Net() net.initialize() with mx.autograd.record(): out1 = net(x) out1.backward() dx1 = x.grad net.hybridize() with mx.autograd.record(): out2 = net(x) out2.backward() assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6) assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) def test_slice_before_conv(): class Net(gluon.HybridBlock): """ test Net """ def __init__(self, **kwargs): super(Net, self).__init__(**kwargs) self.conv0 = nn.Conv2D(4, (3, 3)) self.conv1 = nn.Conv2D(4, (3, 3)) def hybrid_forward(self, F, x, *args, **kwargs): x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 10, 10)) y = self.conv0(x_slice) y_slice = y.slice(begin=(1, 0, 2, 2), end=(2, 1, 7, 7)) out = self.conv1(y_slice) return out x = mx.nd.random.uniform(shape=(2, 10, 10, 10)) x.attach_grad() net = Net() net.initialize() with mx.autograd.record(): out1 = net(x) out1.backward() dx1 = x.grad net.hybridize() with mx.autograd.record(): out2 = net(x) out2.backward() assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6) assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) def test_slice_reshape_before_conv(): class Net(gluon.HybridBlock): """ test Net """ def __init__(self, **kwargs): super(Net, self).__init__(**kwargs) self.conv0 = nn.Conv2D(4, (3, 3)) self.conv1 = nn.Conv2D(4, (3, 3)) def hybrid_forward(self, F, x, *args, **kwargs): x_slice = x.slice(begin=(0, 0, 0, 0), end=(2, 4, 8, 9)) y = self.conv0(x_slice) y_reshape = y.reshape((0, 0, 14, 3)) out = self.conv1(y_reshape) return out x = mx.nd.random.uniform(shape=(2, 10, 10, 10)) x.attach_grad() net = Net() net.initialize() with mx.autograd.record(): out1 = net(x) out1.backward() dx1 = x.grad net.hybridize() with mx.autograd.record(): out2 = net(x) out2.backward() assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6) assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6) def 
test_flatten_slice_after_conv(): data = mx.symbol.Variable('data') weight = mx.symbol.Variable('weight') bias = mx.symbol.Variable('bias') conv1= mx.symbol.Convolution(data = data, weight=weight, bias=bias, name='conv1', num_filter=64, kernel=(3,3), stride=(1,1)) flatten1 = mx.symbol.flatten(data = conv1) slice1 = mx.symbol.slice(data = flatten1, begin=0, end=1) shape = (2, 16, 16, 16) val = np.random.rand(2, 16, 16, 16).astype(np.float32) exe = slice1._simple_bind(context.current_context(), data=shape) exe.arg_arrays[0][:] = val exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape) exe.arg_arrays[2][:] = np.random.normal(size=exe.arg_arrays[2].shape) p = exe.forward(is_train=False) p[0].wait_to_read() print(p[0]) def test_mkldnn_sum_with_mkldnn_layout(): x_shape = (32, 3, 224, 224) x_npy = np.ones(x_shape, dtype='float32') w_shape = (32, 3, 3, 3) w_npy = np.ones(w_shape, dtype='float32') x = mx.sym.Variable("x") w = mx.sym.Variable("w") z = mx.symbol.Convolution(data=x, weight=w, num_filter=32, kernel=(3, 3)) num_inputs = [2, 3, 4, 5] for i in num_inputs: inputs = [] for n in range(i): inputs.append(z) y = mx.sym.add_n(*inputs) # (only MKLDNN data input) exe = y._simple_bind(ctx=mx.cpu(), x=x_shape, w=w_shape) out = exe.forward(is_train=False, x=x_npy, w=np.ones(w_shape))[0] #conv with kernel (3,3) on ones should give result=27 single_cov = 27.0 assert_almost_equal(out[0].asnumpy()[0, 0, 0], single_cov*i) def test_mkldnn_sum_inplace_with_cpu_layout(): x_shape = (32, 3, 224, 224) x_npy = np.ones(x_shape, dtype='float32') y_shape = (32, 32, 222, 222) y_npy = np.ones(y_shape, dtype='float32') x = mx.sym.Variable("x") y = mx.sym.Variable("y") z = mx.symbol.Convolution(data=x, num_filter=32, kernel=(3, 3)) z = mx.sym.add_n(z, y) # (MKLDNN data, cpu data) exe = z._simple_bind(ctx=mx.cpu(), x=x_shape, y=y_shape) out = exe.forward(is_train=False, x=x_npy, y=y_npy)[0] assert_almost_equal(out[0].asnumpy()[0, 0, 0], 1.0) def test_batchnorm(): def check_batchnorm_training(stype): for shape in [(2, 3), (2, 4), (2, 3, 2, 2), (2, 4, 2, 2)]: data_tmp = np.random.normal(-0.1, 0.1, size=shape) s = shape[1], gamma = np.ones(s) beta = np.ones(s) gamma[1] = 3 beta[0] = 3 rolling_mean = np.random.uniform(size=s) rolling_std = np.random.uniform(size=s) data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype), mx.nd.array(beta).tostype(stype)] mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)] test = mx.symbol.BatchNorm(data, fix_gamma=False) check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2) stypes = ['row_sparse', 'default'] for stype in stypes: check_batchnorm_training(stype) def test_batchnorm_relu_fusion(): def check_batchnorm_relu_fusion(shape): x = mx.sym.Variable('x') in_data = mx.nd.random.normal(shape=shape) grad_out = mx.nd.random.uniform(0, 1, shape) bn = mx.sym.BatchNorm(data=x, fix_gamma=False) relu = mx.sym.Activation(data=bn, act_type='relu', name='relu') exe = relu._simple_bind(ctx=mx.cpu(), x=shape, grad_req='write') exe.arg_arrays[0][:] = in_data exe.forward(is_train=True) exe.backward(grad_out) no_fuse_outputs = exe.outputs no_fuse_grads = exe.grad_arrays bnrelu = mx.sym.contrib.BatchNormWithReLU(data=x, fix_gamma=False) exe_fuse = bnrelu._simple_bind(ctx=mx.cpu(), x=shape, grad_req='write') exe_fuse.arg_arrays[0][:] = in_data exe_fuse.forward(is_train=True) exe_fuse.backward(grad_out) fuse_outputs = exe_fuse.outputs 
fuse_grads = exe_fuse.grad_arrays for i in range(len(no_fuse_outputs)): assert_almost_equal(no_fuse_outputs[i], fuse_outputs[i]) for i in range(len(no_fuse_grads)): assert_almost_equal(no_fuse_grads[i], fuse_grads[i]) def check_batchnorm_relu_fusion_gluon(shape): class BNNet(gluon.HybridBlock): def __init__(self, fuse_relu): super(BNNet, self).__init__() self.fuse_relu = fuse_relu if self.fuse_relu: self.bn = gluon.nn.BatchNormReLU() else: self.bn = gluon.nn.BatchNorm() self.relu = gluon.nn.Activation('relu') def forward(self, x): y = self.bn(x) if not self.fuse_relu: y = self.relu(y) return y fused_net = BNNet(fuse_relu=True) unfused_net = BNNet(fuse_relu=False) fused_net.initialize() unfused_net.initialize() in_data = mx.nd.random.normal(shape=shape) no_fuse_outputs = unfused_net.forward(in_data) fuse_outputs = fused_net.forward(in_data) for i in range(len(no_fuse_outputs)): assert_almost_equal(no_fuse_outputs[i], fuse_outputs[i]) check_batchnorm_relu_fusion((1, 3, 224, 224)) check_batchnorm_relu_fusion((8, 3, 224, 224)) check_batchnorm_relu_fusion_gluon((1, 3, 224, 224)) check_batchnorm_relu_fusion_gluon((8, 3, 224, 224)) def test_softmax(): def check_softmax_training(stype): for shape in [(2, 3), (2, 3, 2, 2)]: data_tmp = np.random.normal(-0.1, 0.1, size=shape) data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype)] test = mx.symbol.softmax(data, axis=-1) check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_softmax_training(stype) def test_pooling(): def check_pooling_training(stype): for shape in [(3, 3, 10), (3, 3, 20, 20), (3, 3, 10, 20, 20)]: data_tmp = np.random.normal(-0.1, 0.1, size=shape) data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype)] if np.array(shape).shape[0] == 3: test = mx.symbol.Pooling(data=data, kernel=(3), stride=(2), pool_type='avg') elif np.array(shape).shape[0] == 4: test = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pool_type='avg') elif np.array(shape).shape[0] == 5: test = mx.symbol.Pooling(data=data, kernel=(3, 3, 3), stride=(2, 2, 2), pool_type='avg') else: return 0 check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_pooling_training(stype) def test_activation(): def check_activation_training(stype): for shape in [(2, 3, 3), (2, 3, 2, 2)]: eps = 1e-5 data_tmp = np.random.normal(-0.1, 1, size=shape) # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin. # Here we replace small problematic inputs with 1.0. Repro issue with seed 851486559. 
data_tmp[abs(data_tmp) < eps] = 1.0 data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype)] test = mx.symbol.Activation(data, act_type="relu") check_numeric_gradient(test, in_location, numeric_eps=eps, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_activation_training(stype) def test_convolution(): def check_convolution_training(stype): for shape in [(3, 3, 10), (3, 3, 10, 10), (3, 3, 10, 10, 10)]: data_tmp = np.random.normal(-0.1, 1, size=shape) data = mx.symbol.Variable('data', stype=stype) if np.array(shape).shape[0] == 3: test = mx.symbol.Convolution(data=data, kernel=(3,), stride=(2), num_filter=4) weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3)) elif np.array(shape).shape[0] == 4: test = mx.symbol.Convolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4) weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3, 3)) elif np.array(shape).shape[0] == 5: test = mx.symbol.Convolution(data=data, kernel=(3, 3, 3), stride=(2, 2, 2), num_filter=4) weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3, 3, 3)) else: return 0 bias_tmp = np.random.normal(0.1, 0.1, size=(4,)) in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype), mx.nd.array(bias_tmp).tostype(stype)] check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_convolution_training(stype) @pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/12579") def test_Deconvolution(): def check_Deconvolution_training(stype): for shape in [(3, 3, 10), (3, 3, 10, 10)]: data_tmp = np.random.randint(256, size=shape) data = mx.symbol.Variable('data', stype=stype) if np.array(shape).shape[0] == 3: test = mx.symbol.Deconvolution(data=data, kernel=(3,), stride=(2), num_filter=4) weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3)) elif np.array(shape).shape[0] == 4: test = mx.symbol.Deconvolution(data=data, kernel=(3, 3), stride=(2, 2), num_filter=4) weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3, 3)) else: return 0 bias_tmp = np.random.normal(0.1, 0.1, size=(4,)) in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(weight_tmp).tostype(stype), mx.nd.array(bias_tmp).tostype(stype)] check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_Deconvolution_training(stype) def test_LRN(): def check_LRN_training(stype): for shape in [(3, 4, 5, 5)]: data_tmp = np.random.normal(-0.1, 0.1, size=shape) data = mx.symbol.Variable('data', stype=stype) in_location = [mx.nd.array(data_tmp).tostype(stype)] test = mx.symbol.LRN(data, nsize=3) check_numeric_gradient(test, in_location, numeric_eps=1e-2, rtol=0.16, atol=1e-4) stypes = ['row_sparse', 'default'] for stype in stypes: check_LRN_training(stype) def test_fullyconnected(): def check_fullyconnected_training(stype): data_shape = rand_shape_nd(2) weight_shape = rand_shape_nd(2) weight_shape = (weight_shape[0], data_shape[1]) for density in [1.0, 0.5, 0.0]: x = rand_ndarray(shape=data_shape, stype=stype, density=density) w = rand_ndarray(shape=weight_shape, stype=stype, density=density) x_sym = mx.sym.Variable("data") w_sym = mx.sym.Variable("weight") sym = mx.sym.FullyConnected(data=x_sym, weight=w_sym, num_hidden=weight_shape[0], no_bias=True) in_location = [x, w] check_numeric_gradient(sym, in_location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3) stypes = 
['row_sparse', 'default'] for stype in stypes: check_fullyconnected_training(stype) def test_softmax_with_large_inputs(): def softmax_forward(input_data, true_output): data = mx.sym.Variable('data') out1 = data.softmax(axis=1) exec1 = out1._bind(mx.cpu(), args={'data': input_data}) exec1.forward()[0].wait_to_read() ndarr = exec1.outputs[0][0][0][0] nparr = ndarr.asnumpy() assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5) softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0])) softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0])) def test_non_mkldnn_fcomputeex(): # test special case where MKLDNN formatted NDArray feeds into non-mkldnn fcomputeex operator # conv is example where MKLDNN NDArray is created from regular NDArrays # CustomOps is example of non-mkldnn fcomputeex operator @mx.operator.register("custom") class CustomProp(mx.operator.CustomOpProp): def __int__(self): super(CustomProp, self).__init__(need_top_grad=False) def list_arguments(self): return ['data'] def list_outputs(self): return ['output'] def infer_shape(self, in_shape): data_shape = in_shape[0] output_shape = in_shape[0] return [data_shape], [output_shape], [] def infer_type(self, in_type): dtype = in_type[0] return [dtype], [dtype], [] def create_operator(self, ctx, shapes, dtypes): return Custom() class Custom(mx.operator.CustomOp): def forward(self, is_train, req, in_data, out_data, aux): print(in_data[0]) self.assign(out_data[0], req[0], in_data[0]) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): self.assign(in_grad[0], req[0], out_grad) data = mx.symbol.Variable('data') conv = mx.sym.Convolution(data=data, kernel=(5, 5), pad=(1, 1), stride=(1,1), num_filter=8, name="conv", no_bias=True) custom = mx.symbol.Custom(name='custom', data=conv, op_type='custom') exec1 = custom._bind(mx.cpu(), args={'data': mx.nd.ones([10,3,96,96]), 'conv_weight': mx.nd.ones([8,3,5,5])}) exec1.forward()[0].wait_to_read() def test_conv_transpose(): axes = [(0,2,1,3), (0,2,3,1), (1,2,3,0), (3,2,1,0)] a = np.random.rand(10, 16, 50, 50) b = np.random.rand(32, 16, 3, 3) x = mx.nd.array(a) w = mx.nd.array(b) y = mx.nd.Convolution(data=x, weight=w, kernel=(3, 3), num_group=1, num_filter=32, no_bias=True) for axis in axes: t = mx.nd.transpose(y, axis) t.wait_to_read() s = y.asnumpy() n = np.transpose(s, axis) np.allclose(t.asnumpy(), n) # This test case is contributed by @awsbillz in https://github.com/apache/incubator-mxnet/issues/14766 def test_reshape_transpose_6d(): class Reshape2D(gluon.HybridBlock): def __init__(self, factor): super(Reshape2D, self).__init__() self._factors = (int(factor),) * 2 def hybrid_forward(self, F, x): f1, f2 = self._factors # (N, f1*f2*C, H, W) x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W) x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W) x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2) x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2) return x class Net(gluon.HybridBlock): def __init__(self, **kwargs): super(Net, self).__init__(**kwargs) self.conv1 = nn.Conv2D(8, kernel_size=5) self.reshape2D = Reshape2D(2) def hybrid_forward(self, F, x): x = self.conv1(x) x = self.reshape2D(x) return x net = Net() net.initialize(mx.init.Xavier(), ctx=mx.cpu()) net.hybridize() data = mx.nd.random_normal(shape=(1, 3, 600, 600)) output = net(data) a = output.asnumpy() def 
test_concat(): def ref_concat(a, b, axis): return np.concatenate((a, b), axis=axis) a_sym = mx.sym.Variable("a") b_sym = mx.sym.Variable("b") dshape = rand_shape_nd(4) a_shape = tuple(dshape) b_shape = tuple(dshape) for axis in range(0, 4): z = mx.sym.concat(a_sym, b_sym, dim=axis) a = np.random.uniform(-1, 1, a_shape) b = np.random.uniform(-1, 1, b_shape) exe = z._simple_bind(ctx=mx.cpu(), a=a_shape, b=b_shape) out = exe.forward(is_train=False, a=a, b=b) ref_out = ref_concat(a, b, axis=axis) out = out[0].asnumpy() assert_almost_equal(out, ref_out) def check_concat_training(stype): data_shape = rand_shape_nd(4) for density in [1.0, 0.5, 0.0]: a_sym = mx.sym.Variable('a') b_sym = mx.sym.Variable('b') sym = mx.sym.concat(a_sym, b_sym, dim=1) a = rand_ndarray(shape=data_shape, stype=stype, density=density) b = rand_ndarray(shape=data_shape, stype=stype, density=density) in_location = [a, b] check_numeric_gradient(sym, in_location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3) stypes = ['row_sparse', 'default'] for stype in stypes: check_concat_training(stype) def test_concat_blocked(): ctx = mx.cpu() axis = 1 filters = 32 # must be a multiple of 16 kernel = (3, 3) for in_dim_size in range(1, 17): # check cases with and without padding in_shape = (1, in_dim_size, 64, 64) in_data = mx.nd.random.uniform(-1, 1, in_shape, ctx=ctx) conv_weights = mx.nd.random.uniform(-1, 1, (filters, in_shape[1], kernel[0], kernel[1]), ctx=ctx) def calc_output_of_layer(layer): ex = layer._simple_bind(ctx, x=in_shape) in_data.copyto(ex.arg_arrays[0]) conv_weights.copyto(ex.arg_arrays[1]) return ex.forward()[0].asnumpy() x = mx.sym.Variable('x') w = mx.sym.Variable('w') # convolution, so a blocked format is selected conv = mx.sym.Convolution(data=x, weight=w, num_filter=filters, kernel=kernel, pad=(1, 1), no_bias=True) conc = mx.sym.concat(conv, x, dim=axis) # first calculate the output of the convolution to determine ref_out conv_out = calc_output_of_layer(conv) ref_out = np.concatenate((conv_out, in_data.asnumpy()), axis=axis) out = calc_output_of_layer(conc) assert_almost_equal(out, ref_out) def test_elemwise_add(): def ref_add(a, b): return np.add(a, b) a_sym = mx.sym.Variable("a") b_sym = mx.sym.Variable("b") dshape = rand_shape_nd(4) a_shape = tuple(dshape) b_shape = tuple(dshape) z = mx.sym.elemwise_add(a_sym, b_sym) a = np.random.uniform(-1, 1, a_shape) b = np.random.uniform(-1, 1, b_shape) exe = z._simple_bind(ctx=mx.cpu(), a=a_shape, b=b_shape) out = exe.forward(is_train=False, a=a, b=b) ref_out = ref_add(a, b) out = out[0].asnumpy() assert_almost_equal(out, ref_out, rtol=1e-6, atol=1e-6) def check_elemwise_add_training(stype): data_shape = rand_shape_nd(4) for density in [1.0, 0.5, 0.0]: a_sym = mx.sym.Variable('a') b_sym = mx.sym.Variable('b') sym = mx.sym.elemwise_add(a_sym, b_sym) a = rand_ndarray(shape=data_shape, stype=stype, density=density) b = rand_ndarray(shape=data_shape, stype=stype, density=density) in_location = [a, b] check_numeric_gradient(sym, in_location, numeric_eps=1e-3, rtol=1e-3, atol=5e-3) stypes = ['row_sparse', 'default'] for stype in stypes: check_elemwise_add_training(stype) def test_rnn(): SEQ_LENGTH = [2**10, 2**5] STATE_SIZE = [1, 2] BATCH_SIZE = [4] INPUT_SIZE = [4] def batch_check(seq_length, state_size, batch_size, input_size): modes_params = [('rnn_relu', mx.np.random.normal(0, 1, ((input_size + state_size + 2)*state_size),)), ('rnn_tanh', mx.np.random.normal(0, 1, ((input_size + state_size + 2)*state_size),)), ('gru', mx.np.random.normal(0, 1, ((input_size + state_size + 
2)*state_size*3),)) ] for m, p in modes_params: data = mx.np.random.normal(0, 1, (seq_length, batch_size, input_size)) state = mx.np.random.normal(0, 1, (1, batch_size, state_size)) data.attach_grad() state.attach_grad() with mx.autograd.record(): y = mx.npx.rnn(data=data, parameters=p, mode=m, \ state=state, state_size=state_size, num_layers=1) assert y.shape == (seq_length, batch_size, state_size) assert type(y[0]).__name__ == 'ndarray' y.backward() assert state.shape == (1, batch_size, state_size) assert type(state[0]).__name__ == 'ndarray' for sl, ss, bs, in_s in itertools.product(SEQ_LENGTH, STATE_SIZE, BATCH_SIZE, INPUT_SIZE): batch_check(sl, ss, bs, in_s)
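For illustration only (not part of the test file above): the reshape/slice tests all repeat the same pattern of running a Gluon block imperatively, hybridizing it (which enables the graph/MKL-DNN execution path), and checking that outputs and input gradients agree. A minimal sketch of that pattern, assuming an MXNet build with Gluon available:

import mxnet as mx
from mxnet import gluon, autograd
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal

def check_imperative_vs_hybrid(net, x):
    # Run once in imperative mode and record the input gradient.
    x.attach_grad()
    with autograd.record():
        out1 = net(x)
    out1.backward()
    dx1 = x.grad.copy()

    # Hybridize and run again; outputs and gradients should match within tolerance.
    net.hybridize()
    with autograd.record():
        out2 = net(x)
    out2.backward()
    assert_almost_equal(out1, out2, rtol=1e-5, atol=1e-6)
    assert_almost_equal(dx1, x.grad, rtol=1e-5, atol=1e-6)

net = nn.HybridSequential()
net.add(nn.Conv2D(4, (3, 3)))
net.initialize(ctx=mx.cpu())
check_imperative_vs_hybrid(net, mx.nd.random.uniform(shape=(2, 3, 10, 10)))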
apache-2.0
373,793,390,311,055,500
38.477401
128
0.572865
false
sljrobin/dotfiles
dzen2/.dzen2/scripts/Clock.py
1
2629
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: Clock.py
# Description: Functions for Clock
# Author: Simon L. J. Robin | https://sljrobin.org
# Created: 2016-08-28 20:18:20
# Modified: 2016-08-30 01:18:05
#
################################################################################
import datetime
import os
import sys
import time

sys.path.insert(0, os.environ['HOME'] + "/.dzen2/lib")
import Colors
import Icons


################################################################################
class Clock(object):
    """Functions for Time.
    """
    def get_date(self):
        """Prints the current date.
        """
        icon = Icons.Icons()  # Icon
        current_date = time.strftime("%d/%m/%Y", time.gmtime())  # Current date
        icon.show_icon("calendar")
        sys.stdout.write("%s" % current_date)

    ############################################################################
    def get_time(self):
        """Prints the current time.
        """
        icon = Icons.Icons()  # Icon
        current_time = time.strftime("%H:%M:%S", time.gmtime())  # Current time
        icon.show_icon("clock")
        sys.stdout.write("%s" % current_time)

    ############################################################################
    def get_uptime(self):
        """Prints the current uptime.
        """
        icon = Icons.Icons()  # Icon
        unit_m = 60            # Unit: Minute
        unit_h = unit_m * 60   # Unit: Hour
        unit_d = unit_h * 24   # Unit: Day

        # Opening file containing information
        try:
            data_file = open("/proc/uptime")
            data = data_file.read().split()
            data_file.close()
        except:
            sys.stdout.write("^fg(%s)!E! UptimeFile^fg()" % Colors.CL_BASE08)
            return  # bail out: `data` would be undefined below if the file could not be read

        # Selecting information
        value = float(data[0])

        # Casting in good time units
        value_d = int(value / unit_d)
        value_h = int((value % unit_d) / unit_h)
        value_m = int((value % unit_h) / unit_m)
        value_s = int(value % unit_m)

        # Adding leading zeros
        value_d_fmt = str(value_d).zfill(2)
        value_h_fmt = str(value_h).zfill(2)
        value_m_fmt = str(value_m).zfill(2)
        value_s_fmt = str(value_s).zfill(2)

        # Printing information
        icon.show_icon("uptime")
        sys.stdout.write("%s:" % value_d_fmt)
        sys.stdout.write("%s:" % value_h_fmt)
        sys.stdout.write("%s:" % value_m_fmt)
        sys.stdout.write("%s" % value_s_fmt)
gpl-2.0
-504,071,819,380,044,400
31.060976
80
0.45873
false
lesley2958/lesley2958.github.io
blog/add_static_content.py
1
3752
import sys import os BASIC = True #notebook_file = sys.argv[1] from builtins import open import re home_page = "https://nipunbatra.github.io/blog/" social = {'FB':"https://facebook.com/sharer/sharer.php?u=%s"} def convert(notebook_file): # Get parent folder of file parent_folder = os.path.abspath(os.path.join(notebook_file, os.pardir)) notebook_name = (os.path.splitext(notebook_file)[0]).split("/")[-1] from nbconvert import HTMLExporter import nbformat from traitlets.config import Config c = Config() c.HTMLExporter.preprocessors = ['nbconvert.preprocessors.ExtractOutputPreprocessor'] #resources = {} # Create directory to store post files """ try: os.makedirs(os.path.join(parent_folder, notebook_name)) except: pass """ #resources['output_files_dir'] = '' # create the new exporter using the custom config html_exporter = HTMLExporter(config=c) html_exporter = HTMLExporter() #return html_exporter if BASIC: html_exporter.template_file = 'basic' nb = nbformat.reads(open(notebook_file, 'r').read(), as_version=4) (body, resources) = html_exporter.from_notebook_node(nb) # FIRST between H1 in body from bs4 import BeautifulSoup soup = BeautifulSoup(body, 'html.parser') try: title = soup.find_all('h1')[0].contents[0] if title is None: title = "Nipun Batra" except: title="Nipun Batra" """ # Replace body = body.replace('<img src = "output_""', '<img src = "../%s/output_"' %notebook_name) mpl_images = resources['outputs'] for image_name, image_binary in mpl_images.iteritems(): with open(os.path.join(os.path.join(parent_folder, notebook_name, image_name)), 'wb') as f: f.write(image_binary) """ read_navbar = open("navbar.txt", 'r').read() read_mathjax = open("mathjax.txt", 'r').read() read_disqus = open("disqus.txt", 'r').read() read_css = open("bootstrap_css.txt", 'r').read() read_ga = open("google_analytics.txt","r").read() if BASIC: body = """<html> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags --> <meta name="description" content=""> <meta name="author" content=""> <title>"""+title+"""</title> </head> <body> <div class="container" margin="5%">"""+body+"</div></body></html>" if read_navbar not in body: body = body.replace("<body>", "<body>\n" + read_navbar) if read_ga not in body: body = body.replace("</body>", read_ga + "\n</body>") if read_disqus not in body: body = body.replace("</body>", read_disqus + "\n</body>") if read_mathjax not in body: body = body.replace("</head>", read_mathjax + "\n</head>") if read_css not in body: body = body.replace("</title>", "</title>\n" + read_css) body = body.replace("</body>", read_css + "\n</body>") # Put social media icons #body = body.replace("img src", "img width='100%' src") #body = body.replace(" rendered_html", "") body = body.replace(".rendered_html{overflow-x:auto" , ".rendered_html{overflow-x:auto;overflow-y: hidden;") body = body.replace("#notebook{font-size:14px;line-height:20px;", "#notebook{font-size:20px;line-height:29px;") body = body.replace("div.text_cell_render{outline:0;resize:none;width:inherit;border-style:none;padding:.5em .5em .5em .4em;color:#000;", "div.text_cell_render{outline:0;resize:none;width:inherit;border-style:none;padding:.5em .5em .5em .4em;color:#777;") html_file = notebook_file.replace(".ipynb", ".html") html_file_writer = open(html_file, 'w') html_file_writer.write(body) html_file_writer.close()
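For reference (not part of the script above): stripped of the site-specific navbar/MathJax/analytics injection and CSS patching, the core conversion is nbconvert's HTMLExporter with the 'basic' template. A minimal sketch, assuming an older nbconvert release where template_file='basic' is valid (newer releases use template_name), with a hypothetical notebook file name:

import nbformat
from nbconvert import HTMLExporter

def notebook_to_basic_html(notebook_file):
    # Read the notebook in the v4 format, as the script above does.
    nb = nbformat.reads(open(notebook_file, 'r').read(), as_version=4)

    # Export only the notebook body (no full HTML page), matching BASIC = True.
    exporter = HTMLExporter()
    exporter.template_file = 'basic'
    body, resources = exporter.from_notebook_node(nb)
    return body

# html = notebook_to_basic_html('some_post.ipynb')  # hypothetical file name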
mit
-5,824,497,082,864,275,000
28.543307
138
0.660448
false
spaceone/tehbot
tehbot/plugins/tmdb/__init__.py
1
3213
from tehbot.plugins import * import tehbot.plugins as plugins import tmdbsimple as tmdb class MoviePlugin(StandardPlugin): """Shows information about movies from themoviedb.org""" def __init__(self): StandardPlugin.__init__(self) self.parser.add_argument("movie") def execute(self, connection, event, extra, dbconn): tmdb.API_KEY = self.settings["tmdb_api_key"] try: pargs = self.parser.parse_args(extra["args"]) if self.parser.help_requested: return self.parser.format_help().strip() except Exception as e: return u"Error: %s" % str(e) id = -1 res = tmdb.Search().movie(query=pargs.movie) if res["total_results"] > 0: id = res["results"][0]["id"] txt = "No such movie." if id != -1: movie = tmdb.Movies(id) movie_info = movie.info() txt = "\x02%s\x02" % movie_info["title"] if movie_info["title"] != movie_info["original_title"]: txt += " (%s)" % movie_info["original_title"] if movie_info["release_date"]: txt += " | \x02Released:\x02 %s" % movie_info["release_date"] if movie_info["vote_count"] > 0: txt += " | \x02Rating:\x02 %.1f/10" % movie_info["vote_average"] if movie_info["homepage"]: txt += " | \x02Homepage:\x02 %s" % movie_info["homepage"] txt += "\n" + plugins.split(movie_info["overview"]) return txt register_plugin("movie", MoviePlugin()) class TvPlugin(StandardPlugin): """Shows information about TV series from themoviedb.org""" def __init__(self): StandardPlugin.__init__(self) self.parser.add_argument("show") def execute(self, connection, event, extra, dbconn): tmdb.API_KEY = self.settings["tmdb_api_key"] try: pargs = self.parser.parse_args(extra["args"]) if self.parser.help_requested: return self.parser.format_help().strip() except Exception as e: return u"Error: %s" % str(e) id = -1 res = tmdb.Search().tv(query=pargs.show) if res["total_results"] > 0: id = res["results"][0]["id"] txt = "No such movie." if id != -1: movie = tmdb.TV(id) movie_info = movie.info() txt = "\x02%s\x02" % movie_info["name"] if movie_info["name"] != movie_info["original_name"]: txt += " (%s)" % movie_info["original_name"] if movie_info["first_air_date"]: txt += " | \x02First Aired:\x02 %s" % movie_info["first_air_date"] if movie_info["number_of_seasons"]: txt += " | \x02Nr. of Seasons:\x02 %d" % movie_info["number_of_seasons"] if movie_info["vote_count"] > 0: txt += " | \x02Rating:\x02 %.1f/10" % movie_info["vote_average"] if movie_info["homepage"]: txt += " | \x02Homepage:\x02 %s" % movie_info["homepage"] txt += "\n" + plugins.split(movie_info["overview"]) return txt register_plugin("tv", TvPlugin())
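For illustration only: both plugins follow the same tmdbsimple call sequence, namely set the API key, search, then fetch details by id. A stripped-down sketch with a placeholder key and query:

import tmdbsimple as tmdb

tmdb.API_KEY = "YOUR_TMDB_API_KEY"  # placeholder; the plugins read this from their settings

res = tmdb.Search().movie(query="Alien")
if res["total_results"] > 0:
    movie_id = res["results"][0]["id"]
    info = tmdb.Movies(movie_id).info()
    # info is a dict with keys such as "title", "release_date" and "vote_average"
    print("%s | Released: %s | Rating: %.1f/10" % (info["title"], info["release_date"], info["vote_average"]))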
mit
1,482,561,011,729,763,800
35.931034
88
0.530968
false
thisismyrobot/DisPy
dispy.py
1
4224
# Copyright 2012 Robert Wallhead # [email protected] # <http://thisismyrobot.com> # # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import inspect import xmlrpclib from SimpleXMLRPCServer import SimpleXMLRPCServer PORT = 8000 class Server(object): """ A general purpose XML-RPC-exposing server. """ def __init__(self, listen_ip='0.0.0.0'): """ Prepare the XML-RPC server, map the exposed functions. """ self.server = SimpleXMLRPCServer((listen_ip, PORT), logRequests=False) self.server.register_function(self._init, 'init') self.server.register_function(self._call, 'call') self.server.register_function(self._get, 'get') self.server.register_function(self._set, 'set') self.cls = {} def start(self): """ Start the server. """ self.server.serve_forever() def stop(self): """ Stop the server. """ self.server.shutdown() def _init(self, cls_src, *args): """ Register and initialise a class, class id. Only to be called via XML-RPC. """ existing_classes = dir() exec(cls_src) new_class = [c for c in dir() if c not in existing_classes][0] next_id = len(self.cls) self.cls[next_id] = eval(new_class)(*args) return next_id def _call(self, cls_id, method, *args): """ Call a method. Only to be called via XML-RPC. """ return getattr(self.cls[cls_id], method)(*args) def _get(self, cls_id, attr): """ Return the value of an attribute. Only to be called via XML-RPC. """ return getattr(self.cls[cls_id], attr) def _set(self, cls_id, attr, val): """ Set the value of an attribute. Only to be called via XML-RPC. """ setattr(self.cls[cls_id], attr, val) return 0 class WrapperTool(object): """ A toolkit to wrap class instances to allow them to be accessed transparently over XML-RPC. """ def __init__(self, server_ip='127.0.0.1'): """ Create the XML-RPC proxy connection to the server. """ address = 'http://' + server_ip + ':' + str(PORT) self.proxy = xmlrpclib.ServerProxy(address) def _get_src(self, cls): """ Return the source code of a class """ return inspect.getsource(cls) def _map_methods(self, cls, instance_id): """ Map the methods to XML-RPC calls. Why not use the server module's register_instance() method you ask? Well, it doesn't expose members, only methods, and adding member access again is nearly impossible.... """ for name, member in inspect.getmembers(cls): if inspect.isfunction(member): setattr(cls, name, lambda x, *y: self.proxy.call(instance_id, name, *y)) def _map_members(self, cls, instance_id): """ Map the members to XML-RPC calls via the magic methods. """ setattr(cls, '__init__', lambda x: None) setattr(cls, '__getattr__', lambda x, y: self.proxy.get(instance_id, y)) setattr(cls, '__setattr__', lambda x, y, z: self.proxy.set(instance_id, y, z)) def init_cls(self, cls, *args): """ Wrap a class, returning a stubb'd instance. """ cls_src = self._get_src(cls) instance_id = self.proxy.init(cls_src, *args) self._map_methods(cls, instance_id) self._map_members(cls, instance_id) return cls()
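For illustration only (not part of the module above): a sketch of end-to-end use under Python 2, assuming the module is importable as dispy, the server runs in a separate process on the same machine, and the wrapped class is defined in a file (inspect.getsource needs file-backed source). The Sensor class is a made-up example; the sketch only exercises the attribute proxying set up by _map_members.

# --- server process, started first in its own terminal ---
# import dispy
# dispy.Server().start()            # serves XML-RPC on port 8000 until stopped

# --- client process ---
import dispy

class Sensor(object):               # made-up example class
    def __init__(self, name):
        self.name = name
        self.reading = 0

tool = dispy.WrapperTool('127.0.0.1')
sensor = tool.init_cls(Sensor, 'outdoor')  # class source is sent to and instantiated on the server
sensor.reading = 42                 # routed through __setattr__ -> proxy.set(...)
print(sensor.reading)               # routed through __getattr__ -> proxy.get(...) -> 42
print(sensor.name)                  # 'outdoor', stored on the server-side instance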
gpl-3.0
-5,889,848,048,627,380,000
33.064516
78
0.595407
false
icomms/wqmanager
apps/webservice/views.py
1
13148
try: import json except ImportError: import simplejson as json from rapidsms.webui import settings from datetime import datetime from django.http import HttpResponse, HttpResponseServerError, HttpResponseBadRequest from rapidsms.webui.utils import render_to_response from django.db.models import Q from domain.models import Domain from wqm.models import WqmAuthority, WqmArea, SamplingPoint from standards.models import WaterUseType, Standard from samples.models import ValueRule, Sample, NormalRange, AbnormalRange, MeasuredValue, Parameter ACCESS_KEY = settings.WEBSERVICE_PASSWORD SUPERUSER_ID = int(settings.WEBSERVICE_ADMIN_ID) SUPERUSER_KEY = settings.WEBSERVICE_ADMIN_PASSWORD ALLOWED_TABLES = [ 'abnormalrange', 'authorisedsampler', 'domainlookup', 'measuredvalue', 'normalrange', 'parameter', 'sample', 'samplingpoint', 'smsnotifications', 'standard', 'valuerule', 'waterusetype', 'wqmarea', 'wqmauthority' ] ALLOWED_DOMAINS = map(int, settings.WEBSERVICE_DOMAINS.split(',')) def check_access(request): domain = request.GET.get('domain', None) key = request.GET.get('key', None) if domain is None or domain == "" or key is None or key == "": return {'success': False, 'error': _error_response("Incorrect access key for domain") } ok = False try: domain = int(domain) except: return {'success': False, 'error': _error_response("Incorrect access key for domain") } if domain == SUPERUSER_ID: if key == SUPERUSER_KEY: ok = True else: if key == ACCESS_KEY: ok = True if ok: return {'success': True, 'error': None } else: return {'success': False, 'error': _error_response("Incorrect access key for domain") } def table_names(request): key = request.GET.get('key', None) #skip check for now, because phones in the field don't send access key for this call if True or key == SUPERUSER_KEY or key == ACCESS_KEY: return _success_response(ALLOWED_TABLES) else: return _error_response("Incorrect access key for domain") def added_rows(request): return _fetch_rows(request, 'added') def updated_rows(request): return _fetch_rows(request, 'updated') def deleted_rows(request): return _fetch_rows(request, 'deleted') def _fetch_rows(request, type): result = check_access(request) if not result['success']: return result['error'] table = request.GET.get('table', None) time = _int_or_zero(request.GET.get('time', None)) domain = _int_or_zero(request.GET.get('domain', None)) if table is None or time is None: return _error_response("One or more of the following required parameters were not specified: table, time") if table not in ALLOWED_TABLES: return _error_response("Unknown or forbidden table name specified") limit = None offset = None if request.GET.get('limit', None): limit = _int_or_zero(request.GET.get('limit', None)) if request.GET.get('offset', None): offset = _int_or_zero(request.GET.get('offset', None)) # backwards compatibility - match behaviour original android app expects if limit is None: limit = 1000 data = _normalise_rows(type, table, time, domain) return _success_response(data, limit, offset) def _int_or_zero(value): try: return int(value) except TypeError, e: return 0 except ValueError, e: return 0 def _success_response(data, limit=0, offset=0): if limit is None: limit = 0 if offset is None: offset = 0 total_count = len(data) count = limit if limit and offset: data = data[offset:(limit + offset)] elif limit: data = data[:limit] elif offset: data = data[offset:] count = len(data) return _json_response({ 'status': 'success', 'count': count, 'total_count': total_count, 'limit': limit, 'offset': offset, 'data': data }) def 
_error_response(message): return _json_response({ 'status': 'error', 'count': 0, 'total_count': 0, 'limit': 0, 'offset': 0, 'data': message }) def _json_response(object): return HttpResponse(json.dumps(object), content_type='application/json; charset=utf8') def _normalise_rows(type, table, time, domain): domain = int(domain) data = [] if type == 'deleted': return data # TODO this isn't entirely correct - WQM timestamps are all UTC+0, so we need to adjust the given device time which requires # knowing which timezone the device is operating in date_query = Q(created__gt=datetime.fromtimestamp(time)) if type == 'updated': date_query = Q(modified__gt=datetime.fromtimestamp(time)) if table == 'abnormalrange': rows = AbnormalRange.objects.filter(date_query) for row in rows: if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'description': row.description, 'valuerule': row.value_rule.id, 'minimum': str(row.minimum), 'maximum': str(row.maximum), "remedialaction": row.remedialaction if row.remedialaction is not None else '', "colour": row.color, 'wqmauthority': row.wqmauthority.id, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'authorisedsampler': # not used by android application pass elif table == 'domainlookup': if datetime.fromtimestamp(time) < datetime.strptime("2011-04-19 00:00:00", "%Y-%m-%d %H:%M:%S"): data.append({ 'id': 1, 'key': 'positive', 'value': 1, 'parameter': Parameter.objects.get(test_name_short="h2s").id, 'modified': None, 'created': str(datetime.now()) }) data.append({ 'id': 2, 'key': 'negative', 'value': 0, 'parameter': Parameter.objects.get(test_name_short="h2s").id, 'modified': None, 'created': str(datetime.now()) }) elif table == 'measuredvalue': rows = MeasuredValue.objects.filter(date_query, parameter__is_decimal=True) for row in rows: try: if (domain > 0 and row.sample.sampling_point.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.sample.sampling_point.wqmarea.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'sample': row.sample.id, 'parameter': row.parameter.id, 'value': row.value, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) except Sample.DoesNotExist: pass elif table == 'normalrange': rows = NormalRange.objects.filter(date_query) for row in rows: if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'description': row.description, 'valuerule': row.value_rule.id, 'minimum': str(row.minimum), 'maximum': str(row.maximum), 'wqmauthority': row.wqmauthority.id, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'parameter': rows = Parameter.objects.filter(date_query, is_decimal=True) for row in rows: data.append({ 'id': row.id, 'testname': row.test_name, 'units': row.unit, 'lookuphint': row.lookup_hint, 'testnameshort': row.test_name_short, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'sample': rows = Sample.objects.filter(date_query) for row in rows: if (domain > 0 and row.sampling_point.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.sampling_point.wqmarea.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'samplingpoint': row.sampling_point.id, 'takenby': 
row.taken_by.id, 'notes': row.notes, 'datetaken': str(row.date_taken.date()), 'date_received': str(row.date_received), 'datasource': 'xform', 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created), }) elif table == 'samplingpoint': rows = SamplingPoint.objects.filter(date_query) for row in rows: if (domain > 0 and row.wqmarea.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmarea.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'pointname': row.name, 'pointcode': row.code, 'wqmarea': row.wqmarea.id, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created), 'waterusetype': None, 'the_geom': None, 'x_coord': row.point.get_x(), 'y_coord': row.point.get_y() }) elif table == 'smsnotifications': # not used by android application pass elif table == 'standard': rows = Standard.objects.filter(date_query) for row in rows: data.append({ 'id': row.id, 'name': row.name, 'governingbody': row.governing_body, 'dateeffective': str(row.date_effective), 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'valuerule': rows = ValueRule.objects.filter(date_query) for row in rows: if row.standard is not None: #temporary data.append({ 'id': row.id, 'description': row.description, 'parameter': row.parameter.id, 'standard': row.standard.id if row.standard is not None else None, 'waterusetype': row.water_use_type.id if row.water_use_type is not None else None, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'waterusetype': rows = WaterUseType.objects.filter(date_query) for row in rows: data.append({ 'id': row.id, 'description': row.description, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'wqmarea': rows = WqmArea.objects.filter(date_query) for row in rows: if (domain > 0 and row.wqmauthority.id == domain) or (domain == SUPERUSER_ID and row.wqmauthority.id in ALLOWED_DOMAINS): data.append({ 'id': row.id, 'name': row.name, 'wqmauthority': row.wqmauthority.id, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) elif table == 'wqmauthority': rows = WqmAuthority.objects.filter(date_query) for row in rows: data.append({ 'id': row.id, 'name': row.name, 'modified': str(row.modified) if row.modified is not None else None, 'created': str(row.created) }) return data
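From a client's point of view these sync views are plain GET requests. A hedged sketch of fetching added rows; the host name and URL prefix are assumptions (they depend on how the views are mapped in urls.py), the key placeholder must match WEBSERVICE_PASSWORD, and the requests library is used here only for illustration:

import requests  # third-party HTTP client, used only for this sketch

# URL prefix is hypothetical; it depends on the project's urls.py.
resp = requests.get('http://example.org/webservice/added/', params={
    'domain': 1,              # WqmAuthority id, or the superuser id for all allowed domains
    'key': 'ACCESS_KEY',      # placeholder for settings.WEBSERVICE_PASSWORD
    'table': 'samplingpoint',
    'time': 0,                # seconds since epoch of the last successful sync
    'limit': 100,
    'offset': 0,
})
result = resp.json()
# Envelope produced by _success_response: status, count, total_count, limit, offset, data
print(result['status'], result['count'])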
bsd-3-clause
-1,153,064,495,839,629,800
35.726257
197
0.535899
false
srp33/ShinyLearner
BuildTests/CheckAccuracy.py
1
3555
import os, sys taskType = sys.argv[1] validationType = sys.argv[2] description = sys.argv[3] metricFilePath = sys.argv[4] algorithmColumnName = sys.argv[5] expectedNumAlgorithms = int(sys.argv[6]) expectedNumEnsemble = int(sys.argv[7]) if not os.path.exists(metricFilePath): print("[FAILED] No metric file found!") exit(1) successfulAlgorithms = set() failedAlgorithms = [] failedAlgorithmOutput = "" metricFile = open(metricFilePath) metricData = [line.rstrip().split("\t") for line in metricFile] metricFile.close() headerItems = metricData.pop(0) metricNameIndex = headerItems.index("Metric") valueIndex = headerItems.index("Value") algorithmIndex = headerItems.index(algorithmColumnName) uniqueAlgorithms = list(set([row[algorithmIndex] for row in metricData])) if len(uniqueAlgorithms) == 0: print("[FAILED] No algorithm scripts could be found.") exit(1) actualNumAlgorithms = len([x for x in uniqueAlgorithms if not x.startswith("Ensemble")]) actualNumEnsemble = len([x for x in uniqueAlgorithms if x.startswith("Ensemble")]) if actualNumAlgorithms != expectedNumAlgorithms: print("[FAILED] The number of classification algorithms in {} [{}] does not match the expected number [{}].".format(metricFilePath, actualNumAlgorithms, expectedNumAlgorithms)) exit(1) if actualNumEnsemble != expectedNumEnsemble: print("[FAILED] The number of ensemble algorithms in {} [{}] does not match the expected number [{}].".format(metricFilePath, actualNumEnsemble, expectedNumEnsemble)) exit(1) for algorithm in uniqueAlgorithms: if "ZeroR" in algorithm: continue if "demo" in algorithm: continue idText = "{} - {} - {} - {}".format(taskType, validationType, metricFilePath, algorithm) aucValues = [float(row[valueIndex]) for row in metricData if row[algorithmIndex] == algorithm and row[metricNameIndex] == "AUROC"] meanAUC = sum(aucValues) / float(len(aucValues)) if description.startswith("StrongSignal"): lowerThreshold = 0.75 if meanAUC >= lowerThreshold: print("[PASSED] The mean AUROC was {:.3f} for {} and {}. ({})".format(meanAUC, description, algorithm, idText)) successfulAlgorithms.add(algorithm) else: error = "[FAILED] The mean AUROC was {:.3f} for {} and {}. The expected lower threshold is {:.3f}. ({})".format(meanAUC, description, algorithm, lowerThreshold, idText) print(error) failedAlgorithms.append(algorithm) failedAlgorithmOutput += error + "\n" elif description.startswith("NoSignal"): upperThreshold = 0.75 if meanAUC <= upperThreshold: print("[PASSED] The mean AUROC was {:.3f} for {} and {}. ({})".format(meanAUC, description, algorithm, idText)) successfulAlgorithms.add(algorithm) else: error = "[FAILED] The mean AUROC was {:.3f} for {} and {}. The expected upper threshold is {:.3f}. ({})".format(meanAUC, description, algorithm, upperThreshold, idText) print(error) failedAlgorithms.append(algorithm) failedAlgorithmOutput += error + "\n" print("\n[TEST SUMMARY]\n") if len(successfulAlgorithms) == 0: print("[FAILED] No algorithms successfully passed any of the tests.") exit(1) if len(failedAlgorithms) > 0: print("The following algorithm(s) failed at least once:") for algorithm in failedAlgorithms: print(" {}".format(algorithm)) print("\n" + failedAlgorithmOutput) exit(1) else: print("Tests passed!\n")
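For illustration only: the script is driven entirely by positional arguments and a tab-separated metrics file with Metric, Value and algorithm columns. A toy, self-contained invocation (all names and numbers are made up; the script path is assumed to be in the working directory):

import subprocess

# Write a toy tab-separated metrics file with the three columns the script reads.
rows = [
    ("Metric", "Value", "Algorithm"),
    ("AUROC", "0.91", "sklearn/random_forest"),
    ("AUROC", "0.88", "sklearn/random_forest"),
    ("AUROC", "0.86", "Ensemble_Majority_Vote"),
]
with open("Metrics.tsv", "w") as f:
    for row in rows:
        f.write("\t".join(row) + "\n")

# Argument order: taskType, validationType, description, metricFilePath,
# algorithmColumnName, expectedNumAlgorithms, expectedNumEnsemble.
# A description starting with "StrongSignal" selects the >= 0.75 mean-AUROC check.
subprocess.call(["python", "CheckAccuracy.py", "Classification", "MonteCarlo",
                 "StrongSignal_Toy", "Metrics.tsv", "Algorithm", "1", "1"])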
mit
-7,391,869,750,426,212,000
38.5
180
0.682138
false
orlox/hall_evolution
as_full/field_variation.py
1
9535
#!/usr/bin/env python """ Program to compare how much the structure of a field changes with respect to its initial form """ import matplotlib.pyplot as plt from pylab import * import sys import os import math from scipy import special #Fundamental Ohm mode for rmin=0.75 A1=-0.52004 A2=-0.55882 kk=7.03266 def A_Ohm_function(r,th): return r*pow(sin(th),2)*(((sin(kk*r)/(kk*r)-cos(kk*r))*A2)/(kk*r)+((-sin(kk*r)-cos(kk*r)/(kk*r))*A1)/(kk*r)) #Hall equilibrium field rmin=-0.75 def A_eq_function(r,th): return 1/2.0*pow(sin(th),2)*((3*pow(rmin,5)-5*pow(rmin,3))/r+5*pow(r,2)-3*pow(r,4))/(2-5*pow(rmin,3)+3*pow(rmin,5)); #Get name of folder with results folder=sys.argv[1] #add "/" if it is not given in folder name if folder[len(folder)-1]!="/": folder=folder+"/" #open output file and print header f = open(folder+"compare.dat", 'w') f.write("#num t/t_h int(dB)\n") #Show what folder is being used print("Analyzing data in "+folder) # Open parameters file params=open(folder+'params.dat','r') data=params.readline().split(":"); rNum=int(data[1]) data=params.readline().split(":"); thNum=int(data[1]) data=params.readline().split(":"); factor=float(data[1]) data=params.readline().split(":"); tNum=int(data[1]) data=params.readline().split(":"); plotSteps=int(data[1]) data=params.readline().split(":"); rmin=float(data[1]) data=params.readline().split(":"); thtd=float(data[1]) data=params.readline().split(":"); lNum=int(data[1]) params.close() #solve values of dr and dth dr=(1.0-rmin)/(rNum-1) dth=math.pi/(thNum-1) #store P_l^1(cos(theta)) at the surface, used to solve multipoles. These are calculated at grid mid-points to perform integrations plone=zeros((lNum,thNum)); for j in range(thNum): alp=special.lpmn(1, lNum+1, math.cos((j+0.5)*dth)) for l in range(lNum): plone[l][j]=alp[0][1][l+1] #Solve multipole coefficient l (l is actually l-1, so the dipole is l=0) of the field with poloidal field function A def multipole_l(l,A): value=0 for j in range(0,thNum-1): value+=(A[rNum-1][j]+A[rNum-1][j+1])/2*plone[l][j]*dth return value*math.sqrt(math.pi*(2.0*l+3))/(l+2.0)*sqrt(1.0/8/math.pi) #Create array to store A and B at each step A=zeros((rNum,thNum)); A_Ohm=zeros((rNum,thNum)); A_eq=zeros((rNum,thNum)); B=zeros((rNum,thNum)); #Fill array with ohm eigenmode for A_Ohm for i in range(rNum): r=i*dr+rmin for j in range(thNum): th=j*dth A_Ohm[i][j]=A_Ohm_function(r,th) A_eq[i][j]=A_eq_function(r,th) #Create array for multipole coefficients multipoles=zeros((lNum)); multipoles_i=zeros((lNum)); dipoleOhm=multipole_l(0,A_Ohm) dipoleEq=multipole_l(0,A_eq) #Create array to store the vector values of the field at the initial and later times #field is solved at midpoints in the grid. #B_field_i=initial field B_field_i=zeros((rNum,thNum,3)); #B_field_k=field at each timestep B_field_k=zeros((rNum,thNum,3)); #B_field_Ohm=fundamental Ohm mode, with the same polarity that the equilibrium B_field_Ohm=zeros((rNum,thNum,3)); #B_field_eq=equilibrium field due to rigid rotation of constant electron density in the shell. 
B_field_eq=zeros((rNum,thNum,3)); #solve vector magnetic field of the fundamental Ohm mode and the equilibrium field for i in range(rNum-1): r=i*dr+dr/2+rmin for j in range(thNum-1): th=j*dth+dth/2 #Solve each component #r component B_field_Ohm[i][j][0]=1/r/r/sin(th)*(A_Ohm[i][j+1]-A_Ohm[i][j]+A_Ohm[i+1][j+1]-A_Ohm[i+1][j])/2/dth B_field_eq[i][j][0]=1/r/r/sin(th)*(A_eq[i][j+1]-A_eq[i][j]+A_eq[i+1][j+1]-A_eq[i+1][j])/2/dth #th component B_field_Ohm[i][j][1]=-1/r/sin(th)*(A_Ohm[i+1][j]-A_Ohm[i][j]+A_Ohm[i+1][j+1]-A_Ohm[i][j+1])/2/dr B_field_eq[i][j][1]=-1/r/sin(th)*(A_eq[i+1][j]-A_eq[i][j]+A_eq[i+1][j+1]-A_eq[i][j+1])/2/dr #phi component B_field_Ohm[i][j][2]=0 B_field_eq[i][j][2]=0 #solve energy of Ohm eigenmode and equilibrium field, first the internal energy energy_Ohm=0 energy_eq=0 for i in range(rNum-1): r=i*dr+dr/2+rmin for j in range(thNum-1): th=j*dth+dth/2 energy_Ohm+=(pow(B_field_Ohm[i][j][0],2)+pow(B_field_Ohm[i][j][1],2)+pow(B_field_Ohm[i][j][2],2))*pow(r,2)*sin(th) energy_eq+=(pow(B_field_eq[i][j][0],2)+pow(B_field_eq[i][j][1],2)+pow(B_field_eq[i][j][2],2))*pow(r,2)*sin(th) energy_Ohm=energy_Ohm*dr*dth/4 energy_eq=energy_eq*dr*dth/4 #now the external energy energy_Ohm=energy_Ohm+2*pow(dipoleOhm,2) energy_eq=energy_eq+2*pow(dipoleEq,2) #analyze all data k=0 initial_energy=0; while 1: #add zeros to the number of the plot, so they are ordered appropately num_file=str(k) diff_zeros=len(str(tNum))-len(str(k)) while diff_zeros>0: num_file="0"+num_file diff_zeros-=1 #read A file data try: data=open(folder+"A_"+str(k),'r') except IOError as e: break #first line has simulation time t=float(data.readline()) i,j=0,0 for line in data: values=line.split(" ") for value in values: if j==thNum: break A[i][j]=float(value) j+=1 j=0 i+=1 data.close() #read B file try: data=open(folder+"B_"+str(k),'r') except IOError as e: break #first line has simulation time t=float(data.readline()) i,j=0,0 for line in data: values=line.split(" ") for value in values: if j==thNum: break B[i][j]=float(value) j+=1 j=0 i+=1 data.close() #solve multipole coefficients for l in range(lNum): multipoles[l]=multipole_l(l,A) Bmaxth=0 #solve vector magnetic field for i in range(rNum-1): r=i*dr+dr/2+rmin for j in range(thNum-1): th=j*dth+dth/2 #Solve each component #r component B_field_k[i][j][0]=1/r/r/sin(th)*(A[i][j+1]-A[i][j]+A[i+1][j+1]-A[i+1][j])/2/dth #th component B_field_k[i][j][1]=-1/r/sin(th)*(A[i+1][j]-A[i][j]+A[i+1][j+1]-A[i][j+1])/2/dr #phi component B_field_k[i][j][2]=1/r/sin(th)*(B[i][j]+B[i+1][j]+B[i][j+1]+B[i+1][j+1])/4 #solve total internal energy energy=0 for i in range(rNum-1): r=i*dr+dr/2+rmin for j in range(thNum-1): th=j*dth+dth/2 energy+=(pow(B_field_k[i][j][0],2)+pow(B_field_k[i][j][1],2)+pow(B_field_k[i][j][2],2))*pow(r,2)*sin(th) energy=energy*dr*dth/4 #add total external energy for l in range(lNum): energy+=(l+2)*pow(multipoles[l],2) #if this is the first timestep, store initial_energy and field and multipole coefficients. 
if k==0: initial_energy=energy for i in range(rNum-1): for j in range(thNum-1): B_field_i[i][j][0]=B_field_k[i][j][0] B_field_i[i][j][1]=B_field_k[i][j][1] B_field_i[i][j][2]=B_field_k[i][j][2] for l in range(lNum): multipoles_i[l]=multipoles[l] #solve integrals of the different dB^2 inside the star, fields must be corrected to have energy equal to 1 dB_energy=0 dB_energy_Ohm=0 dB_energy_eq=0 for i in range(rNum-1): r=i*dr+dr/2+rmin for j in range(thNum-1): th=j*dth+dth/2 #solve dB with respect to initial field dB_energy+= (pow(B_field_i[i][j][0]*sqrt(1/initial_energy)-B_field_k[i][j][0]*sqrt(1/energy),2) +pow(B_field_i[i][j][1]*sqrt(1/initial_energy)-B_field_k[i][j][1]*sqrt(1/energy),2) +pow(B_field_i[i][j][2]*sqrt(1/initial_energy)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th) #solve dB with respect to Ohm fundamental mode dB_energy_Ohm+= (pow(B_field_Ohm[i][j][0]*sqrt(1/energy_Ohm)-B_field_k[i][j][0]*sqrt(1/energy),2) +pow(B_field_Ohm[i][j][1]*sqrt(1/energy_Ohm)-B_field_k[i][j][1]*sqrt(1/energy),2) +pow(B_field_Ohm[i][j][2]*sqrt(1/energy_Ohm)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th) #solve dB with respect to equilibrium field dB_energy_eq+= (pow(B_field_eq[i][j][0]*sqrt(1/energy_eq)-B_field_k[i][j][0]*sqrt(1/energy),2) +pow(B_field_eq[i][j][1]*sqrt(1/energy_eq)-B_field_k[i][j][1]*sqrt(1/energy),2) +pow(B_field_eq[i][j][2]*sqrt(1/energy_eq)-B_field_k[i][j][2]*sqrt(1/energy),2))*pow(r,2)*sin(th) dB_energy=dB_energy*dth*dr/4 dB_energy_Ohm=dB_energy_Ohm*dth*dr/4 dB_energy_eq=dB_energy_eq*dth*dr/4 #add contribution to dBs outside the star dB_energy_Ohm=dB_energy_Ohm+2*pow(dipoleOhm,2)/energy_Ohm-4*dipoleOhm*multipoles[0]/sqrt(energy_Ohm*energy) dB_energy_eq=dB_energy_eq+2*pow(dipoleEq,2)/energy_eq-4*dipoleEq*multipoles[0]/sqrt(energy_eq*energy) for l in range(0,lNum): dB_energy_Ohm=dB_energy_Ohm+(l+2)*pow(multipoles[l],2)/energy dB_energy_eq=dB_energy_eq+(l+2)*pow(multipoles[l],2)/energy dB_energy=dB_energy+(l+2)*(pow(multipoles_i[l],2)/initial_energy-2*multipoles_i[l]*multipoles[l]/sqrt(energy*initial_energy)+pow(multipoles[l],2)/energy) f.write(str(t) + " " + str(dB_energy) + " " + str(dB_energy_Ohm)+ " " + str(dB_energy_eq)+"\n") print str(num_file)+" "+str(energy)+" "+str(dB_energy)+" "+str(dB_energy_Ohm)+" "+str(dB_energy_eq) k+=plotSteps f.close() sys.exit()
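For reference: the script expects the results folder given on the command line to contain a params.dat read line by line in a fixed order, plus A_<k> and B_<k> grid files whose first line is the simulation time. A minimal sketch of writing such a params.dat; the key labels and values below are invented (only the "name:value" format and the line order matter to the parser above):

# Minimal sketch: create a params.dat in the layout the parser above expects.
params = [
    ("rNum", 100), ("thNum", 100), ("factor", 1.0), ("tNum", 10000),
    ("plotSteps", 100), ("rmin", 0.75), ("thtd", 1.0), ("lNum", 10),
]
# The file must live inside the results folder passed as sys.argv[1].
with open("params.dat", "w") as f:
    for name, value in params:
        f.write("%s:%s\n" % (name, value))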
gpl-2.0
2,847,259,777,698,597,400
34.981132
161
0.594337
false
stfc/cvmfs-stratum-uploader
uploader/custom_auth/admin.py
1
2869
from django.contrib import admin from django.contrib.admin import ModelAdmin from uploader.custom_auth.models import User from django.contrib.auth.admin import UserAdmin from django.contrib.auth.forms import UserChangeForm, UserCreationForm from django.utils.translation import ugettext_lazy as _ from django import forms from guardian.admin import GuardedModelAdmin class CustomUserChangeForm(UserChangeForm): username = forms.RegexField( label=_("Username"), max_length=200, regex=r"^[\w.@+-=/ ]+$", help_text=_("Required. 200 characters or fewer. Letters, digits and " "@/./+/-/_/=/ // only."), error_messages={ 'invalid': _("This value may contain only letters, numbers and " "@/./+/-/_/=/ // characters.")}, widget=forms.TextInput(attrs={'style': 'width: 70%;'}) ) class CustomUserCreationForm(UserCreationForm): username = forms.RegexField(label=_("Username"), max_length=200, regex=r'^[\w.@+-=/ ]+$', help_text=_("Required. 200 characters or fewer. Letters, digits and " "@/./+/-/_/=/ // only."), error_messages={ 'invalid': _("This value may contain only letters, numbers and " "@/./+/-/_/=/ // characters.")}, widget=forms.TextInput(attrs={'style': 'width: 70%;'}) ) password1 = forms.CharField(required=False) password2 = forms.CharField(required=False) def clean_username(self): # Since User.username is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. username = self.cleaned_data["username"] try: User._default_manager.get(username=username) except User.DoesNotExist: return username raise forms.ValidationError(self.error_messages['duplicate_username']) class Meta: model = User fields = ("username",) class CustomUserAdmin(UserAdmin, ModelAdmin): add_form_template = 'admin/custom_auth/user/add_form.html' form = CustomUserChangeForm add_form = CustomUserCreationForm readonly_fields = ('last_login', 'date_joined', ) fieldsets = ( (None, {'fields': ('username', 'password')}), (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}), (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups',)}), (_('Important dates'), {'fields': ('last_login', 'date_joined')}), ) add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('username',), }), ) admin.site.register(User, CustomUserAdmin)
apache-2.0
2,708,395,672,979,926,500
39.422535
101
0.565354
false
daviessm/heating
usbrelay.py
1
2262
import usb, logging

from relay import Relay

logger = logging.getLogger('heating')


class USBRelay(Relay):
    def __init__(self, device):
        #Assume relay is on until turned off
        self._status = [1,1,1,1,1,1,1,1]
        self._hid_device = device
        if self._hid_device.is_kernel_driver_active(0):
            try:
                self._hid_device.detach_kernel_driver(0)
            except usb.core.USBError as e:
                raise Exception("Could not detatch kernel driver: %s" % str(e))
        try:
            self._hid_device.set_configuration()
            self._hid_device.reset()
        except usb.core.USBError as e:
            raise Exception("Could not set configuration: %s" % str(e))
        #Turn off at start
        self.all_off()

    def __sendmsg(self, data):
        self._hid_device.ctrl_transfer(0x21, 0x09, 0x0300, 0x00, bytes(data), 1000)

    def all_status(self):
        return self._status

    def one_status(self, relay_num):
        return self._status[relay_num-1]

    def all_on(self):
        if not self._status == [1,1,1,1,1,1,1,1]:
            logger.debug("Relay all on")
            self.__sendmsg([0xFE, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
            self._status = [1,1,1,1,1,1,1,1]

    def all_off(self):
        if not self._status == [0,0,0,0,0,0,0,0]:
            logger.debug("Relay all off")
            self.__sendmsg([0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
            self._status = [0,0,0,0,0,0,0,0]

    def one_on(self, relay_num):
        if self._status[relay_num-1] == 0 and relay_num > 0 and relay_num <= 8:
            logger.debug("Relay " + str(relay_num) + " on")
            self.__sendmsg([0xFF, relay_num, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
            self._status[relay_num-1] = 1

    def one_off(self, relay_num):
        if self._status[relay_num-1] == 1 and relay_num > 0 and relay_num <= 8:
            logger.debug("Relay " + str(relay_num) + " off")
            self.__sendmsg([0xFD, relay_num, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
            self._status[relay_num-1] = 0

    @staticmethod
    def find_relay():
        hid_devices = usb.core.find(find_all=True, idVendor=0x16c0, idProduct=0x05df)
        relays = []
        for hid_device in hid_devices:
            relays.append(USBRelay(hid_device))
        if len(relays) < 1:
            raise Exception("No relays found")
        if len(relays) > 1:
            raise Exception("Only one relay allowed!")
        return relays[0]
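For illustration only: typical use of the class, assuming a compatible 16c0:05df HID relay board is plugged in and pyusb can claim it (usually requires root or a suitable udev rule):

from usbrelay import USBRelay

relay = USBRelay.find_relay()   # raises if zero or more than one board is present
relay.one_on(1)                 # energise channel 1
print(relay.one_status(1))      # -> 1
relay.one_off(1)
relay.all_off()                 # everything off, mirroring the constructor's initial state
print(relay.all_status())       # -> [0, 0, 0, 0, 0, 0, 0, 0]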
gpl-3.0
-8,713,284,299,106,219
31.314286
79
0.621574
false
bitglue/shinysdr
shinysdr/plugins/rtl_433.py
1
10985
# Copyright 2016, 2017 Kevin Reid <[email protected]> # # This file is part of ShinySDR. # # ShinySDR is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ShinySDR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ShinySDR. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, division import json import time from twisted.internet import reactor as the_reactor # TODO eliminate from twisted.internet.protocol import ProcessProtocol from twisted.protocols.basic import LineReceiver from twisted.python import log from zope.interface import implementer from gnuradio import analog from gnuradio import gr from shinysdr.i.blocks import make_sink_to_process_stdin from shinysdr.filters import MultistageChannelFilter from shinysdr.math import dB from shinysdr.interfaces import BandShape, ModeDef, IDemodulator from shinysdr.signals import no_signal from shinysdr.telemetry import ITelemetryMessage, ITelemetryObject from shinysdr.twisted_ext import test_subprocess from shinysdr.types import EnumRow, TimestampT from shinysdr.values import ExportedState, LooseCell, exported_value drop_unheard_timeout_seconds = 120 upper_preferred_demod_rate = 400000 @implementer(IDemodulator) class RTL433Demodulator(gr.hier_block2, ExportedState): def __init__(self, mode='433', input_rate=0, context=None): assert input_rate > 0 assert context is not None gr.hier_block2.__init__( self, type(self).__name__, gr.io_signature(1, 1, gr.sizeof_gr_complex), gr.io_signature(0, 0, 0)) # The input bandwidth chosen is not primarily determined by the bandwidth of the input signals, but by the frequency error of the transmitters. Therefore it is not too critical, and we can choose the exact rate to make the filtering easy. if input_rate <= upper_preferred_demod_rate: # Skip having a filter at all. self.__band_filter = None demod_rate = input_rate else: # TODO: This gunk is very similar to the stuff that MultistageChannelFilter does. See if we can share some code. 
lower_rate = input_rate lower_rate_prev = None while lower_rate > upper_preferred_demod_rate and lower_rate != lower_rate_prev: lower_rate_prev = lower_rate if lower_rate % 5 == 0 and lower_rate > upper_preferred_demod_rate * 3: lower_rate /= 5 elif lower_rate % 2 == 0: lower_rate /= 2 else: # non-integer ratio lower_rate = upper_preferred_demod_rate break demod_rate = lower_rate self.__band_filter = MultistageChannelFilter( input_rate=input_rate, output_rate=demod_rate, cutoff_freq=demod_rate * 0.4, transition_width=demod_rate * 0.2) # Subprocess # using /usr/bin/env because twisted spawnProcess doesn't support path search # pylint: disable=no-member process = the_reactor.spawnProcess( RTL433ProcessProtocol(context.output_message), '/usr/bin/env', env=None, # inherit environment args=[ 'env', 'rtl_433', '-F', 'json', '-r', '-', # read from stdin '-m', '3', # complex float input '-s', str(demod_rate), ], childFDs={ 0: 'w', 1: 'r', 2: 2 }) sink = make_sink_to_process_stdin(process, itemsize=gr.sizeof_gr_complex) agc = analog.agc2_cc(reference=dB(-4)) agc.set_attack_rate(200 / demod_rate) agc.set_decay_rate(200 / demod_rate) if self.__band_filter: self.connect( self, self.__band_filter, agc) else: self.connect( self, agc) self.connect(agc, sink) @exported_value(type=BandShape, changes='never') def get_band_shape(self): """implements IDemodulator""" if self.__band_filter: return self.__band_filter.get_shape() else: # TODO Reuse UnselectiveAMDemodulator's approach to this return BandShape(stop_low=0, pass_low=0, pass_high=0, stop_high=0, markers={}) def get_output_type(self): """implements IDemodulator""" return no_signal class RTL433ProcessProtocol(ProcessProtocol): def __init__(self, target): self.__target = target self.__line_receiver = LineReceiver() self.__line_receiver.delimiter = '\n' self.__line_receiver.lineReceived = self.__lineReceived def outReceived(self, data): """Implements ProcessProtocol.""" # split lines self.__line_receiver.dataReceived(data) def errReceived(self, data): """Implements ProcessProtocol.""" # we should inherit stderr, not pipe it raise Exception('shouldn\'t happen') def __lineReceived(self, line): # rtl_433's JSON encoder is not perfect (e.g. it will emit unescaped newlines), so protect against parse failures try: message = json.loads(line) except ValueError: log.msg('bad JSON from rtl_433: %s' % line) return log.msg('rtl_433 message: %r' % (message,)) # rtl_433 provides a time field, but when in file-input mode it assumes the input is not real-time and generates start-of-file-relative timestamps, so we can't use them. 
wrapper = RTL433MessageWrapper(message, time.time()) self.__target(wrapper) _message_field_is_id = { # common u'model': True, u'time': False, # id fields u'device': True, # common u'channel': True, # some u'id': True, # some, frequenrly labeled 'house code' u'dev_id': True, # one u'node': True, # one u'address': True, # one u'ws_id': True, # one u'sid': True, # one u'rid': True, # one u'unit': True, # one # data fields - device u'battery': False, u'rc': False, # data fields - weather u'temperature_F': False, u'temperature_C': False, u'temperature': False, u'humidity': False, u'wind_speed': False, u'wind_speed_ms': False, u'wind_gust': False, u'wind_gust_ms': False, u'wind_direction': False, u'direction': False, u'direction_str': False, u'direction_deg': False, u'speed': False, u'gust': False, u'rain': False, u'rain_total': False, u'rain_rate': False, u'rainfall_mm': False, u'total_rain': False, # data fields - other u'cmd': False, u'cmd_id': False, u'command': False, u'tristate': False, u'power0': False, u'power1': False, u'power2': False, u'ct1': False, u'ct2': False, u'ct3': False, u'ct4': False, u'Vrms/batt': False, u'pulse': False, u'temp1_C': False, u'temp2_C': False, u'temp3_C': False, u'temp4_C': False, u'temp5_C': False, u'temp6_C': False, u'msg_type': False, u'hours': False, u'minutes': False, u'seconds': False, u'year': False, u'month': False, u'day': False, u'button': False, u'button1': False, u'button2': False, u'button3': False, u'button4': False, u'group_call': False, u'dim': False, u'dim_value': False, u'maybetemp': False, u'flags': False, u'binding_countdown': False, u'depth': False, u'state': False, } @implementer(ITelemetryMessage) class RTL433MessageWrapper(object): def __init__(self, message, receive_time): self.message = message # a parsed rtl_433 JSON-format message self.receive_time = float(receive_time) id_keys = [k for k in message if _message_field_is_id.get(k, False)] id_keys.sort() self.object_id = u'-'.join(unicode(message[k]) for k in id_keys) def get_object_id(self): return self.object_id def get_object_constructor(self): return RTL433MsgGroup # TODO: It would make sense to make this a CollectionState object to have simple dynamic fields. @implementer(ITelemetryObject) class RTL433MsgGroup(ExportedState): def __init__(self, object_id): """Implements ITelemetryObject.""" self.__cells = {} self.__last_heard_time = None def state_is_dynamic(self): """Overrides ExportedState.""" return True def state_def(self): """Overrides ExportedState.""" for d in super(RTL433MsgGroup, self).state_def(): yield d for d in self.__cells.iteritems(): yield d # not exported def receive(self, message_wrapper): """Implements ITelemetryObject.""" self.__last_heard_time = message_wrapper.receive_time shape_changed = False for k, v in message_wrapper.message.iteritems(): if _message_field_is_id.get(k, False) or k == u'time': continue if k not in self.__cells: shape_changed = True self.__cells[k] = LooseCell( key=k, value=None, type=object, writable=False, persists=False, label=k) self.__cells[k].set_internal(v) self.state_changed() if shape_changed: self.state_shape_changed() def is_interesting(self): """Implements ITelemetryObject.""" return True def get_object_expiry(self): """implement ITelemetryObject""" return self.__last_heard_time + drop_unheard_timeout_seconds @exported_value(type=TimestampT(), changes='explicit', label='Last heard') def get_last_heard_time(self): return self.__last_heard_time # TODO: Arrange for a way for the user to see why it is unavailable. 
_rtl_433_available = test_subprocess( ['rtl_433', '-r', '/dev/null'], 'Reading samples from file', shell=False) plugin_mode = ModeDef(mode='433', info=EnumRow(label='rtl_433', description='OOK telemetry decoded by rtl_433 mostly found at 433 MHz'), demod_class=RTL433Demodulator, available=_rtl_433_available)
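RTL433ProcessProtocol above tolerates occasionally malformed output from rtl_433 by decoding each line defensively. A self-contained sketch of that guard (the function name is illustrative, not part of ShinySDR's API):

import json

def parse_rtl433_line(line, on_message, log=print):
    # rtl_433 emits one JSON object per line, but lines can be malformed,
    # so bad lines are logged and skipped instead of crashing the reader.
    try:
        message = json.loads(line)
    except ValueError:
        log('bad JSON from rtl_433: %s' % line)
        return
    on_message(message)

parse_rtl433_line('{"model": "Acurite tower", "temperature_C": 21.5}', on_message=print)
parse_rtl433_line('not json', on_message=print)   # logged and skipped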
gpl-3.0
2,245,055,274,137,531,400
32.287879
246
0.596723
false
devlpr/mpptCommander
src/gui.py
1
4218
import copy import os import sys import time import threading from multiprocessing import Process, Queue from PyQt4 import uic from PyQt4 import QtGui from PyQt4 import QtCore from PyQt4.uic import loadUiType import commander import mappings THISDIR = os.path.realpath(os.path.dirname(__file__)) class Commander(QtGui.QMainWindow): def __init__(self, parent=None): """ MPPT Commander simple UI for viewing the state of the controller. """ QtGui.QMainWindow.__init__(self, parent) uic.loadUi(os.path.join(THISDIR, "ui", "commander.ui"), self) self.__queue = Queue() self.__widgets = {} self.__running = True self.__timer = QtCore.QTimer(self) self.connect(self.__timer, QtCore.SIGNAL("timeout()"), self.update) self.__timer.start(10) self.__process = Process(target=self.populateColumns, args=()) self.__process.start() def update(self): """ Update the UI in the main thread. This is triggered by a timer. """ if self.__queue.empty(): return key, name, register, num = self.__queue.get() key = "%s-%s" % (key, name) mess = "%s (%s): %s" % (name, register.unit, register.value) self.statusBar().showMessage(mess) if key not in self.__widgets: self.__widgets[key] = QtGui.QListWidgetItem(mess) if num <= 32: self.arrayInfoListWidget.addItem(self.__widgets[key]) elif num <= 64: self.batteryInfoListWidget.addItem(self.__widgets[key]) else: self.loadInfoListWidget.addItem(self.__widgets[key]) else: #self.__widgets[key].setText(mess) #self.statusBar().showMessage(mess) pass def closeEvent(self, event): """ Event that is triggered on close. """ reply = QtGui.QMessageBox.question(self, 'Message', "Are you sure to quit?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: self.__running = False self.__timer.stop() self.__process.terminate() event.accept() else: event.ignore() def populateColumns(self): """ Query the Commander unit and push all items into a queue to get popped off in the main thread. This is run in a new process using the multiprocessing module in order to make the UI responsive while querying the device. A thread would probably also work. """ # The ID of the device we are going to communicate with. Default is 1. deviceId = 0x01 while self.__running: ser = commander.getRs485() try: num = 1 for addr, reg in sorted(mappings.REGISTERS.iteritems()): if not self.__running: break results = commander.communicate(ser, deviceId, addr, reg, debug=False) wasList = False if not isinstance(results, list): wasList = True results = [results, ] for item in results: if wasList: key = "%s-%s" % (addr, reg.unit) self.__queue.put([key, reg.name, item, num]) else: self.__queue.put([addr, reg.name, item, num]) num += 1 except: raise finally: # Close the port regardless of which errors occur ser.close() if __name__ == "__main__": # Create the QApplication and spawn a Commander window. Block until it is # done. app = QtGui.QApplication(sys.argv) w = Commander() w.setWindowTitle('MPPT Commander') w.show() sys.exit(app.exec_())
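The window above stays responsive by having a worker process push register readings into a multiprocessing.Queue while a 10 ms QTimer drains it in the main thread. A self-contained sketch of that producer/consumer pattern, with the Qt timer replaced by a plain polling loop so it runs without PyQt4 (names and the fake readings are illustrative):

from multiprocessing import Process, Queue
import time

def worker(queue):
    # Stand-in for the serial register reads done by populateColumns().
    for num in range(1, 4):
        queue.put(("addr-%d" % num, "Register%d" % num, num * 1.5, num))
        time.sleep(0.1)

if __name__ == "__main__":
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    deadline = time.time() + 1.0
    while time.time() < deadline:     # plays the role of the QTimer slot
        if not q.empty():
            key, name, value, num = q.get()
            print("%s (%s): %s" % (name, key, value))
        time.sleep(0.01)
    p.join()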
mit
6,608,296,891,409,699,000
32.47619
81
0.513988
false
bfrascher/passpy
passpy/__main__.py
1
21211
# coding: utf-8 # passpy -- ZX2C4's pass compatible library and cli # Copyright (C) 2016 Benedikt Rascher-Friesenhausen <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import locale import os import click import pyperclip from git import ( GitCommandError ) from passpy import ( Store, StoreNotInitialisedError, RecursiveCopyMoveError ) # Message constants MSG_STORE_NOT_INITIALISED_ERROR = ('You need to call {0} init first.' .format(__name__)) MSG_PERMISSION_ERROR = 'Nah-ah!' MSG_FILE_NOT_FOUND = 'Error: {0} is not in the password store.' MSG_RECURSIVE_COPY_MOVE_ERROR = 'Error: Can\'t {0} a directory into itself.' # Tree constants if locale.getdefaultlocale()[1].startswith('UTF'): SPACES = ' ' BRIDGE = '│ ' BRANCH = '├── ' ENDING = '└── ' else: SPACES = ' ' BRIDGE = '| ' BRANCH = '|-- ' ENDING = '`-- ' def _gen_tree(lines): """Create hierarchical file tree from key names. :param list lines: A list of key names from the password store. :rtype: dict :returns: A nested dictionary with directories and key names as it's keys. """ tree = {} for line in lines: ctree = tree for segment in line.split(os.sep): if segment not in ctree: ctree[segment] = {} ctree = ctree[segment] return tree def _print_name(name, num_children): """Print a name with added styles. If `num_children` is larger than nil, `name` will be printed in bold face and in blue, to differentiate it as a directory and not a key. :param str name: The name to be printed. :param int num_children: The number of children the leaf has. """ # pass colors folders blue, so we do too. if num_children > 0: click.secho(name, bold=True, fg='blue') else: click.echo(name) def _print_tree(tree, seperators=None): """Print a depth indented listing. The algorithm for printing the tree has been taken from `doctree`_ written by Mihai Ciumeică and licensed under the MIT licence. The code has been adapted to fit our needs. .. _doctree: https://github.com/cmihai/docktree :param dict tree: A dictionary created by :func:`passpy.__main__._gen_tree`. :param list seperators: (optional) The seperators to print before the leaf name. Leave empty when calling this function. """ if seperators is None: seperators = [] length = len(tree) for i, entry in enumerate(sorted(tree, key=str.lower)): num_children = len(tree[entry]) for seperator in seperators: if seperator: click.echo(BRIDGE, nl=False) else: click.echo(SPACES, nl=False) if i < length - 1: click.echo(BRANCH, nl=False) _print_name(entry, num_children) _print_tree(tree[entry], seperators + [1]) else: click.echo(ENDING, nl=False) _print_name(entry, num_children) _print_tree(tree[entry], seperators + [0]) class PassGroup(click.Group): """Custom group for command name aliases. """ def get_command(self, ctx, cmd_name): """Allow aliases for commands. 
""" if cmd_name == 'list': cmd_name = 'ls' elif cmd_name == 'search': cmd_name = 'find' elif cmd_name == 'gen': cmd_name = 'generate' elif cmd_name == 'add': cmd_name = 'insert' elif cmd_name in ['remove', 'delete']: cmd_name = 'rm' elif cmd_name == 'rename': cmd_name = 'mv' elif cmd_name == 'copy': cmd_name = 'cp' # TODO(benedikt) Figure out how to make 'show' the default # command and pass cmd_name as the first argument. rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv @click.group(cls=PassGroup) @click.option('--gpg-bin', envvar='PYPASS_GPG_BIN', default='gpg2', help='The path to your gpg2 binary. Only necessary ' 'if gpg2 is not already in your PATH. Alternatively ' 'you can set the PYPASS_GPG_BIN environment variable ' 'with the path.') @click.option('--git-bin', envvar='PYPASS_GIT_BIN', default='git', help='The path to your git binary. Only necessary ' 'if git is not already in your PATH. Alternatively ' 'you can set the PYPASS_GIT_BIN environment variable ' 'with the path.') @click.option('--store-dir', envvar=['PYPASS_STORE_DIR', 'PASSWORD_STORE_DIR'], default='~/.password-store', help='The path to the directory to use for the ' 'password store. Alternatively you can set the ' 'PYPASS_STORE_DIR environment variable with the path.') @click.option('--no-agent', envvar='PYPASS_NO_AGENT', is_flag=True, help='Pass this along if you don\'t have an ssh agent ' 'running. Alternatively you can set the PYPASS_NO_AGENT ' 'environment variable.', default=False) @click.pass_context def cli(ctx, gpg_bin, git_bin, store_dir, no_agent): """passpy is a password manager compatible with ZX2C4's pass written in Python. """ if no_agent: use_agent = False else: use_agent = True ctx.obj = Store(gpg_bin, git_bin, store_dir, use_agent, True, True) @cli.command(options_metavar='[ --path,-p ]') @click.option('-p', '--path', type=str, help='Only set the gpg-ids for the given subfolder.') @click.argument('gpg_ids', nargs=-1, metavar='gpg-id') @click.pass_context def init(ctx, gpg_ids, path): """Initialize new password storage and use `gpg-id` for encryption. Mutliple gpg-ids may be specified, in order to encrypt each password with multiple ids. This command must be run first before a password store can be used. If the specified `gpg-id` is different from the ye used in any existing files, these files will be reencrypted to use the new id. Note that use of an gpg agent is recommended so that the batch decryption does not require as much user intervention. If `--path` or `-p` is specified, along with an argument, a specific gpg-id or a set of gpg-ids is assigned for that specific sub folder of the password store. If only the gpg-id is given, and it is an empty string then the current `.gpg-id` file for the specfified `sub-folder` (or root if unspecified) is removed. """ try: ctx.obj.init_store(list(gpg_ids), path=path) except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 click.echo('Password store initialised for {0}.' .format(','.join(gpg_ids))) @cli.command() @click.argument('subfolder', type=str, default='.') @click.pass_context def ls(ctx, subfolder, passthrough=False): """List names of passwords inside the tree at `subfolder`. This command is alternatively names `list`. """ # TODO(benedikt) Generate pretty output try: keys = list(ctx.obj.iter_dir(subfolder)) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 # If subfolder is actually a key in the password store pass shows # the contents of that key. 
except FileNotFoundError: if not passthrough: return ctx.invoke(show, pass_name=subfolder, clip=False, passthrough=True) else: click.echo(MSG_FILE_NOT_FOUND.format(subfolder)) return 1 click.echo('Password Store') tree = _gen_tree(keys) _print_tree(tree) @cli.command() @click.argument('search_string', type=str, metavar='search-string') @click.pass_context def grep(ctx, search_string): """Searches inside each decrypted password file for `search-string`, and displays line containing matched string along with filename. `search-string` can be a regular expression. """ try: results = ctx.obj.search(search_string) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 for key in results: if os.path.dirname(key) != '': click.secho(os.path.dirname(key) + os.sep, fg='blue', nl=False) click.secho(os.path.basename(key), fg='blue', bold=True, nl=False) click.secho(':') for line, match in results[key]: start = match.start() end = match.end() click.echo(line[:start], nl=False) click.secho(line[start:end], nl=False, fg='red', bold=True) click.echo(line[end:]) @cli.command() @click.argument('pass_names', type=str, nargs=-1, metavar='pass-name') @click.pass_context def find(ctx, pass_names): """List names of passwords inside the tree that match `pass-names` and print them to the command line. This command is alternatively named `search`. """ try: keys = ctx.obj.find(list(pass_names)) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 click.echo('Search Terms: {0}'.format(','.join(pass_names))) tree = _gen_tree(keys) _print_tree(tree) @cli.command(options_metavar='[ --clip,-c ]') @click.option('-c', '--clip', is_flag=True, help='Copy the password to the clipboard instead of ' 'printing it to the command line.') @click.argument('pass_name', type=str, metavar='pass-name', default='.') @click.pass_context def show(ctx, pass_name, clip, passthrough=False): """Decrypt and print a password named `pass-name`. If `--clip` or `-c` is specified, do not print the password but instead copy the first line to the clipboard using pyperclip. On Linux you will need to have xclip/xsel and on OSX pbcopy/pbpaste installed. """ try: data = ctx.obj.get_key(pass_name) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 # If pass_name is actually a folder in the password store pass # lists the folder instead. except FileNotFoundError: if not passthrough: return ctx.invoke(ls, subfolder=pass_name, passthrough=True) else: click.echo(MSG_FILE_NOT_FOUND.format(pass_name)) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 if clip: pyperclip.copy(data.split('\n')[0]) click.echo('Copied {0} to the clipboard.'.format(pass_name)) else: # The key data always ends with a newline. So no need to add # another one. click.echo(data, nl=False) @cli.command(options_metavar='[ --echo,-e | --multiline,-m ] [ --force,-f ]') @click.option('-e', '--echo', 'input_method', flag_value='echo', help='Don\'t ask to repeat the password.') @click.option('-m', '--multiline', 'input_method', flag_value='multiline', help='Allows entering multiple lines of text for the key.') @click.option('-f', '--force', is_flag=True, help='Any existing key at pass-name will be ' 'silently overwritten.') @click.argument('pass_name', type=str, metavar='pass-name') @click.pass_context def insert(ctx, pass_name, input_method, force): """Insert a new password into the password store called `pass-name`. This will read the new password from standard in. 
If `--echo` or `-e` are NOT specified, disable keyboard echo when the password is entered and confirm the password by asking for it twice. If `--multiline` or `-m` is specified, lines will be read until EOF or Ctrl+D is reached. Otherwise, only a single line from standard in read. Prompt before overwriting an existing password, unless `--force` or `-f` is specified. This command is alternatively named `add` """ if input_method is None: input_method = 'neither' if input_method == 'multiline': click.echo('Enter contents of {0} and press Ctrl+D on an empty ' 'line when finished:'.format(pass_name)) lines = [] while True: try: line = input('> ') lines.append(line) except EOFError: break data = '\n'.join(lines) else: echo = (input_method != 'echo') data = click.prompt('Enter password for {0}'.format(pass_name), hide_input=True, confirmation_prompt=echo, type=str) try: ctx.obj.set_key(pass_name, data, force=force) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 @cli.command() @click.argument('pass_name', type=str, metavar='pass-name') @click.pass_context def edit(ctx, pass_name): """Insert a new password or edit an existing one using the editor specified by either EDITOR or VISUAL or falling back on the platform default if both are not set. """ try: data = ctx.obj.get_key(pass_name) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except FileNotFoundError: data = '' except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 if 'EDITOR' in os.environ: data = click.edit(text=data, editor=os.environ['EDITOR']) else: data = click.edit(text=data) if data is None: click.echo('Password unchanged.') return 1 ctx.obj.set_key(pass_name, data, force=True) @cli.command(options_metavar='[ --no-symbols,-n ] [ --clip,-c ] ' '[ --in-place,-i ] [ --force,-f ]') @click.option('-n', '--no-symbols', is_flag=True, help='If specified the password will only consist ' 'of alphanumeric characters.') @click.option('-c', '--clip', is_flag=True, help='Copy the password to the clipboard instead of ' 'printing it on the command line.') @click.option('-i', '--in-place', is_flag=True, help='Replace the first line of an existing key at ' 'path-name with the newly generated password.') @click.option('-f', '--force', is_flag=True, help='Overwrite an existing key at pass-name without ' 'prompting the user first.') @click.argument('pass_name', type=str, metavar='pass-name') @click.argument('pass_length', type=int, metavar='pass-length') @click.pass_context def generate(ctx, pass_name, pass_length, no_symbols, clip, in_place, force): """Generate a new password of length `pass-length` and insert into `pass-name`. If `--no-symbols` or `-n` is specified, do not use any non-alphanumeric characters in the generated password. If `--clip` or `-c` is specified, do not print the password but instead copy it to the clipboard. On Linux you will need to have xclip/xsel and on OSX pbcopy/pbpaste installed. Prompt before overwriting an existing password, unless `--force` or `-f` is specified. If `--in-place` or `-i` is specified, do not interactively prompt, and only replace the first line of the password file with the new generated password, keeping the remainder of the file intact. 
""" symbols = not no_symbols try: password = ctx.obj.gen_key(pass_name, pass_length, symbols, force, in_place) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 except FileNotFoundError: click.echo(MSG_FILE_NOT_FOUND.format(pass_name)) return 1 if password is None: return 1 if clip: pyperclip.copy(password) click.echo('Copied {0} to the clipboard.'.format(pass_name)) else: click.echo('The generated password for {0} is:'.format(pass_name)) click.echo(password) @cli.command(options_metavar='[ --recursive,-r ] [ --force,-f ]') @click.option('-r', '--recursive', is_flag=True, help='If pass-name is a directory, also remove all ' 'it\'s contents.') @click.option('-f', '--force', is_flag=True, default=False, help='Don\'t prompt for confirmation when removing a key.') @click.argument('pass_name', type=str, metavar='pass-name') @click.pass_context def rm(ctx, pass_name, recursive, force): """Remove the password names `pass-name` from the password store. This command is alternatively named `remove` or `delete`. If `--recursive` or `-r` is specified, delete pass-name recursively if it is a directory. If `--force` or `-f` is specified, do not interactively prompt before removal. """ try: ctx.obj.remove_path(pass_name, recursive, force) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except FileNotFoundError: click.echo('{0} is not in the password store.'.format(pass_name)) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 @cli.command(options_metavar='[ --force,-f ]') @click.option('-f', '--force', is_flag=True, help='If specified existing files at `new-path` ' 'will be silently overwritten.') @click.argument('old_path', type=str, metavar='old-path') @click.argument('new_path', type=str, metavar='old-path') @click.pass_context def mv(ctx, old_path, new_path, force): """Renames the password or directory named `old-path` to `new-path`. This command is alternatively named `rename`. If `--force` or `-f` is specified, silently overwrite `new-path` if it exists. If `new-path` ends in a trailing '/', it is always treated as a directory. Passwords are selectively reencrypted to the corresponding keys of their new destination. """ try: ctx.obj.move_path(old_path, new_path, force) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except FileNotFoundError: click.echo('{0} is not in the password store.'.format(old_path)) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 except RecursiveCopyMoveError: click.echo(MSG_RECURSIVE_COPY_MOVE_ERROR.format('move')) return 1 @cli.command(options_metavar='[ --force,-f ]') @click.option('-f', '--force', is_flag=True, help='If specified existing files at `new-path` ' 'will be silently overwritten.') @click.argument('old_path', type=str, metavar='old-path') @click.argument('new_path', type=str, metavar='new-path') @click.pass_context def cp(ctx, old_path, new_path, force): """Copies the password or directory names `old-path` to `new-path`. This command is alternatively named `copy`. If `--force` is specified, silently overwrite `new_path` if it exists. If `new-path` ends in a trailing `/`, it is always treated as a directory. Passwords are selectively reencrypted to the corresponding keys of their new destination. 
""" try: ctx.obj.copy_path(old_path, new_path, force) except StoreNotInitialisedError: click.echo(MSG_STORE_NOT_INITIALISED_ERROR) return 1 except FileNotFoundError: click.echo('{0} is not in the password store.'.format(old_path)) return 1 except PermissionError: click.echo(MSG_PERMISSION_ERROR) return 1 except RecursiveCopyMoveError: click.echo(MSG_RECURSIVE_COPY_MOVE_ERROR.format('copy')) return 1 @cli.command() @click.argument('git_args', type=str, metavar='git-command-args', nargs=-1) @click.pass_context def git(ctx, git_args): """If the password store is a git repository, pass `args` as arguments to `git` using the password store as the git repository. If `args` is `init`, in addition to initializing the git repository, add the current contents of the password store to the repository in an initial commit. """ try: ctx.obj.git(*list(git_args)) except GitCommandError as e: click.echo(e) return 1
gpl-3.0
2,915,392,858,262,720,500
34.986418
92
0.631298
false
AQORN/thunder-engine
thunder_web/monitor_node.py
1
2757
# @author: Binoy # @create_date: 17-Apr-2015 # @modified by: binoy # @modified_date: 17-Apr-2015 # @linking to other page: # @description: manage cron job module # importing required modules import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thunder.settings") from cloud.models import * from cloud.views import * import re from django.db import connections from deployment.common import * cursor = connections['zabbix'].cursor() # Getting all the nodes in the system nodes = Nodelist.objects.all() # Looping through the nodes for node in nodes: # Creating the sql query to fetch the details from the history table. try: currStatus = isNodeActive(node.host_name) preStatus = node.node_up # check both status are not equal, then add it to alert if currStatus != preStatus: # save current node status node.node_up = currStatus node.save() # if node is up if currStatus: # parameters for the zabbix alert msgTxt = 'Node '+ node.host_name +' is UP.' params = { 'alert_type': 'Node', 'referece_id': node.id, 'alert_content': msgTxt, 'alert_status' : 'S' } thunderAlertAdd(params, True) # Saving the node details into the node log saveJobNodeLog(0, node, msgTxt, msgTxt, 1) print 'up........' else: # parameters for the zabbix alert msgTxt = 'Node '+ node.host_name +' is Down.' params = { 'alert_type': 'Node', 'referece_id': node.id, 'alert_content': msgTxt, 'alert_status' : 'F' } thunderAlertAdd(params, True) # Saving the node details into the node log saveJobNodeLog(0, node, msgTxt, msgTxt, 0) print 'Down........' except Exception, e: print e ''' ### check existing services are down like cobbler,chef, zabbix etc If down enable it ### ''' # Getting the services from the tables getServices = MonitorService.objects.filter(status = 1) # looping through the services available for getService in getServices: # Get the service status outputStr = getServiceDetails(getService.command) # if error occurred while deployment if "down" in outputStr: executeServiceCommnd(getService.name)
gpl-3.0
-527,449,063,338,068,400
28.340426
73
0.534276
false
StefanRijnhart/odoomrp-wip
purchase_secondary_unit/__openerp__.py
2
1517
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "Unit of Purchase", "version": "1.0", "summary": "Purchase secondary unit", "depends": [ "product", "purchase", ], "author": "OdooMRP team", "website": "http://www.odoomrp.com", "contributors": [ "Oihane Crucelaegui <[email protected]>", "Pedro M. Baeza <[email protected]>", "Ana Juaristi <[email protected]>" ], "category": "Purchase Management", "data": [ "data/product_data.xml", "views/product_view.xml", "views/purchase_view.xml", "views/pricelist_view.xml", ], "installable": True, }
agpl-3.0
-7,948,394,462,924,185,000
35.119048
78
0.575478
false
coolsnow77/bankeasytrader_v1
setup.py
1
2870
from setuptools import setup import easytrader long_desc = """ easytrader =============== * easy to use to trade in China Stock Installation -------------- pip install easytrader Upgrade --------------- pip install easytrader --upgrade Quick Start -------------- :: import easytrader user = easytrader.use('ht') user.prepare('account.json') user.balance return:: [{ 'asset_balance': '资产总值', 'current_balance': '当前余额', 'enable_balance': '可用金额', 'market_value': '证券市值', 'money_type': '币种', 'pre_interest': '预计利息' ]} user.position return:: [{'cost_price': '摊薄成本价', 'current_amount': '当前数量', 'enable_amount': '可卖数量', 'income_balance': '摊薄浮动盈亏', 'keep_cost_price': '保本价', 'last_price': '最新价', 'market_value': '证券市值', 'position_str': '定位串', 'stock_code': '证券代码', 'stock_name': '证券名称'}] user.entrust return:: [{'business_amount': '成交数量', 'business_price': '成交价格', 'entrust_amount': '委托数量', 'entrust_bs': '买卖方向', 'entrust_no': '委托编号', 'entrust_price': '委托价格', 'entrust_status': '委托状态', # 废单 / 已报 'report_time': '申报时间', 'stock_code': '证券代码', 'stock_name': '证券名称'}] user.buy('162411', price=5.55) user.sell('16411', price=5.65) """ setup( name='easytrader', version=easytrader.__version__, description='A utility for China Stock Trade', long_description = long_desc, author='shidenggui', author_email='[email protected]', license='BSD', url='https://github.com/shidenggui/easytrader', keywords='China stock trade', install_requires=[ 'demjson', 'requests', 'logbook', 'anyjson', 'six' ], classifiers=['Development Status :: 4 - Beta', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'License :: OSI Approved :: BSD License'], packages=['easytrader', 'easytrader.config', 'easytrader.thirdlibrary'], package_data={'': ['*.jar', '*.json'], 'config': ['config/*.json'], 'thirdlibrary': ['thirdlibrary/*.jar']}, )
mit
1,468,537,289,616,456,000
23.075472
116
0.49699
false
narendrameena/featuerSelectionAssignment
gloub.py
1
1478
import numpy as np
import csv
from sklearn.datasets import load_svmlight_file


def golub(data, labels):
    c = []
    for i in range(0, len(data)):
        c.append([labels[i], data[i]])  # pair each label with its data row

    negative = []
    positive = []
    for i in range(0, len(c)):
        if c[i][0] == [-1]:  # negative class
            negative.append(c[i][1])
        if c[i][0] == [1]:   # positive class
            positive.append(c[i][1])

    negMean = np.mean(np.asarray(negative), axis=0).tolist()  # per-feature mean, negative class
    negStd = np.std(np.asarray(negative), axis=0).tolist()    # per-feature standard deviation, negative class
    posMean = np.mean(np.asarray(positive), axis=0).tolist()  # per-feature mean, positive class
    posStd = np.std(np.asarray(positive), axis=0).tolist()    # per-feature standard deviation, positive class

    score = []
    for i in range(0, len(negMean)):
        if (posStd[i] + negStd[i]) != 0:
            score.append((posMean[i] - negMean[i]) / (posStd[i] + negStd[i]))
        else:
            score.append(0)  # zero if the denominator is zero
    print(len(score))
    return (score, score)  # return score twice (score and ranking), as asked


def main():
    data = load_svmlight_file("leu")
    X_1 = data[0].todense().tolist()
    y_1 = map(int, data[1])
    lables = []
    for i in range(0, len(y_1)):
        lables = lables + [[y_1[i]]]
    # print(X_1)
    # print(y_1)
    # print(lables)
    print golub(X_1, lables)


if __name__ == "__main__":
    main()
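The score computed above is the per-feature Golub signal-to-noise ratio, s_j = (mu_plus_j - mu_minus_j) / (sigma_plus_j + sigma_minus_j). A vectorized numpy sketch of the same computation (the function name and the toy data are illustrative):

import numpy as np

def golub_scores(X, y):
    # X: samples x features, y: labels in {-1, +1}.
    X = np.asarray(X, dtype=float)
    y = np.asarray(y).ravel()
    pos, neg = X[y == 1], X[y == -1]
    num = pos.mean(axis=0) - neg.mean(axis=0)
    den = pos.std(axis=0) + neg.std(axis=0)
    # Score is zero where the denominator is zero, as in the loop version above.
    return np.where(den != 0, num / np.where(den == 0, 1, den), 0.0)

X = [[1.0, 5.0], [2.0, 5.0], [9.0, 5.0], [10.0, 5.0]]
y = [-1, -1, 1, 1]
print(golub_scores(X, y))   # [8. 0.]: feature 0 separates the classes, feature 1 does not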
cc0-1.0
-885,232,497,795,173,100
28
93
0.574425
false
mmcauliffe/linguistic-helper-functions
linghelper/distance/dtw.py
1
3026
from numpy import zeros,inf from scipy.spatial.distance import euclidean import operator def dtw_distance(rep_one, rep_two,norm=True): """Computes the distance between two representations with the same number of filters using Dynamic Time Warping. Parameters ---------- rep_one : 2D array First representation to compare. First dimension is time in frames or samples and second dimension is the features. rep_two : 2D array Second representation to compare. First dimension is time in frames or samples and second dimension is the features. Returns ------- float Distance of dynamically time warping `rep_one` to `rep_two`. """ assert(rep_one.shape[1] == rep_two.shape[1]) distMat = generate_distance_matrix(rep_one,rep_two) return regularDTW(distMat,norm=norm) def generate_distance_matrix(source,target): """Generates a local distance matrix for use in dynamic time warping. Parameters ---------- source : 2D array Source matrix with features in the second dimension. target : 2D array Target matrix with features in the second dimension. Returns ------- 2D array Local distance matrix. """ sLen = source.shape[0] tLen = target.shape[0] distMat = zeros((sLen,tLen)) for i in range(sLen): for j in range(tLen): distMat[i,j] = euclidean(source[i,:],target[j,:]) return distMat def regularDTW(distMat,norm=True): """Use a local distance matrix to perform dynamic time warping. Parameters ---------- distMat : 2D array Local distance matrix. Returns ------- float Total unweighted distance of the optimal path through the local distance matrix. """ sLen,tLen = distMat.shape totalDistance = zeros((sLen,tLen)) totalDistance[0:sLen,0:tLen] = distMat minDirection = zeros((sLen,tLen)) for i in range(1,sLen): totalDistance[i,0] = totalDistance[i,0] + totalDistance[i-1,0] for j in range(1,tLen): totalDistance[0,j] = totalDistance[0,j] + totalDistance[0,j-1] for i in range(1,sLen): for j in range(1,tLen): #direction,minPrevDistance = min(enumerate([totalDistance[i,j],totalDistance[i,j+1],totalDistance[i+1,j]]), key=operator.itemgetter(1)) #totalDistance[i+1,j+1] = totalDistance[i+1,j+1] + minPrevDistance #minDirection[i,j] = direction minDirection[i,j],totalDistance[i,j] = min(enumerate([totalDistance[i-1,j-1] + 2*totalDistance[i,j], totalDistance[i-1,j] + totalDistance[i,j], totalDistance[i,j-1] + totalDistance[i,j]]), key=operator.itemgetter(1)) if norm: return totalDistance[sLen-1,tLen-1] / (sLen+tLen) return totalDistance[sLen-1,tLen-1]
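A short usage sketch for dtw_distance above, assuming it is importable from this module; the two representations only need the same number of features (second dimension), not the same number of frames:

import numpy as np

a = np.array([[0.0], [1.0], [2.0], [3.0]])   # 4 frames, 1 feature
b = np.array([[0.0], [2.0], [3.0]])          # 3 frames, 1 feature

print(dtw_distance(a, b))              # path cost normalised by (4 + 3)
print(dtw_distance(a, b, norm=False))  # raw accumulated path cost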
gpl-3.0
-5,921,349,442,274,469,000
31.537634
147
0.60575
false
expert360/cfn-params
tests/test_resolution.py
1
2629
import collections import unittest import cfnparams.exceptions import cfnparams.resolution BotoCfnStack = collections.namedtuple( 'BotoCfnStack', ['stack_id', 'stack_name', 'outputs', 'tags'] ) StackOutput = collections.namedtuple('Output', ['key', 'value']) class MockStrategy(object): def __init__(self, stacks): self.stacks = stacks def __call__(self, cfn, name): for stack in self.stacks: if stack.stack_name == name: yield cfnparams.resolution.Stack(stack) class ResolverTestCase(unittest.TestCase): def setUp(self): stacks = [ BotoCfnStack( '1', 'example', [StackOutput('Foo', 'foo')], {} ), BotoCfnStack( '2', 'foo', [StackOutput('Foo', 'staging'), StackOutput('Bar', 'bar')], {'Environment': 'staging'} ), BotoCfnStack( '2', 'foo', [StackOutput('Foo', 'production'), StackOutput('Baz', 'baz')], {'Environment': 'production'} ), ] self.strategy = MockStrategy(stacks) def test_no_filters(self): resolver = cfnparams.resolution.Resolver(None, self.strategy, {}) self.assertEqual(resolver('example', 'Foo'), 'foo') self.assertEqual(resolver('foo', 'Bar'), 'bar') self.assertEqual(resolver('foo', 'Baz'), 'baz') # undefined behaviour, but should not raise an exception self.assertIsNotNone(resolver('foo', 'Foo')) with self.assertRaises(cfnparams.exceptions.ResolutionError): resolver('example', 'NotPresent') def test_with_filter(self): resolver = cfnparams.resolution.Resolver( None, self.strategy, {'Environment': 'staging'} ) self.assertEqual(resolver('foo', 'Foo'), 'staging') self.assertEqual(resolver('foo', 'Bar'), 'bar') with self.assertRaises(cfnparams.exceptions.ResolutionError): resolver('foo', 'Baz') with self.assertRaises(cfnparams.exceptions.ResolutionError): resolver('example', 'Foo') resolver = cfnparams.resolution.Resolver( None, self.strategy, {'Environment': 'production'} ) self.assertEqual(resolver('foo', 'Foo'), 'production') self.assertEqual(resolver('foo', 'Baz'), 'baz') with self.assertRaises(cfnparams.exceptions.ResolutionError): resolver('foo', 'Bar')
mit
-4,560,765,489,366,899,000
30.674699
78
0.554203
false
calpaterson/recall
src/recall/bookmarks.py
1
5227
# -*- coding: utf-8 -*- # Recall is a program for storing bookmarks of different things # Copyright (C) 2012 Cal Paterson # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from urllib.parse import unquote from bottle import abort, request, Bottle, response from recall import convenience as conv from recall import ( plugins, search, data, jobs, ) from bs4 import BeautifulSoup logger = conv.logger("bookmarks") app = Bottle() app.install(plugins.ppjson) app.install(plugins.auth) app.install(plugins.cors) app.install(plugins.exceptions) app.error_handler = plugins.handler_dict @app.post("/<who>/public/<when>/") def add_public(who, when, user): if "~" not in request.json or "@" not in request.json: abort(400, "You must include @ and ~ with all bookmarks") if request.json["@"] != who or who != user["email"]: abort(400, "You may only add bookmarks as yourself") if request.json["~"] != int(when): abort(400, "You must use the same time in the bookmark as you post to") if data.has_problematic_keys(request.json): abort(400, "Bookmarks must not have keys prefixed with $ or £") request.json["£created"] = conv.unixtime() conv.db().bookmarks.insert(request.json) del request.json["_id"] jobs.enqueue(search.IndexRecord(request.json), priority=1) response.status = 202 @app.post("/<who>/private/<when>/") def add_private(who, when, user): request.json["%private"] = True add_public(who, when, user) @app.get("/public/") def public_bookmarks(): query = search.SearchQueryBuilder() if "q" in request.params: query.with_keywords(request.params["q"]) query.anonymously() total, results = search.search(query) response.set_header("X-Recall-Total", total) if results == []: response.status = 404 data.strip_generated_keys(results) return results @app.get("/<who>/all/") def user_all_bookmarks(who, user): if who != user["email"]: abort(400, "You may only look at your own bookmarks") query = search.SearchQueryBuilder() if "q" in request.params: query.with_keywords(request.params["q"]) query.as_user(user) total, results = search.search(query) if results == []: response.status = 404 data.strip_generated_keys(results) return results @app.route("/<who>/", method="POST") def import_(who, user): soup = BeautifulSoup(request.body) if soup.contents[0] != "NETSCAPE-Bookmark-file-1": abort(400, "You must send a bookmark file with the doctype " + " 'NETSCAPE-Bookmarks-file-1'") anchors = soup.find_all("a") bookmarks = [] add_dates = set() for anchor in anchors: bookmark = { "~": int(anchor.attrs.get("add_date", conv.unixtime())) } while bookmark["~"] in add_dates: bookmark["~"] += 1 add_dates.add(bookmark["~"]) bookmark["hyperlink"] = anchor.attrs["href"] if bookmark["hyperlink"].startswith("place"): continue bookmark["title"] = anchor.string bookmark["@"] = user["email"] bookmark["%private"] = True bookmark["£created"] = conv.unixtime() bookmarks.append(bookmark) for each in bookmarks: conv.db().eachs.insert(each) del each["_id"] 
jobs.enqueue(search.IndexRecord(each), priority=1) response.status = 202 @app.get("/<who>/all/recent/") def recent(who, user): if who != user["email"]: abort(400, "You may only look at your own bookmarks") total, hits = search.search(search.SearchQueryBuilder() .sort_by_when() .of_size(75) .as_user(user) .only_user(user)) response.set_header("X-Recall-Total", total) data.strip_generated_keys(hits) return hits @app.get("/<who>/url/<url_encoded:re:.*>") def url(who, url_encoded, user): if who != user["email"]: abort(400, "You may only look at your own bookmarks") url_decoded = unquote(url_encoded) query = search.SearchQueryBuilder().of_size(1).as_user(user) query.the_url(url_decoded) total, hits = search.search(query) if total > 0: return hits else: response.status(404) #### NOT IMPLEMENTED: @app.get("/<unused_who>/public/") def user_public_bookmarks(unused_who): abort(501) # @app.post("/<who>/<when>/edits/<who_edited>/<time_editted/") # def update(unused_who, unused_when, unused_who_edited, unused_time_editted): # abort(501)
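A hedged sketch of posting a public bookmark to the route defined above; the host, port and email address are illustrative, and authentication is assumed to be handled by the installed bottle plugins:

import time
import requests

when = int(time.time())
bookmark = {"@": "user@example.com", "~": when,
            "hyperlink": "http://example.com", "title": "Example"}
resp = requests.post(
    "http://localhost:8080/user@example.com/public/%d/" % when, json=bookmark)
print(resp.status_code)   # add_public() answers 202 when the bookmark is accepted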
agpl-3.0
6,404,023,144,794,574,000
32.922078
79
0.636103
false
bdr00/typedef
typedef/pragma.py
1
1728
from abc import ABCMeta, abstractmethod from typedef.constants import Arch from typedef.errors import PragmaValueMissing, UnsupportedPragmaPack pack_stack = [] class PragmaStack(object): __metaclass__ = ABCMeta def __init__(self, default_val): self._stack = [] self._default = default_val self._waiting = True self._next_val = None def __call__(self, v): self._validate(v) # should raise ValueError self._waiting = False self._next_val = v return self def __exit__(self, exc_type, exc_val, exc_tb): self._waiting = True self.pop() def __enter__(self): if self._waiting: raise PragmaValueMissing('missing pragma value') self._stack.append(self._next_val) @abstractmethod # TODO: maybe allow non validations? def _validate(self, v): pass @property def Current(self): try: return self._stack[-1] except IndexError: return self._default def pop(self): try: return self._stack.pop() except IndexError: return self._default def push(self, v): self._validate(v) self._stack.append(v) class PragmaPack(PragmaStack): Unknown = None Infer = 0 Tight = 1 x86 = 4 x64 = 8 Os = [x86, x64][Arch.Os] Options = [2 ** i for i in range(5)] Interpreter = [x86, x64][Arch.Interpreter] def __init__(self): super(PragmaPack, self).__init__(self.Infer) def _validate(self, v): if v is not PragmaPack.Infer and v not in self.Options: raise UnsupportedPragmaPack('unsupported pack value') pack = PragmaPack()
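A usage sketch for the module-level `pack` instance above: calling it returns the object itself, so it works directly as a nestable context manager, and push()/pop() remain available for explicit control.

with pack(4):                 # pack value 4 for everything in this block
    print(pack.Current)       # -> 4
    with pack(1):             # packs nest; the innermost value wins
        print(pack.Current)   # -> 1
print(pack.Current)           # back to the default, PragmaPack.Infer (0)

pack.push(8)                  # explicit push/pop is also supported
print(pack.pop())             # -> 8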
mit
-3,177,838,628,537,396,700
22.351351
68
0.583333
false
maaku/haiku-lang
setup.py
1
4196
#!/usr/bin/env python # -*- coding: utf-8 -*- # === setup.py ------------------------------------------------------------=== # Copyright © 2011-2012, RokuSigma Inc. and contributors. See AUTHORS for more # details. # # Some rights reserved. # # Redistribution and use in source and binary forms of the software as well as # documentation, with or without modification, are permitted provided that the # following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * The names of the copyright holders or contributors may not be used to # endorse or promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND # CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT # NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND # DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ===----------------------------------------------------------------------=== import os from distutils.core import setup VERSION = (0,0,8, 'alpha', 0) def get_version(): version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3:] == ('alpha', 0): version = '%s pre-alpha' % version else: if VERSION[3] != 'final': version = '%s %s' % (version, VERSION[3]) if VERSION[4] != 0: version = '%s%s' % (version, VERSION[4]) return version # Compile the list of packages available, because distutils doesn't have an # easy way to do this. packages, data_files = [], [] root_dir = os.path.dirname(__file__) if root_dir: os.chdir(root_dir) for dirpath, dirnames, filenames in os.walk('haiku'): # Ignore dirnames that start with '.' 
for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] if '__init__.py' in filenames: pkg = dirpath.replace(os.path.sep, '.') if os.path.altsep: pkg = pkg.replace(os.path.altsep, '.') packages.append(pkg) elif filenames: prefix = dirpath[6:] # Strip "haiku/" or "haiku\" for f in filenames: data_files.append(os.path.join(prefix, f)) version = get_version().replace(' ', '-') setup(name='haiku-lang', version=version, description='An embedable LISP implemented on top of the Python interpreter.', install_requires=[ 'LEPL>=5.1.1', 'bitstring>=3.0.2', 'python-patterns>=0.0.1', ], author='RokuSigma Inc.', author_email='[email protected]', url='http://www.github.com/monetizeio/haiku-lang/', download_url='http://pypi.python.org/packages/source/h/haiku-lang/haiku-lang-%s.tar.gz' % version, package_dir={'haiku': 'haiku'}, packages=packages, package_data={'haiku': data_files}, classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'License :: Other/Proprietary License', 'Operating System :: OS Independent', 'Programming Language :: Lisp', 'Programming Language :: Python', 'Topic :: Software Development :: Interpreters', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Utilities', ], ) # ===----------------------------------------------------------------------=== # End of File # ===----------------------------------------------------------------------===
bsd-3-clause
3,591,831,845,850,132,500
38.168224
100
0.647578
false
viaict/viaduct
app/repository/custom_form_repository.py
1
3977
from app import db from app.models.activity import Activity from app.models.custom_form import CustomFormFollower, CustomForm, \ CustomFormResult filter_unarchived = db.or_(CustomForm.archived == False, CustomForm.archived == None) # noqa filter_active = db.or_(Activity.id == None, db.and_(Activity.id != None, db.func.now() < Activity.end_time)) # noqa filter_archived_inactive = db.or_(CustomForm.archived == True, db.and_(Activity.id != None, db.func.now() >= Activity.end_time)) # noqa def get_active_followed_forms_by_user(user_id, group_ids): """ Get all forms followed by the user. Filter archived forms and forms with activities in the past. """ q = db.session.query(CustomForm) \ .outerjoin(Activity, CustomForm.id == Activity.form_id) \ .filter(CustomFormFollower.query .filter(CustomForm.id == CustomFormFollower.form_id, CustomFormFollower.owner_id == user_id) .exists(), filter_unarchived, filter_active) if group_ids is not None: q = q.filter(CustomForm.group_id.in_(group_ids)) return q.order_by(CustomForm.id.desc()) \ .all() def get_active_unfollowed_by_user(user_id, group_ids): """ Get all active forms not followed by the user. Filter archived forms and forms with activities in the past. """ q = db.session.query(CustomForm) \ .outerjoin(Activity, CustomForm.id == Activity.form_id) \ .filter(db.not_(CustomFormFollower.query .filter(CustomForm.id == CustomFormFollower.form_id, CustomFormFollower.owner_id == user_id) .exists()), filter_unarchived, filter_active) if group_ids is not None: q = q.filter(CustomForm.group_id.in_(group_ids)) return q.order_by(CustomForm.id.desc()) \ .all() def get_inactive_forms(group_ids): """Get all inactive or ssarchived forms.""" q = db.session.query(CustomForm) \ .outerjoin(Activity, CustomForm.id == Activity.form_id) \ .filter(filter_archived_inactive) if group_ids is not None: q = q.filter(CustomForm.group_id.in_(group_ids)) return q.order_by(CustomForm.id.desc()) \ .all() def get_form_entries_by_form_id(form_id): return db.session.query(CustomFormResult) \ .filter(CustomFormResult.form_id == form_id) \ .order_by(CustomFormResult.created) \ .all() def get_form_by_form_id(form_id): return db.session.query(CustomForm) \ .filter(CustomForm.id == form_id) \ .one_or_none() def get_form_submission_by_id(form_id, submit_id): return db.session.query(CustomFormResult) \ .filter(CustomFormResult.id == submit_id, CustomFormResult.form_id == form_id) \ .one_or_none() def get_form_submission_by_user_id(form_id, user_id): return db.session.query(CustomFormResult) \ .filter(CustomFormResult.owner_id == user_id, CustomFormResult.form_id == form_id) \ .one_or_none() def get_form_following_by_user_id(form, user_id): return db.session.query(CustomFormFollower) \ .filter(CustomFormFollower.form == form, CustomFormFollower.owner_id == user_id) \ .one_or_none() def delete_form_follow(follower): print(follower) db.session.delete(follower) db.session.commit() def follow_form(form, user_id): cf = CustomFormFollower(form=form, owner_id=user_id) db.session.add(cf) db.session.commit() def form_set_archive_status(form, archived): form.archived = archived db.session.commit() def form_set_paid_status(submission, paid): submission.has_paid = not submission.has_paid db.session.commit()
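The repository above builds its SQLAlchemy boolean filters once at module level and reuses them across queries. A hedged sketch of composing them into a new helper, written in the same style as the functions above (the helper name is illustrative, and it assumes the usual viaduct application context):

active_unarchived = db.and_(filter_unarchived, filter_active)

def get_active_forms(group_ids=None):
    # Same join/filter/ordering pattern as the repository functions above.
    q = db.session.query(CustomForm) \
        .outerjoin(Activity, CustomForm.id == Activity.form_id) \
        .filter(active_unarchived)
    if group_ids is not None:
        q = q.filter(CustomForm.group_id.in_(group_ids))
    return q.order_by(CustomForm.id.desc()).all()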
mit
7,212,193,541,876,776,000
30.816
86
0.611013
false
ashtonmv/twod_materials
examples/pbe_bandstructure.py
1
1676
""" Relaxes 2D materials in all subdirectories of the current working directory, along with their most stable competing species. At a specified INTERVAL, checks if all relaxations have converged. Once all are converged, calculates and plots the formation energies of all 2D materials as stability_plot.pdf. """ from __future__ import print_function, division, unicode_literals import os import time from twod_materials.utils import is_converged from twod_materials.electronic_structure.startup import ( run_linemode_calculation ) from twod_materials.electronic_structure.analysis import ( plot_normal_band_structure ) INTERVAL = 360 # Seconds between convergence checks directories = [dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir) and dir not in ['all_competitors']] if __name__ == '__main__': for directory in directories: os.chdir(directory) run_linemode_calculation() os.chdir('../') loop = True while loop: print('>> Checking convergence') finished = [] for directory in directories: if is_converged('{}/pbe_bands'.format(directory)): finished.append(directory) if len(finished) == len(directories): print('>> Plotting band structures') for directory in finished: os.chdir('{}/pbe_bands'.format(directory)) plot_normal_band_structure() os.chdir('../../') loop = False else: print('>> Not all directories converged ({}/{})'.format( len(finished), len(directories))) time.sleep(INTERVAL)
gpl-3.0
513,743,030,525,387,700
30.037037
75
0.636635
false
RedhawkSDR/framework-codegen
redhawk/codegen/jinja/cpp/component/pull/mapping.py
1
2959
# # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # # This file is part of REDHAWK core. # # REDHAWK core is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # REDHAWK core is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # from redhawk.codegen.model.softwarecomponent import ComponentTypes from redhawk.codegen.lang.idl import IDLInterface from redhawk.codegen.jinja.cpp.component.base import BaseComponentMapper class PullComponentMapper(BaseComponentMapper): def _mapComponent(self, softpkg): cppcomp = {} cppcomp['baseclass'] = self.baseClass(softpkg) cppcomp['userclass'] = self.userClass(softpkg) cppcomp['superclasses'] = self.superClasses(softpkg) cppcomp['interfacedeps'] = tuple(self.getInterfaceDependencies(softpkg)) cppcomp['hasmultioutport'] = self.hasMultioutPort(softpkg) return cppcomp @staticmethod def userClass(softpkg): return {'name' : softpkg.basename()+'_i', 'header': softpkg.basename()+'.h', 'file' : softpkg.basename()+'.cpp'} @staticmethod def baseClass(softpkg): baseclass = softpkg.basename() + '_base' return {'name' : baseclass, 'header': baseclass+'.h', 'file' : baseclass+'.cpp'} @staticmethod def superClasses(softpkg): if softpkg.type() == ComponentTypes.RESOURCE: name = 'Component' elif softpkg.type() == ComponentTypes.DEVICE: name = 'Device_impl' aggregate = 'virtual POA_CF::AggregatePlainDevice' elif softpkg.type() == ComponentTypes.LOADABLEDEVICE: name = 'LoadableDevice_impl' aggregate = 'virtual POA_CF::AggregateLoadableDevice' elif softpkg.type() == ComponentTypes.EXECUTABLEDEVICE: name = 'ExecutableDevice_impl' aggregate = 'virtual POA_CF::AggregateExecutableDevice' else: raise ValueError, 'Unsupported software component type', softpkg.type() classes = [{'name': name, 'header': '<ossie/'+name+'.h>'}] if softpkg.descriptor().supports('IDL:CF/AggregateDevice:1.0'): classes.append({'name': aggregate, 'header': '<CF/AggregateDevices.h>'}) classes.append({'name': 'AggregateDevice_impl', 'header': '<ossie/AggregateDevice_impl.h>'}) return classes
lgpl-3.0
454,499,221,827,718,600
42.514706
104
0.670835
false
ohld/miptnews
objs/rssparser.py
1
1446
import ssl
import time
import binascii

import feedparser
from tqdm import tqdm

from objs.news import News


def conv_to_rss(link):
    if "vk.com" in link:
        group = link[link.find("vk.com") + 7:]
        return "http://feed.exileed.com/vk/feed/%s" % (group + "?count=5")
    return link


class RssParser(object):
    """
    Class for parsing an RSS feed.
    Extracts only the fields we are interested in:
    title, link and publication date.
    """
    def __init__(self, config_links):
        self.links = [conv_to_rss(config_links[i]) for i in config_links]
        self.news = []

    def refresh(self):
        self.news = []
        for link in tqdm(self.links, desc="Getting news"):
            data = 0
            if hasattr(ssl, '_create_unverified_context'):
                ssl._create_default_https_context = ssl._create_unverified_context
            data = feedparser.parse(link)
            self.news += [News(binascii.b2a_base64(data['feed']['title'].replace(' VK feed', '').encode()).decode(),
                               binascii.b2a_base64(entry['link'].encode()).decode(),
                               int(time.mktime(entry['published_parsed'])))
                          for entry in data['entries']]
            time.sleep(1)

    def __repr__(self):
        # Fixed: the instance stores a list of links (self.links), not self.link
        return "<RSS ('%s','%s')>" % (len(self.links), len(self.news))
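A small usage sketch for the parser above; the feed URLs are made up for illustration, and refresh() will hit the network.

from objs.rssparser import RssParser

# Hypothetical config: a mapping of names to feed URLs (vk.com links are
# rewritten to an RSS bridge by conv_to_rss above).
links = {'site': 'https://example.org/rss', 'vk': 'https://vk.com/examplegroup'}
parser = RssParser(links)
parser.refresh()
print("fetched %d news items" % len(parser.news))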
apache-2.0
-8,413,308,440,841,147,000
32.65
116
0.578752
false
tudelft3d/val3dity
tools/python/gml2poly/geomtools.py
1
10595
# val3dity - Copyright (c) 2011-2016, Hugo Ledoux. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the authors nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL HUGO LEDOUX BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS import math import random TOLERANCE = 1e-3 class Point: def __init__(self, cx=0.0, cy=0, cz=0, cid=0): self.x = float(cx) self.y = float(cy) self.z = float(cz) self.id = int(cid) def __repr__(self): return str(self.x) + " " + str(self.y) + " " + str(self.z) def str_poly(self): return str(self.id) + " " + "{:f}".format(self.x) + " " + "{:f}".format(self.y) + " " + "{:f}".format(self.z) def str_off(self): return "{:f}".format(self.x) + " " + "{:f}".format(self.y) + " " + "{:f}".format(self.z) def __str__(self): return "{:f}".format(self.x) + "," + "{:f}".format(self.y) + "," + "{:f}".format(self.z) def __getitem__(self, index): if index < 0 or index > 2: raise Exception("out of bound for Point access.") if index == 0: return self.x if index == 1: return self.y if index == 2: return self.z def __eq__(self, other): if (cmp_doubles(self.x, other.x) == 0 and cmp_doubles(self.y, other.y) == 0 and cmp_doubles(self.z, other.z) == 0 ): return True else: return False def __ne__(self, other): return not self.__eq__(other) def __neg__(self): self.x = -(self.x) self.y = -(self.y) self.z = -(self.z) return self def cmp_one_dim(self, other, dim): if dim == 'x': return cmp_doubles(self.x, other.x) if dim == 'y': return cmp_doubles(self.y, other.y) if dim == 'z': return cmp_doubles(self.z, other.z) def distance_to(self, other): return math.sqrt( pow(other.x - self.x, 2) + pow(other.y - self.y, 2) + pow(other.z - self.z, 2) ) def distance_to_proj(self, other, m, n): return math.sqrt( pow(other[m] - self[m], 2) + pow(other[n] - self[n], 2) ) def translate_random(self, dist): a = random.uniform(-dist, dist) self.x += a self.y += a self.z += a def copy(self): return Point(self.x, self.y, self.z) class Vector(Point): def __str__(self): return "VECTOR(" + str(self.x) + " " + str(self.y) + " " + str(self.z) + ")" def __add__(self, o): r = Vector() r.x = self.x + o.x r.y = self.y + o.y r.z = self.z + o.z return r def set_vector(self, a, b): self.x = b.x - a.x self.y = b.y - a.y self.z = b.z - a.z def length(self): return math.sqrt((self.x * self.x) + (self.y * self.y) + (self.z * self.z)) def 
cross_product(self, o): r = Vector() r.x = (self.y * o.z) - (self.z * o.y) r.y = -( (self.x * o.z) - (self.z * o.x) ) r.z = (self.x * o.y) - (self.y * o.x) return r def dot_product(self, o): return (self.x * o.x) + (self.y * o.y) + (self.z * o.z) def normalise(self): length = self.length() #-- raise an if the lenght of the vector is 0 (or near 0) if abs(length) < TOLERANCE: 1.0 / 0 #-- will raise a ZeroDivisionError :) else: self.x = self.x / length self.y = self.y / length self.z = self.z / length return self def get_projection_plane(p1, p2, p3): normal = geomtools.get_normal_rhr(p1, p2, p3) #-- check if the plane if vertical, and assign the projection plane m = 0 #-- xy plane n = 1 if geomtools.cmp_doubles(normal.z, 0.0) == 0: if geomtools.cmp_doubles(normal.y, 0.0) == 00: m = 1 #-- yz plane n = 2 else: m = 0 #-- xz plane n = 2 return m, n def cmp_doubles(a, b): if abs(a-b) <= TOLERANCE: return 0 else: if a - b > 0: return 1 else: return -1 def orient2D(a, b, p): """ Determine if a Point_25 pt is above or below the plane defined by a-b-c (anti-clockwise order) Input: a,b,c : the Point_25 in anti-clockwise order p : the point to test Output: 1 -> pt is BELOW of the plane (OK for left-hand rule) 0 -> 4 points are coplanar -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule) Note: "above and below" means when looking from above; or when using the left-hand rule """ re = det3x3t(a, b, p) if abs(re) < TOLERANCE: return 0 elif re > 0: return 1 else: return -1 def orient2D_proj(a, b, p, m, n): """ Determine if a Point_25 pt is above or below the plane defined by a-b-c (anti-clockwise order) Input: a,b,c : the Point_25 in anti-clockwise order p : the point to test Output: 1 -> pt is BELOW of the plane (OK for left-hand rule) 0 -> 4 points are coplanar -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule) Note: "above and below" means when looking from above; or when using the left-hand rule """ re = det3x3t_expand(a[m], a[n], b[m], b[n], p[m], p[n]) if abs(re) < TOLERANCE: return 0 elif re > 0: return 1 else: return -1 def det3x3t(a, b, c): at = Point(a.x - c.x, a.y - c.y) bt = Point(b.x - c.x, b.y - c.y) return (at.x * bt.y) - (at.y * bt.x) def det3x3t_expand(ax, ay, bx, by, cx, cy): at = Point(ax - cx, ay - cy) bt = Point(bx - cx, by - cy) return (at.x * bt.y) - (at.y * bt.x) def det3x3_point(a, b, c): return det3x3(a.x, a.y, a.z, b.x, b.y, b.z, c.x, c.y, c.z) def det3x3(ax, ay, az, bx, by, bz, cx, cy, cz): temp1 = ax * (by * cz - bz * cy) temp2 = ay * (bx * cz - bz * cx) temp3 = az * (bx * cy - by * cx) return temp1 - temp2 + temp3 def det4x4t(a, b, c, d): return det3x3(a.x-d.x, a.y-d.y, a.z-d.z, b.x-d.x, b.y-d.y, b.z-d.z, c.x-d.x, c.y-d.y, c.z-d.z) def orient3D(a, b, c, p): """ Determine if a Point p is above or below the plane defined by the Points abc (in anti-clockwise order looking from above) Input: a,b,c : the Points in anti-clockwise order looking from above p : the Point to test Output: 1 -> pt is BELOW of the plane (OK for left-hand rule) 0 -> 4 points are coplanar -1 -> pt is ABOVE of the plane (NOT OK for left-hand rule) Note: "above and below" means when looking from above; or when using the left-hand rule """ re = det4x4t(a, b, c, p) if abs(re) < TOLERANCE: return 0 elif re > 0: return 1 else: return -1 #def get_area_triangle(a, b, c): # """Area of triangle (projected on the 2D plane.) 
# # Input: a,b,c: the Points # Ouput: value of area # # """ # return abs(det3x3t(a, b, c) / 2) def get_volume_tetra(a, b, c, d): """Volume of a tetrahedron.""" return abs(det4x4t(a, b, c, d) / 6) def get_normal_rhr(a, b, c): """ Return the normal Vector to the 3 Points a-b-c, acc. to a right-hand rule. If a-b-c are CCW viewed from above, then the Vector points above Input: a,b,c : the Points in anti-clockwise order looking from above Output: the Vector """ v1 = Vector(b.x - a.x, b.y - a.y, b.z - a.z) v2 = Vector(c.x - a.x, c.y - a.y, c.z - a.z) return v1.cross_product(v2) def point_in_triangle_3D(a, b, c, p, m, n): re = True oTr = orient2D_proj(a, b, c, m, n) if ((orient2D_proj(a, b, p, m, n) != oTr) or (orient2D_proj(b, c, p, m, n) != oTr) or (orient2D_proj(c, a, p, m, n) != oTr)): re = False return re def get_midpoint_of_segment(a, b): mid = Point() mid.x = (a.x + b.x) / 2.0 mid.y = (a.y + b.y) / 2.0 mid.z = (a.z + b.z) / 2.0 return mid def point_in_tetra(a, b, c, d, p): otetra = orient3D(a, b, c, d) assert(otetra != 0) if ( (orient3D(a, b, c, p) == otetra) and (orient3D(b, d, c, p) == otetra) and (orient3D(c, d, a, p) == otetra) and (orient3D(d, b, a, p) == otetra) ): return True else: return False oTr = orient2D_proj(a, b, c, m, n) if ((orient2D_proj(a, b, p, m, n) != oTr) or (orient2D_proj(b, c, p, m, n) != oTr) or (orient2D_proj(c, a, p, m, n) != oTr)): re = False return re def intersection_plane_segment(ring, a, b): pass def get_circumsphere(a, b, c, d): pass if __name__ == "__main__": p1 = Point(3, 0, 1) print p1[0], p1.x print p1[-1] # p1 = Point(1.0, 2.00000000001, 3) # p2 = Point(1, 2, 3) # print p1 == p2
gpl-3.0
7,664,310,428,974,760,000
30.253687
117
0.522039
false
opendatateam/udata
udata/tests/frontend/test_topic_frontend.py
1
2471
from udata import search
from udata.tests import SearchTestMixin, TestCase

from udata.core.dataset.factories import VisibleDatasetFactory
from udata.core.dataset.search import DatasetSearch
from udata.core.topic.factories import TopicFactory
from udata.core.topic.search import topic_search_for


class TopicSearchTest(SearchTestMixin, TestCase):
    def test_empty_search_no_match(self):
        '''Should return no result if no data match the tags'''
        with self.autoindex():
            VisibleDatasetFactory.create_batch(2, tags=['whatever'])

        topic = TopicFactory(tags=['no-match'])

        query = topic_search_for(topic, DatasetSearch)
        result = search.query(query)

        self.assertEqual(len(result), 0)

    def test_empty_search_with_match(self):
        '''Should only return data with at least one tag'''
        with self.autoindex():
            included = VisibleDatasetFactory.create_batch(2, tags=['in'])
            excluded = VisibleDatasetFactory.create_batch(2, tags=['out'])

        topic = TopicFactory(tags=['in', 'no-match'])

        query = topic_search_for(topic, DatasetSearch)
        result = search.query(query)

        found = [d.id for d in result]

        self.assertEqual(len(found), 2)

        for dataset in included:
            self.assertIn(dataset.id, found)

        for dataset in excluded:
            self.assertNotIn(dataset.id, found)

    def test_empty_search_with_filter_and_match(self):
        '''Should match both the topic criteria and the query'''
        with self.autoindex():
            # Match both the topic condition and the queried tag
            match = VisibleDatasetFactory.create_batch(2, tags=[
                'in', 'filtered'
            ])
            # Match the topic condition but not the queried tag
            no_match = VisibleDatasetFactory.create_batch(2, tags=['in'])
            # Excluded because not matching one of the topic tags
            excluded = VisibleDatasetFactory.create_batch(2, tags=[
                'out', 'filtered'
            ])

        topic = TopicFactory(tags=['in', 'no-match'])

        query = topic_search_for(topic, DatasetSearch, tag='filtered')
        result = search.query(query)

        found = [d.id for d in result]

        self.assertEqual(len(found), 2)

        for dataset in match:
            self.assertIn(dataset.id, found)

        for dataset in no_match + excluded:
            self.assertNotIn(dataset.id, found)
agpl-3.0
911,704,196,157,743,100
37.015385
74
0.634966
false
mysql/mysql-utilities
mysql-test/t/export_exclude.py
1
8611
# # Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # """ export_exclude test. """ import os import export_parameters_def from mysql.utilities.exception import MUTLibError class test(export_parameters_def.test): """check exclude parameter for export utility This test executes a series of export database operations on a single server using a variety of exclude options. It uses the export_parameters_def test as a parent for setup and teardown methods. """ def check_prerequisites(self): return export_parameters_def.test.check_prerequisites(self) def setup(self, spawn_servers=True): return export_parameters_def.test.setup(self) def run(self): self.res_fname = "result.txt" from_conn = "--server={0}".format( self.build_connection_string(self.server1)) cmd_str = ("mysqldbexport.py --skip=events,grants --no-headers {0} " "--format=CSV util_test --skip-gtid".format(from_conn)) test_num = 1 comment = "Test case {0} - exclude by name.".format(test_num) cmd_opts = ("{0} --exclude=util_test.v1 " "--exclude=util_test.t4".format(cmd_str)) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude by name using " "backticks.".format(test_num)) if os.name == 'posix': cmd_opts = ("{0} --exclude='`util_test`.`v1`' " "--exclude='`util_test`.`t4`'".format(cmd_str)) else: cmd_opts = ('{0} --exclude="`util_test`.`v1`" ' '--exclude="`util_test`.`t4`"'.format(cmd_str)) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude using SQL LIKE " "pattern #1.".format(test_num)) cmd_opts = "{0} -x f% -x _4".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude using SQL LIKE " "pattern #2.".format(test_num)) cmd_opts = "{0} -x util_test.t%".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude using SQL LIKE " "pattern #3.".format(test_num)) cmd_opts = "{0} -x %".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude using REGEXP " "pattern.".format(test_num)) cmd_opts = "{0} -x ^f -x 4$ --regexp".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude by name and SQL LIKE " "pattern.".format(test_num)) cmd_opts = ("{0} --exclude=f% --exclude=_4 -x p% --exclude=v1 " "--exclude=util_test.trg".format(cmd_str)) res = self.run_test_case(0, cmd_opts, comment) if not res: raise 
MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude by name and REGEXP " "pattern.".format(test_num)) cmd_opts = ("{0} --exclude=^f --exclude=4$ -x ^p --exclude=v1 " "--exclude=util_test.trg --regexp".format(cmd_str)) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude everything using SQL LIKE " "pattern.".format(test_num)) cmd_opts = "{0} -x % ".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - exclude everything using REGEXP " "pattern.".format(test_num)) if os.name == 'posix': cmd_opts = "{0} -x '.*' --regexp".format(cmd_str) else: cmd_opts = '{0} -x ".*" --regexp'.format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) # Note: Unlike SQL LIKE pattern that matches the entire value, with a # SQL REGEXP pattern match succeeds if the pattern matches anywhere in # the value being tested. # See: http://dev.mysql.com/doc/en/pattern-matching.html test_num += 1 comment = ("Test case {0}a - SQL LIKE VS REGEXP pattern (match entire " "value VS match anywhere in value).".format(test_num)) cmd_opts = "{0} -x 1 -x t".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) comment = ("Test case {0}b - SQL LIKE VS REGEXP pattern (match entire " "value VS match anywhere in value).".format(test_num)) cmd_opts = "{0} -x 1 -x t --regexp".format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - try exclude everything without using " "pattern.".format(test_num)) if os.name == 'posix': cmd_opts = "{0} -x 'u*' ".format(cmd_str) else: cmd_opts = '{0} -x "u*" '.format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) test_num += 1 comment = ("Test case {0} - try exclude everything with using " "pattern and regexp.".format(test_num)) if os.name == 'posix': cmd_opts = "{0} -x 'u*' --regexp".format(cmd_str) else: cmd_opts = '{0} -x "u*" --regexp'.format(cmd_str) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) export_parameters_def.test._mask_csv(self) test_num += 1 comment = ("Test case {0} - exclude by name using --export=both." "".format(test_num)) cmd_opts = ("{0} --export=both " "--exclude=util_test.t1 --exclude=util_test.t2 " "--exclude=util_test.t3 --exclude=util_test.t4 " "".format(cmd_str)) res = self.run_test_case(0, cmd_opts, comment) if not res: raise MUTLibError("{0}: failed".format(comment)) # Mask known source. self.replace_result("# Source on localhost: ... connected.", "# Source on XXXX-XXXX: ... connected.\n") self.replace_result("# Source on [::1]: ... connected.", "# Source on XXXX-XXXX: ... connected.\n") # Mask GTID warning when servers with GTID enabled are used self.remove_result("# WARNING: The server supports GTIDs but you") return True def get_result(self): return self.compare(__name__, self.results) def record(self): return self.save_result_file(__name__, self.results) def cleanup(self): return export_parameters_def.test.cleanup(self)
gpl-2.0
4,174,144,891,480,443,000
39.617925
79
0.560562
false
scottidler/gimport
gimport.py
1
6213
#!/usr/bin/env python2.7 # to use gimport: use wget or curl to download the gimport.py file locally #ie. os.system('wget -q https://github.com/scottidler/gimport/raw/master/gimport.py -O gimport.py') import os import re import imp import sys import contextlib from subprocess import Popen, PIPE sys.dont_write_bytecode = True class RepospecDecompositionError(Exception): ''' exception when repospec can't be decomposed ''' pass @contextlib.contextmanager def cd(*args, **kwargs): ''' helper change dir function to be used with 'with' expressions ''' mkdir = kwargs.pop('mkdir', True) verbose = kwargs.pop('verbose', False) path = os.path.sep.join(args) path = os.path.normpath(path) path = os.path.expanduser(path) prev = os.getcwd() if path != prev: if mkdir: run('mkdir -p %(path)s' % locals(), verbose=verbose) os.chdir(path) curr = os.getcwd() sys.path.append(curr) if verbose: print 'cd %s' % curr try: yield finally: if path != prev: sys.path.remove(curr) os.chdir(prev) if verbose: print 'cd %s' % prev def run(*args, **kwargs): ''' thin wrapper around Popen; returns exitcode, stdout and stderr ''' nerf = kwargs.pop('nerf', False) shell = kwargs.pop('shell', True) verbose = kwargs.pop('verbose', False) if (verbose or nerf) and args[0]: print args[0] if nerf: return (None, 'nerfed', 'nerfed') process = Popen(shell=shell, *args, **kwargs) stdout, stderr = process.communicate() exitcode = process.poll() if verbose and stdout: print stdout return exitcode, stdout, stderr def expand(path): ''' converts ~ -> /home/%{USER} ''' if path: return os.path.expanduser(path) def decompose(repospec, giturl=None): ''' decompoes repospec into giturl, sep, reponame and revision ''' pattern = r'(((((ssh|https)://)?([a-zA-Z0-9_.\-]+@)?)([a-zA-Z0-9_.\-]+))([:/]{1,2}))?([a-zA-Z0-9_.\-\/]+)@?([a-zA-Z0-9_.\-\/]+)?' 
match = re.search(pattern, repospec) if match: return match.group(2) or giturl, match.group(8), match.group(9), match.group(10) or 'HEAD' raise RepospecDecompositionError(repospec) def divine(giturl, sep, reponame, revision): ''' divines refname and commit from supplied args ''' r2c = {} # revisions to commits c2r = {} # commits to revisions result = run('git ls-remote %(giturl)s%(sep)s%(reponame)s' % locals(), stdout=PIPE)[1].strip() for line in result.split('\n'): commit, refname = line.split('\t') r2c[refname] = commit c2r[commit] = refname refnames = [ 'refs/heads/' + revision, 'refs/tags/' + revision, revision ] commit = None for refname in refnames: commit = r2c.get(refname, None) if commit: break if not commit: commit = revision return c2r.get(commit, None), commit def clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning): ''' wraps clone command with mirroring and caching ''' mirror = '' if mirrorpath: mirror = '--reference %(mirrorpath)s/%(reponame)s.git' % locals() path = os.path.join(cachepath, reponame) repopath = reponame if versioning: repopath = os.path.join(repopath, commit) with cd(cachepath, mkdir=True): if not os.path.isdir(commit): run('git clone %(mirror)s %(giturl)s%(sep)s%(reponame)s %(repopath)s' % locals(), stdout=PIPE, stderr=PIPE) with cd(repopath): run('git clean -x -f -d', stdout=PIPE, stderr=PIPE) run('git checkout %(commit)s' % locals(), stdout=PIPE, stderr=PIPE) return os.path.join(cachepath, repopath) def rmtree(path, empties=False): ''' removes a folder path ''' try: if empties: run('rmdir ' + path) else: run('rm -rf ' + path) dpath = os.path.dirname(path) if dpath: return rmtree(dpath) return path except: return path def gimport(repospec, filepath, giturl=None, imports=None, cachepath='.gimport', mirrorpath=None, versioning=True, persist=False): ''' main function alows user to import code from a git url ''' cachepath = expand(cachepath) mirrorpath = expand(mirrorpath) giturl, sep, reponame, revision = decompose(repospec, giturl) _, commit = divine(giturl, sep, reponame, revision) path = clone(giturl, sep, reponame, commit, cachepath, mirrorpath, versioning) with cd(path): modname = os.path.splitext(os.path.basename(filepath))[0] module = imp.load_source(modname, filepath) if not persist: rmtree(path) if imports: return [module[import_] for import_ in imports] return module def main(): ''' only provided as an easy way to test module; usually used via import ''' try: import argparse except: print 'missing argparse; gimport.py can be used as a library without argparse installed' sys.exit(-1) parser = argparse.ArgumentParser() parser.add_argument( '--cachepath', default='.gimport', help='path to store all gimport cached files') parser.add_argument( '--mirrorpath', help='path to cached repos to support fast cloning') parser.add_argument( '--imports', nargs='+', help='list of imports') parser.add_argument( '--giturl', help='the giturl to be used with git clone') parser.add_argument( '--no-versioning', action='store_false', dest='versioning', help='turn versioning off; checkout in reponame rather than reponame/commit') parser.add_argument( 'repospec', help='repospec schema is giturl?reponame@revision?') parser.add_argument( 'filepath', help='the filepath inside the git repo') ns = parser.parse_args() print gimport(**ns.__dict__) sys.exit(0) if __name__ == '__main__': main()
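A hedged usage sketch for gimport (the module itself targets Python 2.7). The revision 'master' and the persist flag shown here are illustrative assumptions; the repospec format follows the argparse help above, "giturl?reponame@revision?".

import gimport

# Clone (and cache) the repository at the given revision, then load
# gimport.py from the checkout as a module. The URL and revision are
# only examples of the accepted repospec syntax.
module = gimport.gimport('https://github.com/scottidler/gimport@master',
                         'gimport.py',
                         persist=True)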
mit
8,092,490,262,048,937,000
28.585714
133
0.599227
false
pybel/pybel
src/pybel/struct/query/query.py
1
6479
# -*- coding: utf-8 -*- """Query builder.""" import json import logging from typing import Dict, Iterable, List, Mapping, Optional, Set, TextIO, Union from .exc import QueryMissingNetworksError from .seeding import Seeding from ..pipeline import Pipeline from ...dsl import BaseEntity __all__ = [ 'Query', ] logger = logging.getLogger(__name__) class Query: """Represents a query over a network store.""" def __init__( self, network_ids: Union[None, int, Iterable[int]] = None, seeding: Optional[Seeding] = None, pipeline: Optional[Pipeline] = None, ) -> None: """Build a query. :param network_ids: Database network identifiers identifiers """ if not network_ids: self.network_ids = [] elif isinstance(network_ids, int): self.network_ids = [network_ids] elif isinstance(network_ids, Iterable): network_ids = list(network_ids) for network_id in network_ids: if not isinstance(network_id, int): raise TypeError(network_ids) self.network_ids = network_ids else: raise TypeError(network_ids) if seeding is not None and not isinstance(seeding, Seeding): raise TypeError('Not a Seeding: {}'.format(seeding)) self.seeding = seeding or Seeding() if pipeline is not None and not isinstance(pipeline, Pipeline): raise TypeError('Not a pipeline: {}'.format(pipeline)) self.pipeline = pipeline or Pipeline() def append_network(self, network_id: int) -> 'Query': """Add a network to this query. :param network_id: The database identifier of the network :returns: self for fluid API """ self.network_ids.append(network_id) return self def append_seeding_induction(self, nodes: Union[BaseEntity, List[BaseEntity], List[Dict]]) -> Seeding: """Add a seed induction method. :returns: seeding container for fluid API """ return self.seeding.append_induction(nodes) def append_seeding_neighbors(self, nodes: Union[BaseEntity, List[BaseEntity], List[Dict]]) -> Seeding: """Add a seed by neighbors. :returns: seeding container for fluid API """ return self.seeding.append_neighbors(nodes) def append_seeding_annotation(self, annotation: str, values: Set[str]) -> Seeding: """Add a seed induction method for single annotation's values. :param annotation: The annotation to filter by :param values: The values of the annotation to keep """ return self.seeding.append_annotation(annotation, values) def append_seeding_sample(self, **kwargs) -> Seeding: """Add seed induction methods. Kwargs can have ``number_edges`` or ``number_seed_nodes``. """ return self.seeding.append_sample(**kwargs) def append_pipeline(self, name, *args, **kwargs) -> Pipeline: """Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`. :param name: The name of the function :type name: str or types.FunctionType :return: This pipeline for fluid query building """ return self.pipeline.append(name, *args, **kwargs) def __call__(self, manager): """Run this query and returns the resulting BEL graph with :meth:`Query.run`. :param pybel.manager.Manager manager: A cache manager :rtype: Optional[pybel.BELGraph] """ return self.run(manager) def run(self, manager): """Run this query and returns the resulting BEL graph. 
:param manager: A cache manager :rtype: Optional[pybel.BELGraph] """ universe = self._get_universe(manager) graph = self.seeding.run(universe) return self.pipeline.run(graph, universe=universe) def _get_universe(self, manager): if not self.network_ids: raise QueryMissingNetworksError('can not run query without network identifiers') logger.debug('query universe consists of networks: %s', self.network_ids) universe = manager.get_graph_by_ids(self.network_ids) logger.debug('query universe has %d nodes/%d edges', universe.number_of_nodes(), universe.number_of_edges()) return universe def to_json(self) -> Dict: """Return this query as a JSON object.""" rv = { 'network_ids': self.network_ids, } if self.seeding: rv['seeding'] = self.seeding.to_json() if self.pipeline: rv['pipeline'] = self.pipeline.to_json() return rv def dump(self, file: TextIO, **kwargs) -> None: """Dump this query to a file as JSON.""" json.dump(self.to_json(), file, **kwargs) def dumps(self, **kwargs) -> str: """Dump this query to a string as JSON.""" return json.dumps(self.to_json(), **kwargs) @staticmethod def from_json(data: Mapping) -> 'Query': """Load a query from a JSON dictionary. :param data: A JSON dictionary :raises: QueryMissingNetworksError """ network_ids = data.get('network_ids') if network_ids is None: raise QueryMissingNetworksError('query JSON did not have key "network_ids"') seeding_data = data.get('seeding') seeding = ( Seeding.from_json(seeding_data) if seeding_data is not None else None ) pipeline_data = data.get('pipeline') pipeline = ( Pipeline.from_json(pipeline_data) if pipeline_data is not None else None ) return Query( network_ids=network_ids, seeding=seeding, pipeline=pipeline, ) @staticmethod def load(file: TextIO) -> 'Query': """Load a query from a JSON file. :raises: QueryMissingNetworksError """ return Query.from_json(json.load(file)) @staticmethod def loads(s: str) -> 'Query': """Load a query from a JSON string. :param s: A stringified JSON query :raises: QueryMissingNetworksError """ return Query.from_json(json.loads(s)) def __str__(self): return 'Query(networks={}, seeding={}, pipeline={})'.format(self.network_ids, self.seeding, self.pipeline)
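An illustrative sketch of building and serializing a query with the class above. The network id and annotation values are made up, the pipeline function name is assumed to be registered with the pipeline machinery, and the import path is inferred from the file location.

from pybel.struct.query import Query

query = Query(network_ids=[1])                            # hypothetical network id
query.append_seeding_annotation('Subgraph', {'Example'})  # hypothetical annotation/value
query.append_pipeline('remove_isolated_nodes')            # assumed registered pipeline function
print(query.dumps(indent=2))
# query.run(manager) would then build the graph, given a pybel.manager.Manager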
mit
-7,717,907,304,301,928,000
30.759804
116
0.604569
false
rsennrich/nematus
nematus/translate.py
1
2461
#!/usr/bin/env python3
"""Translates a source file using a translation model (or ensemble)."""

import argparse
import logging

import tensorflow as tf

from config import load_config_from_json_file
import inference
import model_loader
import rnn_model
from settings import TranslationSettings
from transformer import Transformer as TransformerModel


def main(settings):
    """
    Translates a source language file (or STDIN) into a target language
    file (or STDOUT).
    """
    # Start logging.
    level = logging.DEBUG if settings.verbose else logging.INFO
    logging.basicConfig(level=level, format='%(levelname)s: %(message)s')

    # Create the TensorFlow session.
    tf_config = tf.ConfigProto()
    tf_config.allow_soft_placement = True
    session = tf.Session(config=tf_config)

    # Load config file for each model.
    configs = []
    for model in settings.models:
        config = load_config_from_json_file(model)
        setattr(config, 'reload', model)
        configs.append(config)

    # Create the model graphs and restore their variables.
    logging.debug("Loading models\n")
    models = []
    for i, config in enumerate(configs):
        with tf.variable_scope("model%d" % i) as scope:
            if config.model_type == "transformer":
                model = TransformerModel(config)
            else:
                model = rnn_model.RNNModel(config)
            saver = model_loader.init_or_restore_variables(config, session,
                                                           ensemble_scope=scope)
            models.append(model)

    # TODO Ensembling is currently only supported for RNNs, so if
    # TODO len(models) > 1 then check models are all rnn

    # Translate the source file.
    inference.translate_file(input_file=settings.input,
                             output_file=settings.output,
                             session=session,
                             models=models,
                             configs=configs,
                             beam_size=settings.beam_size,
                             nbest=settings.n_best,
                             minibatch_size=settings.minibatch_size,
                             maxibatch_size=settings.maxibatch_size,
                             normalization_alpha=settings.normalization_alpha)


if __name__ == "__main__":
    # Parse console arguments.
    settings = TranslationSettings(from_console_arguments=True)
    main(settings)
bsd-3-clause
-8,753,197,665,726,532,000
33.661972
80
0.607883
false
cgvarela/fileserver
kontalk/fileserver/auth.py
1
4332
# -*- coding: utf-8 -*- """Authentication utilities.""" """ Kontalk Fileserver Copyright (C) 2015 Kontalk Devteam <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from zope.interface import implements from twisted.web import iweb from twisted.cred import credentials, checkers, error, portal from twisted.python import failure from twisted.internet import defer from twisted.words.protocols.jabber import jid, sasl from gnutls.crypto import OpenPGPCertificate from OpenSSL.crypto import X509 import log import keyring class IKontalkCertificate(credentials.ICredentials): def check(fingerprint, kr, verify_cb=None): pass class KontalkCertificate(object): implements(IKontalkCertificate) def __init__(self, cert): self.cert = cert def check(self, fingerprint, kr, verify_cb=None): _jid = None fpr = None if isinstance(self.cert, OpenPGPCertificate): uid = self.cert.uid(0) _jid = jid.JID(uid.email) fpr = self.cert.fingerprint elif isinstance(self.cert, X509): fpr = keyring.verify_certificate(self.cert, kr) if fpr: pkey = kr.get_key(fpr) uid = pkey.uids[0] if uid: _jid = jid.JID(uid.email) fpr = kr.check_user_key(pkey, _jid.user) if not fpr: _jid = None if _jid: def _continue(userjid): return userjid def _error(reason): return None # deferred to check fingerprint against JID cache data if verify_cb: d = verify_cb(_jid, fpr) d.addCallback(_continue) d.addErrback(_error) return d else: return _jid return None class IKontalkToken(credentials.ICredentials): def check(fingerprint, kr, verify_cb): pass class KontalkToken(object): implements(IKontalkToken) def __init__(self, token, decode_b64=False): self.token = token self.decode_b64 = decode_b64 def check(self, fingerprint, kr, verify_cb): try: if self.decode_b64: data = sasl.fromBase64(self.token) else: data = self.token return kr.check_token(data) except: # TODO logging or throw exception back import traceback traceback.print_exc() log.debug("token verification failed!") class AuthKontalkChecker(object): implements(checkers.ICredentialsChecker) credentialInterfaces = IKontalkToken, IKontalkCertificate def __init__(self, fingerprint, kr, verify_cb=None): self.fingerprint = str(fingerprint) self.keyring = kr self.verify_cb = verify_cb def _cbTokenValid(self, userid): if userid: return userid else: return failure.Failure(error.UnauthorizedLogin()) def requestAvatarId(self, credentials): return defer.maybeDeferred( credentials.check, self.fingerprint, self.keyring, self.verify_cb).addCallback( self._cbTokenValid) class AuthKontalkTokenFactory(object): implements(iweb.ICredentialFactory) scheme = 'kontalktoken' def __init__(self, fingerprint, kr): self.fingerprint = fingerprint self.keyring = kr def getChallenge(self, request): return {} def decode(self, response, request): key, token = response.split('=', 1) if key == 'auth': return KontalkToken(token, True) raise error.LoginFailed('Invalid token')
gpl-3.0
7,967,290,889,403,946,000
27.12987
91
0.622345
false
killthekitten/kaggle-carvana-2017
find_bounding_boxes.py
1
2471
import os

import numpy as np
import pandas as pd
from scipy import ndimage

from params import args

MARGIN = 64


def find_slices(mask_img):
    mask = mask_img > 100
    label_im, nb_labels = ndimage.label(mask)
    # Find the largest connected component
    sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
    mask_size = sizes < 50000
    remove_pixel = mask_size[label_im]
    label_im[remove_pixel] = 0
    labels = np.unique(label_im)
    label_im = np.searchsorted(labels, label_im)
    # Now that we have only one connected component, extract its bounding box
    slice_y, slice_x = ndimage.find_objects(label_im == 1)[0]
    return slice_x, slice_y


def find_bounding_boxes():
    img_width = args.img_width
    img_height = args.img_height
    masks_dir = args.pred_mask_dir
    boxes = process_images(img_height, img_width, masks_dir)
    df = pd.DataFrame(boxes)
    df.to_csv("boxes.csv", header=['filename', 'y_start', 'y_end', 'x_start', 'x_end'], index=False)


def process_images(img_height, img_width, masks_dir):
    boxes = []
    for i, filename in enumerate(sorted(os.listdir(masks_dir))):
        mask_img = ndimage.imread(os.path.join(masks_dir, filename), mode='L')
        expanded = np.zeros((1280, 1920), dtype=mask_img.dtype)
        expanded[:, 1:-1] = mask_img
        mask_img = expanded
        slice_x, slice_y = find_slices(mask_img)
        # we should expand by at least 32px + ceil to closest divisible 32
        x_start = max(slice_x.start - MARGIN, 0)
        x_end = min(slice_x.stop + MARGIN, img_width)
        y_start = max(slice_y.start - MARGIN, 0)
        y_end = min(slice_y.stop + MARGIN, img_height)
        bb_height = y_end - y_start
        bb_width = x_end - x_start
        if bb_width % MARGIN != 0:
            bb_width_expand = (bb_width // MARGIN + 1) * MARGIN
            x_start = min(x_start, max(0, x_start - MARGIN))
            x_end = x_start + bb_width_expand
        if bb_height % MARGIN != 0:
            bb_height_expand = (bb_height // MARGIN + 1) * MARGIN
            y_start = min(y_start, max(0, y_start - MARGIN))
            y_end = y_start + bb_height_expand
        assert (x_end - x_start) % MARGIN == 0
        assert (y_end - y_start) % MARGIN == 0
        boxes.append((filename[:-4] + ".jpg", y_start, y_end, x_start, x_end))
        if i % 100 == 0:
            print("processed {} images".format(i))
    return boxes


if __name__ == '__main__':
    find_bounding_boxes()
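A short sketch of consuming the generated boxes.csv; the column names and the MARGIN alignment come from the script above, and this only checks the invariant the script asserts.

import pandas as pd

boxes = pd.read_csv('boxes.csv')
for _, row in boxes.iterrows():
    # Each crop is padded and rounded so its sides are multiples of MARGIN (64 px)
    height = row['y_end'] - row['y_start']
    width = row['x_end'] - row['x_start']
    assert height % 64 == 0 and width % 64 == 0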
mit
3,856,632,177,011,683,000
34.3
100
0.598543
false
saullocastro/pyNastran
pyNastran/converters/tecplot/test_tecplot.py
1
2061
import os
import unittest

from pyNastran.converters.tecplot.tecplot import read_tecplot
from pyNastran.converters.tecplot.tecplot_to_nastran import tecplot_to_nastran_filename
from pyNastran.converters.nastran.nastran_to_tecplot import nastran_to_tecplot, nastran_to_tecplot_filename

import pyNastran
pkg_path = pyNastran.__path__[0]
model_path = os.path.join(pkg_path, 'converters', 'tecplot', 'models')
nastran_path = os.path.join(pkg_path, '..', 'models')


class TestTecplot(unittest.TestCase):

    def test_tecplot_01(self):
        tecplot_filename1 = os.path.join(model_path, 'ascii', 'point_fetri_2d_02.dat')
        tecplot_filename2 = os.path.join(model_path, 'ascii', 'point_fetri_2d_02.dat_out')

        tecplot = read_tecplot(tecplot_filename1)
        #tecplot.write_tecplot(tecplot_filename2, res_types=None,
                              #is_points=True, adjust_nids=True)
        #os.remove(tecplot_filename2)

    def test_tecplot_02(self):
        nastran_filename1 = os.path.join(nastran_path, 'solid_bending', 'solid_bending.bdf')
        nastran_filename2 = os.path.join(nastran_path, 'solid_bending', 'solid_bending2.bdf')
        tecplot_filename = os.path.join(nastran_path, 'solid_bending', 'solid_bending.plt')
        tecplot = nastran_to_tecplot_filename(nastran_filename1, tecplot_filename)
        #tecplot.write_tecplot(tecplot_filename)
        #tecplot_to_nastran_filename(tecplot_filename, nastran_filename2)
        #os.remove(nastran_filename2)
        #os.remove(tecplot_filename)

    def _test_tecplot_02(self):
        nastran_filename1 = os.path.join(nastran_path, 'solid_bending', 'solid_bending.bdf')
        nastran_filename2 = os.path.join(nastran_path, 'solid_bending', 'solid_bending2.bdf')
        tecplot_filename = os.path.join(nastran_path, 'solid_bending', 'solid_bending.plt')
        tecplot = nastran_to_tecplot_filename(nastran_filename1, tecplot_filename)
        tecplot_to_nastran_filename(tecplot_filename, nastran_filename2)


if __name__ == '__main__':  # pragma: no cover
    unittest.main()
lgpl-3.0
-7,380,779,099,647,959,000
44.8
107
0.697234
false
Balannen/LSMASOMM
atom3/Kernel/UserInterface/popupMenuCreator.py
1
13756
""" popupMenuCreator.py This constructs context senstive menus that present only relevent information to the user depending on the state of the canvas. In order to make this fast & intuitive, most of the actual implementations of the menu elements have been pushed into another file. Created June 17, 2004 by Denis Dube """ from Tkinter import Menu, IntVar import time from popupMenuElements import * from OptionDialog import OptionDialog from Embedded_Images import Embedded_Images class PopupMenuCreator: def __init__(self, atom3i ): self.master = atom3i.parent self.atom3i = atom3i self.cb = atom3i.cb self.optionsDatabase = atom3i.optionsDatabase self.popupLogoPhotoimage = Embedded_Images().getPopupLogo() self.popupMenu = None self.event = None # --------------------------- Popup Utilities ------------------------------- def initilizePopupMenu( self, event ): """ Create a new popup menu """ if( self.popupMenu ): self.popupMenu.unpost() self.popupMenu = Menu(self.master , tearoff=0, bg = "white") self.event = event def showPopupMenu( self ): """ Display the popup menu """ if( self.popupMenu ): self.popupMenu.post(self.event.x_root, self.event.y_root) def swapMenu(self, menu ): """ This is a fix for a problem that no longer exists :p It essentially takes one menu and slaps another one in its place. """ raise Exception, "No one uses this method! But if you see this, maybe not so..." self.popupMenu.unpost() self.popupMenu = menu self.showPopupMenu() def popupRemover(self): """ Goodbye popup! """ if( self.popupMenu ): self.popupMenu.unpost() self.popupMenu = None # ---------------------- Context Sensitive Menus -------------------------- def NoCursorNoSelectPopup( self,event ): """ Popup menu to show when no items under the mouse, and no items selected """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... addModelAction( self ) addSelectAll( self ) addPaste( self ) addUndo( self ) addRedo( self ) #......................... addSeperator( self ) #......................... addFileMenu( self ) addModelMenu( self ) addTransformationMenu( self ) addLayoutMenu( self ) addExportMenu( self ) #......................... addSeperator( self ) #......................... addOpenLastModel( self ) addOpenLastMetaModel(self) addSourcePath( self ) #......................... addSeperator( self ) #......................... addToggleSmoothMode( self ) #......................... addSeperator( self ) #......................... addExit( self ) self.showPopupMenu() def NoCursorMultiSelectPopup(self,event): """ Popup menu to show when no items under the mouse, and multiple items selected """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... addLayoutMenu( self ) addResizeEntity( self ) addNodeLabelDragToggle( self ) #......................... addSeperator( self ) #......................... addSelectAll( self ) addDeselectAll( self ) #......................... addSeperator( self ) #......................... addCut( self ) addCopy( self ) addPaste( self ) #......................... addSeperator( self ) #......................... addUndo( self ) addRedo( self ) #......................... addSeperator( self ) #......................... addClear( self ) self.showPopupMenu() def EntityAtCursorMultiSelectPopup(self,event): """ A graphical entity is under the mouse cursor, along with multiple selected items """ self.initilizePopupMenu( event ) addLogo( self ) #......................... 
addSeperator( self ) #......................... addLayoutMenu( self ) addEditEntity( self ) addDragOverlap( self ) addDrawArrow( self ) addResizeEntity( self ) addNodeLabelDragToggle( self ) #......................... addSeperator( self ) #......................... addSelectAll( self ) addDeselectAll( self ) #......................... addSeperator( self ) #......................... addCut( self ) addCopy( self ) addPaste( self ) #......................... addSeperator( self ) #......................... addCopyAttributes( self ) addPasteAttributes( self ) #......................... addSeperator( self ) #......................... addUndo( self ) addRedo( self ) #......................... addSeperator( self ) #......................... addClear( self ) self.showPopupMenu() def EntityAtCursorNoSelectPopup(self,event): """ A graphical entity is under the mouse cursor, but no selected items """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... addEditEntity( self ) addDragOverlap( self ) addDrawArrow( self ) addResizeEntity( self ) #......................... addSeperator( self ) #......................... addSelectAll( self ) addPaste( self ) #......................... addSeperator( self ) #......................... addCopyAttributes( self ) addPasteAttributes( self ) #......................... addSeperator( self ) #......................... addUndo( self ) addRedo( self ) self.showPopupMenu() def LinkAtCursorMultiSelectPopup(self,event): """ A graphical link/connection is under the mouse cursor, along with multiple selected items """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... addLayoutMenu( self ) addEditEntity( self ) addDragOverlap( self ) addArrowEditor( self ) addResizeEntity( self ) addNodeLabelDragToggle( self ) #......................... addSeperator( self ) #......................... addSmoothSelected( self ) addToggleSmoothMode( self ) #......................... addSeperator( self ) #......................... addSelectAll( self ) addDeselectAll( self ) #......................... addSeperator( self ) #......................... addCut( self ) addCopy( self ) addPaste( self ) #......................... addSeperator( self ) #......................... addCopyAttributes( self ) addPasteAttributes( self ) #......................... addSeperator( self ) #......................... addUndo( self ) addRedo( self ) #......................... addSeperator( self ) #......................... addClear( self ) self.showPopupMenu() def LinkAtCursorNoSelectPopup(self,event): """ A graphical link/connection is under the mouse cursor, but there are no selected items """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... addEditEntity( self ) addDragOverlap( self ) addArrowEditor( self ) #......................... addSeperator( self ) #......................... addSelectAll( self ) addToggleSmoothMode( self ) addPaste( self ) #......................... addSeperator( self ) #......................... addCopyAttributes( self ) addPasteAttributes( self ) #......................... addSeperator( self ) #......................... addUndo( self ) addRedo( self ) self.showPopupMenu() def ArrowEditorPopup(self,event): """ Menu for the arrow editor """ self.initilizePopupMenu( event ) addLogo( self ) #......................... addSeperator( self ) #......................... 
addEditEntity( self ) addInsertPoint( self ) addDeletePoint( self ) addSmoothSelected( self ) addNodeLabelMoveToggle( self ) #......................... addSeperator( self ) #......................... addArrowEditorExit( self ) self.showPopupMenu() # ----------------------- Popup a specific submenu ------------------------- def LayoutPopup(self,event): self.initilizePopupMenu( event ) self.popupMenu = self.atom3i.layoutMenu self.showPopupMenu() def ExportPopup(self,event): self.initilizePopupMenu( event ) self.popupMenu = self.atom3i.exportMenu self.showPopupMenu() def ModelPopup(self,event): self.initilizePopupMenu( event ) self.popupMenu = self.atom3i.modelMenu self.showPopupMenu() def TransformationPopup(self,event): self.initilizePopupMenu( event ) self.popupMenu = self.atom3i.transMenu self.showPopupMenu() def FilePopup(self,event): self.initilizePopupMenu( event ) self.popupMenu = self.atom3i.filemenu self.showPopupMenu() def LastModelPopup(self,event): self.initilizePopupMenu( event ) addOpenLastModelSubroutine( self, self.popupMenu ) self.showPopupMenu() def LastMetaModelPopup(self,event): self.initilizePopupMenu( event ) addOpenLastMetaModelSubroutine( self, self.popupMenu ) self.showPopupMenu() def SourcePathPopup(self,event): self.initilizePopupMenu( event ) addSourcePathSubroutine( self, self.popupMenu ) self.showPopupMenu() # ------------------------ String List to PopupMenu --------------------------------- def listChoicePopup(self, title, stringList, unused = None ): """ Creates a popup menu with radiobuttons labeled from the stringList. Returns the index of the label that was chosen. NOTE: choosing outside the popup implicitly chooses index 0 """ # Remove any existing popups first self.popupRemover() self.popupMenu = Menu(self.master , tearoff=0) integerVar = IntVar() self.popupMenu.add_command( label=title, command=self.popupRemover ) self.popupMenu.add_separator() i = 1 for label in stringList: self.popupMenu.add_radiobutton( label=label, variable=integerVar, value=i,indicatoron=False ) i += 1 # This gets the last known co-ordinates of the mouse :D # NOTE: We get co-ordinates in terms of canvas space, convert back into # screenspace first before using them... x,y = self.atom3i.cb.getLastClickCoord() dc = self.atom3i.cb.getCanvas() x,y = [x-dc.canvasx(0),y-dc.canvasy(0)] # These offsets place the menu just where I like it... x = int(x) +40 #+ 100 y = int(y) +40 #+ 20 # Posts the menu, and blocks program execution here on win32 only self.popupMenu.post( x,y ) # Blocks program execution (all platforms) & waits for integerVar to be updated # Not ideal: If we close the popup without selecting anything this will # wait forever and execution will never get anywhere beyond this point!!! # Moreover: AToM3 will not shutdown properly! #self.master.wait_variable( integerVar ) # THEORY: This will work whether or not the post() blocks or not # Practice: Works great on WinXP with Python 2.3 # Linux? while( 1 ): self.master.update() value = integerVar.get() # Hapiness, we got the value we wanted if( value > 0 ): return value # The user killed the popup! O_O elif( self.popupMenu == None ): return 0 # Unhapiness, the user avoided selecting anything elif( value == 0 ): self.popupMenu.unpost() self.popupMenu.post( x,y ) self.master.update() time.sleep( 0.4 ) return 0 # We won't get here, but just in case... def listChoicePopupAlternative(self, title, stringList, actionLabel ): """ OBSOLETE --- Delete this """ raise Exception, "No one uses this method! But if you see this, maybe not so..." 
""" optionList = [OptionDialog.BOOL_BUTTON_ENTRY,actionLabel] options = dict() optionOrder = list() for i in range(0,len(stringList)): options[i] = [False,optionList,stringList[i],''] optionOrder.append(i) i+=1 dialog = OptionDialog(self.master, title, options,optionOrder, grab = False, position = self.atom3i.cb.getLastClickCoordInRootCoords() ) if( dialog.isCanceled() ): return 0 options = dialog.getOptionsDatabase() i = 1 for option in optionOrder: if( options[option][0] ): return i i += 1 return 0 """
gpl-3.0
2,528,474,062,664,432,600
27.587097
112
0.509959
false
luksan/kodos
scripts/pyuicfix.py
1
1631
#!/usr/bin/env python
# -*- coding: utf-8; mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; truncate-lines: 0 -*-
# vi: set fileencoding=utf-8 filetype=python expandtab tabstop=4 shiftwidth=4 softtabstop=4 cindent:
# :mode=python:indentSize=4:tabSize=4:noTabs=true:

"""
this should be invoked by a pyuic wrapper

it looks for the arg after the -o cmd line flag
which is used as the source AND destination file.
"""

#-----------------------------------------------------------------------------#

# Built-in modules
import sys
import re

#-----------------------------------------------------------------------------#

filename = None
args = sys.argv[1:]
for i in range(len(args)):
    arg = args[i]
    if arg == '-o':
        filename = args[i+1]
        break

if not filename:
    print("Error: could not extract filename from: {0}".format(args))
    sys.exit(0)

fp = open(filename, "r")
pycode = fp.read()
fp.close()

# regex from Kodos (of course!)
rx = re.compile(r"""self\.clearWState\(Qt\.WState_Polished\)""")
repl = """try:
            self.clearWState(Qt.WState_Polished)
        except AttributeError:
            pass
"""
pycode = rx.sub(repl, pycode)

rx = re.compile(r"""\.setAccel\((?P<tr>.*)""")
pos = 0
while 1:
    m = rx.search(pycode, pos)
    if not m:
        break
    pos = m.end()
    tr = m.group(1)
    pycode = pycode[:m.start()] + \
        ".setAccel(QKeySequence(" + \
        tr + \
        ")" + \
        pycode[m.end():]

fp = open(filename, "w")
fp.write(pycode)
fp.close()

#-----------------------------------------------------------------------------#
gpl-2.0
-8,801,084,592,959,030,000
23.712121
112
0.514408
false
vkoukis/pymatryoshka
matryoshka/server.py
1
31315
#!/usr/bin/env python # # PyMatryoshka: A VXLAN-over-UDP agent # # Copyright (c) 2012 Vangelis Koukis <[email protected]>. # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """Matryoshka: A VXLAN-over-UDP agent""" import daemon import daemon.pidlockfile import errno import fcntl import logging import logging.handlers import os import pyinotify import select import socket import struct import sys import time from IPy import IP #from scapy.layers.l2 import Ether from signal import signal, siginterrupt, SIGTERM, SIGUSR1, SIGUSR2 from vxlan import VXLAN from tuntap import VirtualTap DEFAULT_MCASTIF = "" DEFAULT_BINDADDR = "" DEFAULT_BINDPORT = 3601 DEFAULT_STATEDIR = "/var/lib/matryoshka" DEFAULT_LOGDIR = "/var/log/matryoshka" DEFAULT_PIDFILE = "/var/run/matryoshka/matryoshka.pid" LOG_FILENAME = "matryoshka.log" LOG_FORMAT = "%(asctime)-15s %(levelname)-6s %(message)s" DEFAULT_MAC_TABLE_SIZE = 10000 class FileHandler(pyinotify.ProcessEvent): """Handle pyinotify events from watching the state directory.""" def __init__(self, server): pyinotify.ProcessEvent.__init__(self) self.server = server def process_IN_DELETE(self, event): """Handle deletion of file in the state directory. Whnever a file is deleted from the state directory, the server detaches itself from the associated virtual network. """ logging.debug("File %s deleted, detaching from virtual network", event.name) self.server.detach_from_network((os.path.join(event.path, event.name))) return def process_IN_CLOSE_WRITE(self, event): """Handle addition of file in the state directory. Whenever a file is added to the state directory, the server attaches itself to the associated virtual network. """ logging.debug("File %s added, attaching to virtual network", event.name) self.server.attach_to_network((os.path.join(event.path, event.name))) return class VirtualNetwork(object): """A virtual network with MAC-to-VTEP learning functionality""" def __init__(self, vni, macttl, mactablesize=DEFAULT_MAC_TABLE_SIZE): self._macs = {} self.socket = None self.targetips = [] self.vni = vni self.macttl = macttl self.mactablesize = mactablesize if not vni or not macttl: raise ValueError("vni and macttl arguments are mandatory") def __repr__(self): return "<vnet vni=0x%06X, macttl=%fs>" % (self.vni, self.macttl) def learn(self, mac, vtep): """Learn a new mac address on endpoint vtep. Learn a new mac address on endpoint vtep, return True if the mac address is a new entry, False if the mac address was already known, so the existing entry gets a refreshed ttl. 
""" now = time.time() existing = mac in self._macs if not existing and len(self._macs) >= self.mactablesize: # Trigger cleaning of stale entries self.gc() if len(self._macs) >= self.mactablesize: raise MemoryError("Mac table size limit of %d reached for %r" % (self.mactablesize, self)) self._macs[mac] = (vtep, now + self.macttl) return not existing def lookup(self, mac): """Lookup a MAC address, return VTEP if found, None otherwise""" now = time.time() entry = self._macs.get(mac, None) if not entry: return None if now > entry[1]: del self._macs[mac] # Remove stale entry return None return entry[0] def gc(self): """Do garbage collection, flush all expired entries in MAC table""" now = time.time() for m in self._macs.keys(): if now > self._macs[m][1]: del self._macs[m] def _parse_network_file(path, family): """Read virtual network information from file""" try: ifile = open(path, "r") except IOError as ioe: logging.error("Unable to open network file %s: %s", path, ioe) return None try: vals = {} lcnt = 0 for line in ifile: lcnt += 1 # Lines are of the form "key = val", keys are converted # to all lowercase, lines starting with '#' are ignored. if not line.strip() or line.strip().startswith("#"): continue (key, val) = [s.strip() for s in line.strip().split("=", 1)] vals[key.lower()] = val except ValueError as ve: logging.error("Cannot parse line %d in %s using 'key=val' format: %s", lcnt, path, ve) return None # Report on missing and unknown keys keys = ["tapname", "vni", "macttl", "targetip", "targetport"] unknown_keys = set(vals.keys()) - set(keys) missing_keys = set(keys) - set(vals.keys()) if unknown_keys: logging.error("Unknown keys specified in network file %s: %s", path, ", ".join(unknown_keys)) return None if missing_keys: logging.error("Required keys missing from network file %s: %s", path, ", ".join(missing_keys)) return None try: vals["vni"] = int(vals["vni"]) vals["macttl"] = float(vals["macttl"]) targetip = IP(vals["targetip"]) if (targetip.version() == 4 and family != socket.AF_INET or targetip.version() == 6 and family != socket.AF_INET6): msg = ("Cannot specify IPv%d IP in TARGETIP when" " using %s") % (targetip.version(), _family_name(family)) raise ValueError(msg) vals["targetip"] = str(targetip) vals["targetport"] = int(vals["targetport"]) except ValueError as ve: logging.error("Validation failed for fields in %s: %s", path, ve) return None if "tapname" in vals and vals["tapname"] != os.path.basename(path): logging.error("Network file %s refers to tap interface %s", path, vals["tapname"]) return None return vals def _mac_is_multicast(mac): return int(mac.split(":")[0], 16) & 1 == 1 def _ip_is_multicast(ip): ip = IP(ip) if ip.version() == 4: return ip in IP("224.0.0.0/4") else: return ip in IP("ff00::/8") def _family_name(family): d = {socket.AF_INET: "IPv4", socket.AF_INET6: "IPv6"} return d[family] def _join_mcast_group(s, addr, ifname): logging.debug("Socket %s joining multicast group %s on ifname '%s'", s.getsockname(), addr, ifname) # Set the TTL for outgoing IP multicast packets # A value of '1' means same subnet, see # http://tldp.org/HOWTO/Multicast-HOWTO-2.html. 
TTL = 1 optval = struct.pack("@B", TTL) if s.family == socket.AF_INET: s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, optval) else: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, optval) # Disable looping of locally originating packets LOOP = 0 optval = struct.pack("@B", LOOP) if s.family == socket.AF_INET: s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, optval) else: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_LOOP, optval) # Subscribe the socket to the IP multicast group on interface ifname mcast_packed = socket.inet_pton(s.family, addr) if s.family == socket.AF_INET: optval = mcast_packed + struct.pack("!II", socket.INADDR_ANY, _if_nametoindex(ifname)) s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, optval) else: optval = mcast_packed + struct.pack("!I", _if_nametoindex(ifname)) s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, optval) logging.debug("Socket %s joined multicast group %s on ifname '%s'", s.getsockname(), addr, ifname) def _leave_mcast_group(s, addr, ifname): logging.debug("Socket %s leaving multicast group %s on ifname '%s'", s.getsockname(), addr, ifname) # Unsubscribe socket from the IP multicast group mcast_packed = socket.inet_pton(s.family, addr) if s.family == socket.AF_INET: optval = mcast_packed + struct.pack("!II", socket.INADDR_ANY, _if_nametoindex(ifname)) s.setsockopt(socket.IPPROTO_IP, socket.IP_DROP_MEMBERSHIP, optval) else: optval = mcast_packed + struct.pack("!I", _if_nametoindex(ifname)) s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_LEAVE_GROUP, optval) logging.debug("Socket %s left multicast group %s on ifname '%s'", s.getsockname(), addr, ifname) # Use ctypes to access libC's if_nametoindex(). # We need if_nametoindex() to get the network interface index # to pass to IP_MULTICAST_IF/IPV6_MULTICAST_IF socket options. from ctypes import CDLL _libc = CDLL("libc.so.6") def _if_nametoindex(ifname): if not ifname: return 0 i = _libc.if_nametoindex(ifname) if not i: raise ValueError("Invalid network interface name %s" % ifname) return i def _get_bound_udp_socket(family, addr, port, mcastif): """Get a UDP socket of the requested family. The socket is IPv4/IPv6 based on the value of family, bound to addr:port. If addr=None, the socket is bound to 0.0.0.0, or ::, for IPv4 and IPv6 respectively. If mcastif is set, outgoing multicast traffic is sent over the network interface with name mcastif on the local host, e.g. eth0. The socket is also set to allow IP broadcasting. 
""" if not addr: addr = "0.0.0.0" if family == socket.AF_INET else "::" try: ip = IP(addr) except ValueError: logging.error("Not a valid IPv4 or IPv6 address: %s", addr) return None if (ip.version() == 4 and family != socket.AF_INET or ip.version() == 6 and family != socket.AF_INET6): logging.error("Cannot bind to an IPv%d address when using %s", ip.version(), _family_name(family)) return None try: s = socket.socket(family, socket.SOCK_DGRAM, 0) if family == socket.AF_INET6: # Only bind for IPv6 traffic when using an IPv6 socket s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) s.bind((addr, port)) except socket.error as msg: logging.error("Could not bind %s UDP socket on %s, port %d: %s", _family_name(family), addr, port, msg) s.close() return None # Allow sending UDP datagrams to broadcast addresses try: s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) except Exception as msg: logging.error("Could not set the SO_BROADCAST flag on socket: %s", msg) s.close() return None # Set the outgoing interface for multicast traffic try: ifindex = _if_nametoindex(mcastif) if family == socket.AF_INET6: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, struct.pack("!I", ifindex)) else: s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, struct.pack("!III", 0, 0, ifindex)) except Exception as msg: logging.error("Failed to set multicast interface to '%s': %s", mcastif, msg) s.close() return None # Set the socket in non-blocking mode fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK) return s def sigterm_handler(signum, stack_frame): assert signum == SIGTERM logging.info("Caught SIGTERM, terminating...") raise SystemExit sigusr1_proxy = None tracing = None def sigusr12_handler(signum, stack_frame): global tracing assert signum == SIGUSR1 or signum == SIGUSR2 if signum == SIGUSR1: logging.info("Caught SIGUSR1. 
Showing currnet proxy state:") sigusr1_proxy.log_state() return if signum == SIGUSR2: tracing = not tracing logger = logging.getLogger() logger.setLevel(logging.DEBUG if tracing else logging.INFO) logging.info("Caught SIGUSR2, %s tracing" % ("enabling" if tracing else "disabling")) return class VXLANProxy(object): """The main class implementing the Matryoshka VXLAN proxy.""" def _create_vnet(self, vni, macttl): vn = VirtualNetwork(vni=vni, macttl=macttl) if vni in self.vnet_vni_map: raise ValueError("VNI %s already in use for vnet %r", self.vnet_vni_map[vni]) self.vnet_vni_map[vni] = vn return vn def _vnet_from_vni(self, vni): return self.vnet_vni_map[vni] def _remove_vnet(self, vnet): msg = "Removed vnet %r" % vnet del self.vnet_vni_map[vnet.vni] del vnet logging.info(msg) def _attach_to_network(self, tapname, vni, macttl, targetip, targetport): if tapname in self.vnet_tapname_map: vnet = self.vnet_tapname_map[tapname] msg = ("Ignoring network addition request for tapname %s," " already in use for vnet %r" % (tapname, vnet)) raise ValueError(msg) tap = VirtualTap(name=tapname) tap.open() self.taps.append(tap) # Set tap in non-blocking mode fcntl.fcntl(tap, fcntl.F_SETFL, os.O_NONBLOCK) vn = self._create_vnet(vni=vni, macttl=macttl) vn.targets = [(targetip, int(targetport))] vn.socket = self.socket vn.tap = tap tap.vnet = vn self.vnet_tapname_map[tapname] = vn for t in vn.targets: if _ip_is_multicast(t[0]): _join_mcast_group(vn.socket, t[0], self.mcastif) logging.info("Joined new network, vnet %r over tap %r", tap.vnet, tap) def _detach_from_network(self, tapname): try: vnet = self.vnet_tapname_map[tapname] except KeyError: logging.error("Ignoring request to detach from unknown tap %s", tapname) return for t in vnet.targets: if _ip_is_multicast(t[0]): _leave_mcast_group(vnet.socket, t[0], self.mcastif) del self.vnet_tapname_map[tapname] self._close_tap(vnet.tap) self._remove_vnet(vnet) def _close_tap(self, tap): logging.debug("Closing tap %r", tap) tap.close() self.taps.remove(tap) del tap def _handle_incoming_frame(self, tap): """Handle reception of incoming Ethernet frame on tap iface.""" vnet = tap.vnet logging.debug("Incoming frame on tap %r, vnet %r", tap, vnet) frame = os.read(tap.fileno(), 10000) if not frame: logging.error("EOF on read, removing tap %r", tap) self._close_tap(tap) return # TODO: Learn source mac. If it's a new MAC, # broadcast the packet to all VTEPs, to force MAC table # update on migrations. # build VXLAN-encapsulated packet #ether = Ether(frame) #packet = VXLAN(VNI=vnet.vni) / ether vx = VXLAN(frame=frame, vni=vnet.vni) # lookup vtep address for target dst MAC, # broadcast to all known targets if it's a multicast MAC. 
targets = [None] if not _mac_is_multicast(vx.dst_mac): targets = [vnet.lookup(vx.dst_mac)] if targets[0] is None: targets = vnet.targets # send it over UDP # TODO: Hash ether's headers to get source UDP address s = vnet.socket for t in targets: buf = str(vx) logging.debug("Sending VXLAN packet of %d bytes to peer %s", len(buf), t) # TODO: Set O_NONBLOCK everywhere, report EAGAIN errors s.sendto(buf, t) def _handle_incoming_packet(self, s): """Handle reception of encapsulated Ethernet frame on UDP socket.""" logging.debug("Incoming packet on socket %s", s.getsockname()) (packet, srcvtep) = s.recvfrom(10000) if not packet: logging.error("Received zero-length packet from %s?!", srcvtep) return logging.debug("Incoming packet of length %d from %s", len(packet), srcvtep) try: # vxlan = VXLAN(packet) # vni = vxlan.VNI vx = VXLAN(packet=packet) vni = vx.vni except Exception as e: logging.error("Dropping malformed non-VXLAN packet: %s", e) return try: vnet = self._vnet_from_vni(vni) except KeyError: logging.error("Dropping packet with unknown VNI = %d", vni) return logging.debug("Incoming packet from %s, len = %d for vnet = %r", srcvtep, len(packet), vnet) # ether = vxlan.getlayer(Ether) #logging.debug("Ether MACs: dst = %s, src = %s", ether.dst, ether.src) logging.debug("Ether MACs: dst = %s, src = %s", vx.dst_mac, vx.src_mac) if _mac_is_multicast(vx.src_mac): # Drop frames with multicast address as Ethernet source MAC. # # IEEE 802.3-2002, Section 3.2.3(b) says I/G (multicast) bit is # reserved for Ethernet src MACs, see # http://standards.ieee.org/getieee802/download/802.3-2002.pdf # # Also useful: # RFC 1812, Section 3.3.2 says a router MUST not believe any ARP # reply that claims that the Link Layer address of another host or # router is a broadcast or multicast address, but the MS load # balancer violates this rule. logging.warning("Dropping inner Ethernet frame with multicast src") return else: logging.debug("About to learn source MAC %s, endpoint %s", vx.src_mac, srcvtep) try: wasnew = vnet.learn(vx.src_mac, srcvtep) logging.debug("MAC was %s for vnet %r", 'new' if wasnew else 'known', vnet) except MemoryError: logging.debug("Could not learn MAC, table for %r full", vnet) try: logging.debug("Writing Ethernet frame of length %d to fd %d", len(vx.frame), vnet.tap.fileno()) # TODO: Set O_NONBLOCK everywhere n = os.write(vnet.tap.fileno(), vx.frame) if n != len(vx.frame): logging.warning("Short write: %d != %d to tap %r for vnet %r", n, len(vx.frame), vnet.tap, vnet) except Exception as e: logging.error("Error writing frame to tap %r for vnet %r: %s", vnet.tap, vnet, e) def __init__(self, family=socket.AF_INET, bindaddr=DEFAULT_BINDADDR, bindport=DEFAULT_BINDPORT, mcastif=DEFAULT_MCASTIF, statedir=DEFAULT_STATEDIR): self.taps = [] self.sockets = [] self.vnet_vni_map = {} self.vnet_tapname_map = {} self.family = family self.bindaddr = bindaddr self.bindport = bindport self.mcastif = mcastif self.statedir = statedir self.wm = pyinotify.WatchManager() mask = pyinotify.EventsCodes.ALL_FLAGS["IN_DELETE"] mask |= pyinotify.EventsCodes.ALL_FLAGS["IN_CLOSE_WRITE"] self.notifier = pyinotify.Notifier(self.wm, FileHandler(self)) wdd = self.wm.add_watch(self.statedir, mask, rec=True) if wdd[self.statedir] < 0: raise Exception("Could not watch state directory %s" % self.statedir) # Allocate a single listening UDP socket. 
self.socket = _get_bound_udp_socket(self.family, self.bindaddr, self.bindport, self.mcastif) if not self.socket: raise Exception("Could not get bound UDP socket") self.sockets.append(self.socket) def attach_to_network(self, path): """Attach to a new virtual network, get parameters from path. The basename of the path is used as the name of the tap interface used to attach to the virtual network on the local host. """ logging.info("Attaching to network for file %s", path) tapname = os.path.basename(path) info = _parse_network_file(path, self.family) if not info: logging.error("Ignoring network file %s due to errors", path) return if tapname != info["tapname"]: raise ValueError("filename of %s does not match TAPNAME=%s" % (tapname, info["tapname"])) self._attach_to_network(**info) def detach_from_network(self, path): """Detach from a virtual network. The basename of the path is used as the name of the tap interface used to determine which network to detach from. """ logging.info("Detaching from network for file %s", path) tapname = os.path.basename(path) self._detach_from_network(tapname) def log_state(self): s = ["%s" % str(sock.getsockname()) for sock in self.sockets] t = ["%r %r" % (tap, tap.vnet) for tap in self.taps] logging.info("Current set of open sockets, %d entries: %s", len(s), ", ".join(s)) logging.info(("Current set of tap interfaces, and associated virtual" " networks, %d entries: %s"), len(t), ", ".join(t)) logging.info("Current mapping of VNIs to virtual networks: %s", repr(self.vnet_vni_map)) logging.info(("Current mapping of tap interface names to virtual" " networks: %s"), repr(self.vnet_tapname_map)) logging.info("MAC tables per virtual network:") for v in self.vnet_vni_map.keys(): vnet = self.vnet_vni_map[v] logging.info("vnet %r: %r", vnet, vnet._macs) def serve(self): # Cheat: get pyinotify Watch Manager's fd directly wmfd = self.wm._fd while True: # Before blocking on select(), process any pyinotify # events which may have been queued up by previous # invocations of serve(), but may have been left # unprocessed due to premature termination of this method, # if exceptions were thrown. logging.debug("processing any left-over pyinotify events") self.notifier.process_events() logging.debug("Waiting for input from %d sockets, %d taps", len(self.sockets), len(self.taps)) try: rdset = self.sockets + self.taps + [wmfd] rfds, wfds, excfds = select.select(rdset, [], []) except select.error as e: if e[0] == errno.EINTR: continue logging.debug("Woke up after select, r = (%s, %s, %s)", rfds, wfds, excfds) for fd in rfds: assert fd in rdset assert not wfds assert not excfds for fd in rfds: if fd in self.sockets: logging.debug("Socket fd %d ready after select", fd.fileno()) self._handle_incoming_packet(fd) if fd in self.taps: logging.debug("Tap fd %d ready after select", fd.fileno()) self._handle_incoming_frame(fd) if fd == wmfd: self.notifier.read_events() self.notifier.process_events() def parse_arguments(args): from argparse import ArgumentParser, RawDescriptionHelpFormatter description = \ ("Matryoshka is a VXLAN encapsulation agent, and implements a VXLAN\n" "Virtual Tunnel Endpoint (VTEP). 
It performs two main functions:\n" "a) it receives Ethernet frames from local tap ifaces, encapsulates\n" " them in VXLAN packets with a proper Virtual Network ID (VNI), \n" " and forwards them to the right VTEP based on destination MAC,\n" "b) it listens to a UDP port, receiving VXLAN-encapsulated Ethernet\n" " frames, which it then forwards to the proper local tap device\n" " based on the VNI of the incoming packet.\n\n" "Matryoshka watches a state directory for requests\n" "to attach and detach from virtual networks dynamically.") parser = ArgumentParser(description=description, formatter_class=RawDescriptionHelpFormatter) parser.add_argument("-p", "--port", action="store", dest="bindport", default=DEFAULT_BINDPORT, metavar="PORT", help=("Bind to UDP port PORT, default is %d" % DEFAULT_BINDPORT)) parser.add_argument("-6", "--ipv6", action="store_const", dest="ipfamily", default=socket.AF_INET, const=socket.AF_INET6, help="Run over IPv6, default is to run over IPv4") parser.add_argument("-i", "--mcastif", action="store", dest="mcastif", default=DEFAULT_MCASTIF, metavar="IFNAME", help=("Send outgoing multicast datagrams, and join" " multicast groups over the interface with name" " IFNAME (e.g., eth0) on the local host. If not" " specified, multicast traffic goes over the" " default interface for the system.")) parser.add_argument("--bindaddr", action="store", dest="bindaddr", default=DEFAULT_BINDADDR, metavar="ADDRESS", help=("Bind to host interface with address ADDRESS," " default is to bind to 0.0.0.0 or to ::, for" " IPv4/IPv6 respectively. Warning: Do not bind" " if you will be using broadcast or multicast" " target addresses.")) parser.add_argument("-s", "--statedir", action="store", dest="statedir", default=DEFAULT_STATEDIR, metavar="DIRECTORY", help=("Watch DIRECTORY for virtual network bindings," " default is %s" % DEFAULT_STATEDIR)) parser.add_argument("--pidfile", action="store", dest="pidfile", default=DEFAULT_PIDFILE, metavar="PIDFILE", help=("Write the PID to PIDFILE if daemonizing," " default is %s" % DEFAULT_PIDFILE)), parser.add_argument("-d", "--debug", action="store_true", dest="debug", default=False, help="Turn on debugging messages") parser.add_argument("-l", "--logging-dir", action="store", dest="logdir", default=DEFAULT_LOGDIR, metavar="DIRECTORY", help=("Store logfile %s in DIRECTORY, default is %s" % (LOG_FILENAME, DEFAULT_LOGDIR))) parser.add_argument("-f", "--foreground", action="store_false", dest="daemonize", default=True, help="Stay in the foreground and do not daemonize") return parser.parse_args(args) def main(): global tracing opts = parse_arguments(sys.argv[1:]) tracing = opts.debug logger = logging.getLogger() logger.setLevel(logging.DEBUG if opts.debug else logging.INFO) if opts.daemonize: logfile = os.path.join(opts.logdir, LOG_FILENAME) handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=1048576) else: handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(LOG_FORMAT)) logger.addHandler(handler) if opts.daemonize: pidfile = daemon.pidlockfile.TimeoutPIDLockFile(opts.pidfile, 10) d = daemon.DaemonContext(pidfile=pidfile, stdout=handler.stream, stderr=handler.stream, files_preserve=[handler.stream]) d.umask = 0022 d.open() logging.info("Starting matryoshka...") proxy = VXLANProxy(family=opts.ipfamily, bindaddr=opts.bindaddr, bindport=opts.bindport, statedir=opts.statedir) # Touch every single file in state dir, to trigger additions logging.info("Touching all files under %s, to trigger network additions", opts.statedir) try: 
for dirpath, dirnames, filenames in os.walk(opts.statedir): for fname in filenames: path = os.path.join(dirpath, fname) open(path, 'a').close() except Exception as msg: logging.error("Caught exception while touching files in %s: %s", opts.statedir, msg) logging.info("Dropping privileges, setting capabilities, switching uid") # TODO: Drop all privileges # Handle SIGTERM, SIGUSR1, do not interrupt system calls global sigusr1_proxy sigusr1_proxy = proxy signal(SIGTERM, sigterm_handler) siginterrupt(SIGTERM, False) signal(SIGUSR1, sigusr12_handler) siginterrupt(SIGUSR1, False) signal(SIGUSR2, sigusr12_handler) siginterrupt(SIGUSR2, False) logging.info("Watching state directory %s", opts.statedir) while True: try: logging.info("Entering proxy request servicing loop") proxy.serve() except ValueError as ve: logging.error("Caught exception: Invalid input values: %s", ve) logging.info("Resuming main request loop") except Exception: logging.exception("Caught unexpected exception, text follows") logging.info("Resuming main request loop in 1s") time.sleep(1) logging.info("Exiting matryoshka...") return 0 if __name__ == "__main__": sys.exit(main())
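For reference, here is a minimal sketch of a state-directory file that _parse_network_file() above would accept. The basename of the file names the tap interface, so TAPNAME must match it, and all five keys are required; the specific VNI, TTL, multicast group and port below are illustrative values only (the port simply reuses DEFAULT_BINDPORT).

# /var/lib/matryoshka/tap100   (hypothetical example; the basename names the tap)
# Lines are "key = val"; lines starting with '#' are ignored.
TAPNAME = tap100
VNI = 4097
MACTTL = 300.0
TARGETIP = 239.1.1.1
TARGETPORT = 3601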
gpl-2.0
skitazaki/python-clitool
clitool/__init__.py
# -*- coding: utf-8 -*-

"""
===================================
 Command Line Tool Utilities
===================================

* One line argument parsing function and decorator
* Simple configuration loader
* Stream utility with some logging
* CSV reader/writer unicode support for Python 2.x (in official document)
* Apache accesslog parser

Requirements
============

* Python 2.7 or 3.x

Python 2.4, 2.5, 2.6 are not supported.

Install
=======

Use ``pip`` via PyPI. ::

    pip install clitool

Bootstrap
=========

At first, create your script file using module script, ``clitool.cli``. ::

    $ python -m clitool.cli -o your-script.py

This file can parse basic command line options and arguments. ::

    $ ./your-script.py --help
    usage: your-script.py [-h] [-c FILE] [-o FILE] [--basedir BASEDIR]
                          [--input-encoding INPUT_ENCODING]
                          [--output-encoding OUTPUT_ENCODING]
                          [--processes PROCESSES] [--chunksize CHUNKSIZE]
                          [-v | -q]
                          [FILE [FILE ...]]

    positional arguments:
      FILE

    optional arguments:
      -h, --help            show this help message and exit
      -c FILE, --config FILE
                            configuration file
      -o FILE, --output FILE
                            output file
      --basedir BASEDIR     base directory
      --input-encoding INPUT_ENCODING
                            encoding of input source
      --output-encoding OUTPUT_ENCODING
                            encoding of output destination
      --processes PROCESSES
                            count of processes
      --chunksize CHUNKSIZE
                            a number of chunks submitted to the process pool
      -v, --verbose         set logging to verbose mode
      -q, --quiet           set logging to quiet mode

Edit this script on your own :D

Examples
========

Example scripts exist in git repository.

* csv2db.py: read csv data and import database via 'SQLAlchemy'.
* csv2gexf.py: read csv data and dump them by GEXF format via 'NetworkX'.
* csv2json.py: read csv data and dump them by JSON format.
* csv2kml.py: read csv data and dump them by KML format via 'simplekml'.
* logfile.py: parse Apache access log and create report.
* logparams.py: parse Apache access log and analyze query parameters.
"""

__title__ = 'clitool'
__version__ = '0.4.1'
__author__ = 'KITAZAKI Shigeru'

# Constant values.

RUNNING_MODE_ENVKEY = 'PYTHON_CLITOOL_ENV'

DEFAULT_ENCODING = 'utf-8'
DEFAULT_RUNNING_MODE = 'development'

PROCESSING_REPORTING_INTERVAL = 10000
PROCESSING_SUCCESS = 'success'
PROCESSING_SKIPPED = 'skipped'
PROCESSING_ERROR = 'error'
PROCESSING_TOTAL = 'total'
PROCESSING_TIME = 'time'

# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
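As a small hedged sketch, the PROCESSING_* constants above can serve as keys of a simple processing summary; using them this way is an assumption suggested by their names, not behaviour documented in the module.

# Hypothetical tally keyed by the clitool constants.
import clitool

report = {
    clitool.PROCESSING_TOTAL: 0,
    clitool.PROCESSING_SUCCESS: 0,
    clitool.PROCESSING_SKIPPED: 0,
    clitool.PROCESSING_ERROR: 0,
}
for record in ("ok", "ok", "skip"):  # stand-in input for the sketch
    report[clitool.PROCESSING_TOTAL] += 1
    key = clitool.PROCESSING_SUCCESS if record == "ok" else clitool.PROCESSING_SKIPPED
    report[key] += 1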
apache-2.0
cjaymes/pyscap
src/scap/model/ocil_2_0/ArtifactResultType.py
# Copyright 2016 Casey Jaymes

# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.

from scap.Model import Model

import logging
logger = logging.getLogger(__name__)

class ArtifactResultType(Model):
    MODEL_MAP = {
        'elements': [
            # children of artifact_value tag
            # TODO: at least one of *_artifact_value
            {'tag_name': 'text_artifact_value', 'class': 'TextArtifactValueElement', 'min': 0, 'max': 1},
            {'tag_name': 'binary_artifact_value', 'class': 'BinaryArtifactValueElement', 'min': 0, 'max': 1},
            {'tag_name': 'reference_artifact_value', 'class': 'ReferenceArtifactValueElement', 'min': 0, 'max': 1},
            {'tag_name': 'provider', 'type': 'ProviderValuePattern', 'min': 1, 'max': 1},
            {'tag_name': 'submitter', 'class': 'UserType', 'min': 1, 'max': 1},
        ],
        'attributes': {
            'artifact_ref': {'type': 'ArtifactIDPattern', 'required': True},
            'timestamp': {'type': 'DateTimeType', 'required': True},
        }
    }
gpl-3.0
Alwnikrotikz/open-hea
src/openhea/importdata.py
import os, sys from datetime import date import xlrd from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine from pprint import PrettyPrinter from model.config_parser import OpenHEAConfig from model.mapper import LivelihoodZone pp = PrettyPrinter() def transpose(grid): return zip(*grid) def removeBlankRows(grid): return [list(row) for row in grid if any(row)] def removeBlankRowsAndColumns(grid): return removeBlankRows(transpose(removeBlankRows(transpose(grid)))) def sheetToGrid(sheet): grid = [] for rownum in range(sheet.nrows): grid.append(sheet.row_values(rownum)) return grid def sheetToGridNoBlank(sheet): return removeBlankRowsAndColumns(sheetToGrid(sheet)) def testPrint(obj): if test_print: pp.pprint(obj) class DataImporter: def __init__(self, spreadsheet): # database session config_file = os.path.join(os.path.dirname(__file__), 'openhea.cfg') self.config = OpenHEAConfig() read = self.config.read(config_file) if len(read) != 1: print 'Need openhea.cfg setup with database parameters' sys.exit(1) cs = self.config.sqlalchemy_connection_string() engine = create_engine(cs, echo=True) Session = sessionmaker(bind=engine) self.session = Session() # open workbook self.workbook = xlrd.open_workbook(spreadsheet) def saveSiteData(self): """Expecting site_data to look like: [ ['One sample point', ''] ['Country name', 'Namibia'] ['LZ name', 'Caprivi Lowland Maize and Cattle Zone'] ['Name of village or settlement', 'avillage'] ['Interview date', '40321.0'] ['Interviewer name', 'James Acidri'] ['Interview number', '1'] ['Start of reference/ consumption year', 'March'] ] Note that the date being a number is an Excel thing - see code for how we deal with it """ site_data = sheetToGridNoBlank(self.workbook.sheet_by_index(0)) testPrint(site_data) DATACOL = 1 project = site_data[1][DATACOL] livelihoodzone = site_data[2][DATACOL] # TODO: not currently in database schema #village_name = site_data[3][DATACOL] date_tuple = xlrd.xldate_as_tuple(site_data[4][DATACOL], self.workbook.datemode) datecreated = date(date_tuple[0], date_tuple[1], date_tuple[2]) createdby = site_data[5][DATACOL] # TODO: not currently in database schema #interview_number = site_data[6][DATACOL] consumptionyearbegins = site_data[7][DATACOL] lz = Livelihoodzone( livelihoodzone=livelihoodzone, createdby=createdby, datecreated=datecreated, consumptionyearbegins=consumptionyearbegins) self.session.add(lz) self.session.commit() def saveExpenditureData(self): """Expecting expenditure_data to look like: [ ['', '', '', '', '', u'HOUSEHOLD EXPENDITURE', '', '', '', '', '', '', '', '', ''], ['', '', '', u'WG1 Lower', u'WG1 upper', u'WG2 Lower', u'WG2 Upper', u'WG3 Lower', u'WG3 Upper', u'WG4 Lower', u'WG4 Upper', u'WG1', u'WG2', u'WG3', u'WG4'], [u'Category', u'Food type', u'Unit', '', '', '', u'No. 
Units purchased', '', '', '', '', '', u'Price per unit', '', ''], [u'Staple food', u'Maize meal', u'Kg', 390.0, 390.0, 208.0, 208.0, 182.0, 182.0, 97.5, 97.5, 4.0, 4.0, 4.0, 4.0], [u'Non-staple food', u'Sugar', u'Kg', 11.0, 11.0, 22.0, 22.0, 36.0, 36.0, 18.0, 18.0, 7.5, 11.0, 6.0, 8.5], ['', '', '', '', '', '', u'Annual expenditure', '', '', '', '', '', '', '', ''], [u'Household items', u'Candles', u'N$', 92.0, 92.0, 95.0, 95.0, 164.0, 164.0, 235.0, 235.0, '', '', '', ''], ['', u'Soap/Vaseline', u'N$', 258.0, 258.0, 480.0, 480.0, 326.0, 326.0, 597.0, 597.0, '', '', '', ''], ['', u'Kerosine', u'N$', 0.0, 0.0, 0.0, 0.0, 140.0, 140.0, 360.0, 360.0, '', '', '', ''], [u'Essential inputs', u'Tools', '', 200.0, 200.0, 200.0, 200.0, 300.0, 300.0, 300.0, 300.0, '', '', '', ''] ] """ expenditure_data = sheetToGridNoBlank(self.workbook.sheet_by_index(3)) testPrint(expenditure_data) # check the first row is what we expect title_row = expenditure_data.pop(0) stripped_title_row = [x for x in title_row if x != ''] assert len(stripped_title_row) == 1 wealth_groups = {} expenditure = [] standard_of_living = [] wg_row = expenditure_data.pop(0) for index, col in enumerate(wg_row): if col.lower().endswith('lower') and wg_row[index+1].lower().endswith('upper'): wealth_group_name = col.split()[0] assert wealth_group_name.lower() == wg_row[index+1].lower().split()[0] wealth_groups[wealth_group_name] = { 'lower_col': index, 'upper_col': index+1, } expenditure_data.pop(0) data = expenditure category = '' for row in expenditure_data: strip_row = [x for x in row if x != ''] # change data dictionary after "Annual Expenditure" if len(strip_row) == 1 and strip_row[0].lower().startswith('annual'): data = standard_of_living continue # cache category - it is often not repeated, so we will keep using the same # value until it changes datadict = {} if row[0]: category = row[0] datadict['category'] = category datadict['type'] = row[1] datadict['unit'] = row[2] for wg in wealth_groups.keys(): datadict[wg] = { 'lower': row[wealth_groups[wg]['lower_col']], 'upper': row[wealth_groups[wg]['upper_col']], } data.append(datadict) def saveWealthGroupAssetsData(self): """Expecting wgassets_data to look like: [['', u'Wealth group characteristics', '', '', '', '', '', '', '', '', '', '', '', ''], ['', '', '', u'WG1', u'WG2', u'WG3', u'WG4', u'WG5 etc', '', '', '', '', '', ''], ['', u'Wealth group name', '', u'very poor', u'poor ', u'middle ', u'better off', '', '', '', '', '', '', ''], ['', u'Percent in wealth group', '', 0.31, 0.39, 0.22, 0.08, '', '', '', '', '', '', ''], ['', u'Number of people in household', '', 7.0, 7.0, 6.0, 5.0, '', '', '', '', '', '', ''], ['', u'Wealth group rank 1= poorest', '', 1.0, 2.0, 3.0, 4.0, '', '', '', '', '', '', ''], [u'ASSETS', '', '', '', '', u'Asset holdings', '', '', '', '', '', '', u'Asset price', ''], ['', '', '', u'WG1', u'WG1', u'WG2', u'WG2', u'WG3', u'WG3', u'WG4 ', u'WG4', u'WG5 etc', '', ''], ['', '', '', u'Lower', u'Upper', u'Lower', u'Upper', u'Lower', u'Upper', u'Lower', u'Upper', '', u'Lower', u'Upper'], [u'Category', u'Asset Type', u'Unit', '', '', '', '', '', '', '', '', '', '', ''], [u'Land', u'Upland', u'Acre', 1.25, 2.0, 2.5, 3.0, 20.0, 25.0, 22.5, 30.0, '', '', ''], ['', u'Owned Irrigated', u'Acre', 0.5, 1.0, 1.0, 2.0, 1.0, 2.0, '', '', '', '', ''], [u'Trees', u'Mango', u'Item', 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 5.0, '', '', ''], [u'Other tradeable goods', u'Cell phone', u'Item', 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, '', '', ''], ['', u'Ox plough', u'Item', 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 1.0, 1.0, '', '', ''], ['', u'Livestock assets setup missing', '', '', '', '', '', '', '', '', '', '', '', ''], [u'Livestock', u'Cattle', u'Item', 0.0, 5.0, 3.0, 6.0, 7.0, 14.0, 35.0, 87.0, '', 300.0, 300.0], ['', u'Goats', u'Item', 0.0, 6.0, 7.0, 11.0, 12.0, 23.0, 20.0, 22.0, '', 50.0, 50.0], ['', u'?cash?foodstocks', '', '', '', '', '', '', '', '', '', '', '', '']] """ wgassets_data = sheetToGridNoBlank(self.workbook.sheet_by_index(1)) # check the first row has one cell (the title) title_row = wgassets_data[0] stripped_title_row = [x for x in title_row if x != ''] assert len(stripped_title_row) == 1 wealth_groups = {} category = '' data = [] wealth_groups_list = set(wgassets_data[7]) wealth_groups_list.remove('') wealth_groups = {} for wg in wealth_groups_list: wealth_groups[wg] = { 'upper':'', 'lower':'', } for row in wgassets_data[10:]: this_wealth_groups = wealth_groups.copy() # cache category - it is often not repeated, so we will keep using the same # value until it changes if row[0]: category = row[0] datadict = {} datadict['category'] = category datadict['type'] = row[1] datadict['unit'] = row[2] for index,val in enumerate(row[3:]): if val != '' and wgassets_data[7][index+3] != '': this_wealth_groups[wgassets_data[7][index+3]][wgassets_data[8][index+3].lower()] = val datadict['data'] = this_wealth_groups data.append(datadict) def main(doc): di = DataImporter(doc) # TODO: reinstate commented out methods once everything is working di.saveSiteData() di.saveExpenditureData() di.saveWealthGroupAssetsData() if __name__ == '__main__': test_print = True sample_doc = os.path.join(os.path.dirname(__file__), '..', '..', 'EXAMPLEHEADATA.xls') sys.exit(main(sample_doc))
lgpl-2.1
whatevsz/rbackupd
rbackupd/config/configmanager.py
# -*- encoding: utf-8 -*-

# Copyright (c) 2013 Hannes Körber <[email protected]>

import logging

import configobj
import validate

logger = logging.getLogger(__name__)


class ConfigManager(configobj.ConfigObj):
    """
    This class is derived from ConfigObj and validates the configuration file
    automatically.

    :param path: The path to the configuration file.
    :type path: str
    :param configspec: The path to the configspec describing the structure of
                       the configuration file. Consult the configobj
                       documentation for details.
    :type configspec: str
    :raise ValidationError: if the validation fails
    :raise IOError: if the configuration file or the configspec is not found
    """

    def __init__(self, path, configspec):
        configobj.ConfigObj.__init__(
            self,
            infile=path,
            list_values=True,
            create_empty=False,
            file_error=True,
            interpolation=False,
            raise_errors=True,
            configspec=configspec,
            write_empty_values=True)

        logger.debug("Validating configuration file.")
        validator = validate.Validator()
        try:
            result = self.validate(validator, preserve_errors=True)
        except IOError:
            raise
        if result is not True:
            message = ""
            for entry in configobj.flatten_errors(self, result):
                (sections, key, error) = entry
                expanded_section = ".".join(sections)
                if error is False:
                    message += ("In section \"%s\": key \"%s\" not found\n" %
                                (expanded_section, key))
                else:
                    message += ("In section \"%s\": failed validation for key "
                                "\"%s\"\n" % (expanded_section, key))
            message = message.rstrip("\n")
            raise ValidationError(message)


class ValidationError(Exception):
    """
    This exception is raised when the validation of the configuration file
    fails.

    :param message: A message with details about how the validation failed.
    :type message: str
    """

    def __init__(self, message):
        Exception.__init__(self)
        self.message = message

    def __str__(self):
        return self.message


ConfigError = configobj.ConfigObjError
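A minimal usage sketch of the class above, assuming hypothetical paths for the configuration file and configspec; ConfigManager validates on construction, so failures surface as the exceptions documented in its docstring.

from rbackupd.config.configmanager import ConfigManager, ValidationError

try:
    # Both paths are placeholders for this sketch.
    config = ConfigManager("/etc/rbackupd/rbackupd.conf",
                           "/etc/rbackupd/configspec.conf")
except ValidationError as err:
    print("configuration invalid: %s" % err)
except IOError as err:
    print("configuration or configspec not found: %s" % err)
else:
    # ConfigManager subclasses configobj.ConfigObj, so once loaded it behaves
    # like a nested dictionary of sections.
    print(list(config.keys()))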
gpl-3.0
ArchiFleKs/magnum
magnum/drivers/swarm_fedora_atomic_v1/template_def.py
# Copyright 2016 Rackspace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from magnum.drivers.heat import swarm_fedora_template_def as sftd


class AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition):
    """Docker swarm template for a Fedora Atomic VM."""

    @property
    def driver_module_path(self):
        return __name__[:__name__.rindex('.')]

    @property
    def template_path(self):
        return os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'templates/cluster.yaml')
apache-2.0
Ramblurr/Far-Horizons
tools/game_packet.py
#!/usr/bin/env python
"""
This script will create a zip file containing a first turn packet for a player.
"""

import fhutils
import os, sys, tempfile, subprocess
import getopt


def main(argv):
    config_file = None
    discard = False
    try:
        opts, args = getopt.getopt(argv, "hc:", ["help", "config="])
    except getopt.GetoptError:
        print(__doc__)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(__doc__)
            sys.exit(0)
        elif opt in ("-c", "--config"):
            config_file = arg

    if config_file:
        config = fhutils.GameConfig(config_file)
    else:
        config = fhutils.GameConfig()
    game = config.gameslist[0]  # for now we only support a single game
    game_name = game['name']
    data_dir = game['datadir']
    bin_dir = config.bindir

    os.chdir(data_dir)

    # prepare galaxy list
    output = fhutils.run(bin_dir, "ListGalaxy", ["-p"])
    with open("galaxy.list.txt", "w") as f:
        f.write(output)

    players = fhutils.Game().players
    for p in players:
        try:
            subprocess.check_call(["zip", "sp%s.zip" % (p['num']),
                                   "sp%s.rpt.t1" % (p['num']),
                                   "galaxy.map.pdf", "galaxy.map.txt",
                                   "game_policies.pdf", "galaxy.list.txt"])
        except subprocess.CalledProcessError:
            # zip failed for this player's packet
            print("ERROR making zip: sp%s.zip" % (p['num']))


if __name__ == "__main__":
    main(sys.argv[1:])
gpl-2.0
ITLabProject2016/internet_technology_lab_project
prepare_images.py
import os
import Image
import glob

# gets images from a dir recursively, resizes them to optimal width and copies them to a new dir

##################################
# change me to your pictures dir!!!!
DIR = "/home/kostis/Desktop/img/"

# story icons are saved at "./populate_img/stories" with width 100
# story point icons are saved at "./populate_img/points" with width 350
OUT_DIR = "./populate_img/points"
opt_width = 300;
##################################

G = glob.glob(DIR+"*.jpg")
G1 = glob.glob(DIR+"*.png")
G2 = glob.glob(DIR+"*.JPEG")
G3 = glob.glob(DIR+"*.JPG")
G = G + G1 + G2 + G3

for filePath in G:
    file = open(filePath)
    img = Image.open(file)
    width = img.size[0]
    height = img.size[1]
    ratio = opt_width / float(width)
    width = int(width * ratio)
    height = int(height * ratio)
    img = img.resize([width,height])
    img.save(os.path.join(os.path.join(OUT_DIR, os.path.splitext(os.path.basename(filePath))[0]))+".jpg")
    print os.path.join(os.path.join(OUT_DIR, os.path.splitext(os.path.basename(filePath))[0]))+".jpg"
apache-2.0
avaitla/Haskell-to-C---Bridge
pygccxml-1.0.0/unittests/filters_tester.py
# Copyright 2004-2008 Roman Yakovenko. # Distributed under the Boost Software License, Version 1.0. (See # accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import os import unittest import autoconfig import parser_test_case from pygccxml import utils from pygccxml import parser from pygccxml import declarations class tester_t( parser_test_case.parser_test_case_t ): global_ns = None COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE def __init__(self, *args ): parser_test_case.parser_test_case_t.__init__( self, *args ) self.header = 'declarations_calldef.hpp' self.global_ns = None def setUp(self): if not tester_t.global_ns: decls = parser.parse( [self.header], self.config ) tester_t.global_ns = declarations.get_global_namespace( decls ) tester_t.global_ns.init_optimizer() self.global_ns = tester_t.global_ns def test_regex( self ): criteria = declarations.regex_matcher_t( 'oper.*' , lambda decl: decl.name ) operators = declarations.matcher.find( criteria, self.global_ns ) operators = filter( lambda d: not d.is_artificial, operators ) self.failUnless( 6 == len(operators) ) def test_access_type( self ): criteria = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC ) public_members = declarations.matcher.find( criteria, self.global_ns ) if '0.9' in public_members[0].compiler: public_members = filter( lambda d: not d.is_artificial, public_members ) self.failUnless( 16 == len( public_members ) ) else: self.failUnless( 20 == len( public_members ) ) def test_or_matcher( self ): criteria1 = declarations.regex_matcher_t( 'oper.*' , lambda decl: decl.name ) criteria2 = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC ) found = declarations.matcher.find( criteria1 | criteria2, self.global_ns ) if '0.9' in found[0].compiler: found = filter( lambda d: not d.is_artificial, found ) self.failUnless( 15 <= len( found ) <= 21) else: self.failUnless( 19 <= len( found ) <= 25) def test_and_matcher( self ): criteria1 = declarations.regex_matcher_t( 'oper.*' , lambda decl: decl.name ) criteria2 = declarations.access_type_matcher_t( declarations.ACCESS_TYPES.PUBLIC ) found = declarations.matcher.find( criteria1 & criteria2, self.global_ns ) found = filter( lambda d: not d.is_artificial, found ) self.failUnless( len( found ) <= 6 ) def test_not_matcher( self ): criteria1 = declarations.regex_matcher_t( 'oper.*' , lambda decl: decl.name ) found = declarations.matcher.find( ~( ~criteria1 ), self.global_ns ) found = filter( lambda d: not d.is_artificial, found ) self.failUnless( len( found ) == 6 ) def create_suite(): suite = unittest.TestSuite() suite.addTest( unittest.makeSuite(tester_t)) return suite def run_suite(): unittest.TextTestRunner(verbosity=2).run( create_suite() ) if __name__ == "__main__": run_suite()
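The tests above exercise pygccxml's composable matchers; as a hedged sketch, the same combinators can be used outside the test harness. Here `global_ns` is assumed to have been obtained exactly as in setUp() above (parser.parse followed by declarations.get_global_namespace); only the matcher composition is shown.

from pygccxml import declarations

# `global_ns` is assumed to come from parser.parse(...) followed by
# declarations.get_global_namespace(...), as in the setUp() method above.
criteria = (declarations.regex_matcher_t("oper.*", lambda decl: decl.name)
            & declarations.access_type_matcher_t(declarations.ACCESS_TYPES.PUBLIC))
public_operators = declarations.matcher.find(criteria, global_ns)

# The matchers compose with |, & and ~, e.g. everything that is NOT a public operator.
everything_else = declarations.matcher.find(~criteria, global_ns)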
bsd-3-clause
googleapis/python-compute
google/cloud/compute_v1/services/routers/transports/rest.py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import gapic_v1 # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.auth.transport.requests import AuthorizedSession from google.cloud.compute_v1.types import compute from .base import RoutersTransport, DEFAULT_CLIENT_INFO class RoutersRestTransport(RoutersTransport): """REST backend transport for Routers. The Routers API. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends JSON representations of protocol buffers over HTTP/1.1 """ def __init__( self, *, host: str = "compute.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client certificate to configure mutual TLS HTTP channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Run the base constructor # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object super().__init__( host=host, credentials=credentials, client_info=client_info, ) self._session = AuthorizedSession( self._credentials, default_host=self.DEFAULT_HOST ) if client_cert_source_for_mtls: self._session.configure_mtls_channel(client_cert_source_for_mtls) self._prep_wrapped_messages(client_info) def aggregated_list( self, request: compute.AggregatedListRoutersRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.RouterAggregatedList: r"""Call the aggregated list method over HTTP. Args: request (~.compute.AggregatedListRoutersRequest): The request object. A request message for Routers.AggregatedList. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.RouterAggregatedList: Contains a list of routers. """ # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/aggregated/routers".format( host=self._host, project=request.project, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.AggregatedListRoutersRequest.filter in request: query_params["filter"] = request.filter if compute.AggregatedListRoutersRequest.include_all_scopes in request: query_params["includeAllScopes"] = request.include_all_scopes if compute.AggregatedListRoutersRequest.max_results in request: query_params["maxResults"] = request.max_results if compute.AggregatedListRoutersRequest.order_by in request: query_params["orderBy"] = request.order_by if compute.AggregatedListRoutersRequest.page_token in request: query_params["pageToken"] = request.page_token if compute.AggregatedListRoutersRequest.return_partial_success in request: query_params["returnPartialSuccess"] = request.return_partial_success # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.RouterAggregatedList.from_json( response.content, ignore_unknown_fields=True ) def delete( self, request: compute.DeleteRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Call the delete method over HTTP. Args: request (~.compute.DeleteRouterRequest): The request object. A request message for Routers.Delete. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Operation: Represents an Operation resource. 
Google Compute Engine has three Operation resources: - `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__ \* `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__ \* `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__ You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the ``globalOperations`` resource. - For regional operations, use the ``regionOperations`` resource. - For zonal operations, use the ``zonalOperations`` resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.DeleteRouterRequest.request_id in request: query_params["requestId"] = request.request_id # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.delete(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def get( self, request: compute.GetRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Router: r"""Call the get method over HTTP. Args: request (~.compute.GetRouterRequest): The request object. A request message for Routers.Get. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Router: Represents a Cloud Router resource. For more information about Cloud Router, read the Cloud Router overview. 
""" # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.Router.from_json(response.content, ignore_unknown_fields=True) def get_nat_mapping_info( self, request: compute.GetNatMappingInfoRoutersRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.VmEndpointNatMappingsList: r"""Call the get nat mapping info method over HTTP. Args: request (~.compute.GetNatMappingInfoRoutersRequest): The request object. A request message for Routers.GetNatMappingInfo. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.VmEndpointNatMappingsList: Contains a list of VmEndpointNatMappings. """ # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.GetNatMappingInfoRoutersRequest.filter in request: query_params["filter"] = request.filter if compute.GetNatMappingInfoRoutersRequest.max_results in request: query_params["maxResults"] = request.max_results if compute.GetNatMappingInfoRoutersRequest.order_by in request: query_params["orderBy"] = request.order_by if compute.GetNatMappingInfoRoutersRequest.page_token in request: query_params["pageToken"] = request.page_token if compute.GetNatMappingInfoRoutersRequest.return_partial_success in request: query_params["returnPartialSuccess"] = request.return_partial_success # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. 
if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.VmEndpointNatMappingsList.from_json( response.content, ignore_unknown_fields=True ) def get_router_status( self, request: compute.GetRouterStatusRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.RouterStatusResponse: r"""Call the get router status method over HTTP. Args: request (~.compute.GetRouterStatusRouterRequest): The request object. A request message for Routers.GetRouterStatus. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.RouterStatusResponse: """ # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/getRouterStatus".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.RouterStatusResponse.from_json( response.content, ignore_unknown_fields=True ) def insert( self, request: compute.InsertRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Call the insert method over HTTP. Args: request (~.compute.InsertRouterRequest): The request object. A request message for Routers.Insert. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__ \* `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__ \* `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__ You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the ``globalOperations`` resource. - For regional operations, use the ``regionOperations`` resource. - For zonal operations, use the ``zonalOperations`` resource. For more information, read Global, Regional, and Zonal Resources. 
(== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Jsonify the request body body = compute.Router.to_json( request.router_resource, including_default_value_fields=False, use_integers_for_enums=False, ) # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers".format( host=self._host, project=request.project, region=request.region, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.InsertRouterRequest.request_id in request: query_params["requestId"] = request.request_id # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.post(url, headers=headers, data=body,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def list( self, request: compute.ListRoutersRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.RouterList: r"""Call the list method over HTTP. Args: request (~.compute.ListRoutersRequest): The request object. A request message for Routers.List. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.RouterList: Contains a list of Router resources. """ # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers".format( host=self._host, project=request.project, region=request.region, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.ListRoutersRequest.filter in request: query_params["filter"] = request.filter if compute.ListRoutersRequest.max_results in request: query_params["maxResults"] = request.max_results if compute.ListRoutersRequest.order_by in request: query_params["orderBy"] = request.order_by if compute.ListRoutersRequest.page_token in request: query_params["pageToken"] = request.page_token if compute.ListRoutersRequest.return_partial_success in request: query_params["returnPartialSuccess"] = request.return_partial_success # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. 
if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.RouterList.from_json( response.content, ignore_unknown_fields=True ) def patch( self, request: compute.PatchRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Call the patch method over HTTP. Args: request (~.compute.PatchRouterRequest): The request object. A request message for Routers.Patch. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__ \* `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__ \* `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__ You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the ``globalOperations`` resource. - For regional operations, use the ``regionOperations`` resource. - For zonal operations, use the ``zonalOperations`` resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Jsonify the request body body = compute.Router.to_json( request.router_resource, including_default_value_fields=False, use_integers_for_enums=False, ) # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.PatchRouterRequest.request_id in request: query_params["requestId"] = request.request_id # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.patch(url, headers=headers, data=body,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def preview( self, request: compute.PreviewRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.RoutersPreviewResponse: r"""Call the preview method over HTTP. Args: request (~.compute.PreviewRouterRequest): The request object. A request message for Routers.Preview. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: ~.compute.RoutersPreviewResponse: """ # Jsonify the request body body = compute.Router.to_json( request.router_resource, including_default_value_fields=False, use_integers_for_enums=False, ) # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}/preview".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.post(url, headers=headers, data=body,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.RoutersPreviewResponse.from_json( response.content, ignore_unknown_fields=True ) def update( self, request: compute.UpdateRouterRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Call the update method over HTTP. Args: request (~.compute.UpdateRouterRequest): The request object. A request message for Routers.Update. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__ \* `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__ \* `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__ You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the ``globalOperations`` resource. - For regional operations, use the ``regionOperations`` resource. - For zonal operations, use the ``zonalOperations`` resource. For more information, read Global, Regional, and Zonal Resources. 
(== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==) """ # Jsonify the request body body = compute.Router.to_json( request.router_resource, including_default_value_fields=False, use_integers_for_enums=False, ) # TODO(yon-mg): need to handle grpc transcoding and parse url correctly # current impl assumes basic case of grpc transcoding url = "https://{host}/compute/v1/projects/{project}/regions/{region}/routers/{router}".format( host=self._host, project=request.project, region=request.region, router=request.router, ) # TODO(yon-mg): handle nested fields corerctly rather than using only top level fields # not required for GCE query_params = {} if compute.UpdateRouterRequest.request_id in request: query_params["requestId"] = request.request_id # TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here # discards default values # TODO(yon-mg): add test for proper url encoded strings query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()] url += "?{}".format("&".join(query_params)).replace(" ", "+") # Send the request headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.put(url, headers=headers, data=body,) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception # subclass. if response.status_code >= 400: raise core_exceptions.from_http_response(response) # Return the response return compute.Operation.from_json(response.content, ignore_unknown_fields=True) __all__ = ("RoutersRestTransport",)
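A quick aside on the query-string handling flagged in the TODO comments above: the generated transport joins "k=v" pairs by hand and only replaces spaces with '+', so other reserved characters are never percent-encoded. A minimal sketch of the safer approach (not part of the generated client, shown only as an illustration with made-up parameter values):

from urllib.parse import urlencode

# Hypothetical parameters, for illustration only.
query_params = {"filter": 'name eq "my-router"', "maxResults": 25}
base_url = "https://compute.googleapis.com/compute/v1/projects/p/regions/r/routers"

# urlencode percent-escapes reserved characters and encodes spaces as '+',
# instead of the manual '"&".join(...)' plus '.replace(" ", "+")' used above.
url = base_url + "?" + urlencode(query_params)
print(url)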
license: apache-2.0

repository: SKA-ScienceDataProcessor/algorithm-reference-library
file: processing_components/griddata/convolution_functions.py
# """ Functions that define and manipulate ConvolutionFunctions. The griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. The order in the WCS is reversed so the grid_WCS describes UU, VV, DUU, DVV, WW, STOKES, FREQ axes. GridData can be used to hold the Fourier transform of an Image or gridded visibilities. In addition, the convolution function can be stored in a GridData, most probably with finer spatial sampling. """ import copy import logging import numpy from astropy.wcs import WCS from data_models.memory_data_models import GridData, ConvolutionFunction from data_models.memory_data_models import QA from data_models.polarisation import PolarisationFrame from processing_library.image.operations import create_image_from_array log = logging.getLogger(__name__) def convolutionfunction_sizeof(cf: ConvolutionFunction): """ Return size in GB """ return cf.size() def create_convolutionfunction_from_array(data: numpy.array, grid_wcs: WCS, projection_wcs: WCS, polarisation_frame: PolarisationFrame) -> ConvolutionFunction: """ Create a convolution function from an array and wcs's The griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. The order in the WCS is reversed so the grid_WCS describes UU, VV, WW, STOKES, FREQ axes The axes UU,VV have the same physical stride as the image, The axes DUU, DVV are subsampled. Convolution function holds the original sky plane projection in the projection_wcs. :param data: Numpy.array :param grid_wcs: Grid world coordinate system :param projection_wcs: Projection world coordinate system :param polarisation_frame: Polarisation Frame :return: GridData """ fconvfunc = ConvolutionFunction() fconvfunc.polarisation_frame = polarisation_frame fconvfunc.data = data fconvfunc.grid_wcs = grid_wcs.deepcopy() fconvfunc.projection_wcs = projection_wcs.deepcopy() assert isinstance(fconvfunc, ConvolutionFunction), "Type is %s" % type(fconvfunc) return fconvfunc def create_convolutionfunction_from_image(im: numpy.array, nz=1, zstep=1e15, ztype='WW', oversampling=8, support=16): """ Create a convolution function from an image The griddata has axes [chan, pol, z, dy, dx, y, x] where z, y, x are spatial axes in either sky or Fourier plane. The order in the WCS is reversed so the grid_WCS describes UU, VV, WW, STOKES, FREQ axes The axes UU,VV have the same physical stride as the image, The axes DUU, DVV are subsampled. Convolution function holds the original sky plane projection in the projection_wcs. 
:param im: Template Image :param nz: Number of z axes, usually z is W :param zstep: Step in z, usually z is W :param ztype: Type of z, usually 'WW' :param oversampling: Oversampling (size of dy, dx axes) :param support: Support of final convolution function (size of y, x axes) :return: Convolution Function """ assert len(im.shape) == 4 assert im.wcs.wcs.ctype[0] == 'RA---SIN' assert im.wcs.wcs.ctype[1] == 'DEC--SIN' d2r = numpy.pi / 180.0 # WCS Coords are [x, y, dy, dx, z, pol, chan] where x, y, z are spatial axes in real space or Fourier space # Array Coords are [chan, pol, z, dy, dx, y, x] where x, y, z are spatial axes in real space or Fourier space cf_wcs = WCS(naxis=7) cf_wcs.wcs.ctype[0] = 'UU' cf_wcs.wcs.ctype[1] = 'VV' cf_wcs.wcs.ctype[2] = 'DUU' cf_wcs.wcs.ctype[3] = 'DVV' cf_wcs.wcs.ctype[4] = ztype cf_wcs.wcs.ctype[5] = im.wcs.wcs.ctype[2] cf_wcs.wcs.ctype[6] = im.wcs.wcs.ctype[3] cf_wcs.wcs.axis_types[0] = 0 cf_wcs.wcs.axis_types[1] = 0 cf_wcs.wcs.axis_types[2] = 0 cf_wcs.wcs.axis_types[3] = 0 cf_wcs.wcs.axis_types[4] = 0 cf_wcs.wcs.axis_types[5] = im.wcs.wcs.axis_types[2] cf_wcs.wcs.axis_types[6] = im.wcs.wcs.axis_types[3] cf_wcs.wcs.crval[0] = 0.0 cf_wcs.wcs.crval[1] = 0.0 cf_wcs.wcs.crval[2] = 0.0 cf_wcs.wcs.crval[3] = 0.0 cf_wcs.wcs.crval[4] = 0.0 cf_wcs.wcs.crval[5] = im.wcs.wcs.crval[2] cf_wcs.wcs.crval[6] = im.wcs.wcs.crval[3] cf_wcs.wcs.crpix[0] = float(support // 2) + 1.0 cf_wcs.wcs.crpix[1] = float(support // 2) + 1.0 cf_wcs.wcs.crpix[2] = float(oversampling // 2) + 1.0 cf_wcs.wcs.crpix[3] = float(oversampling // 2) + 1.0 cf_wcs.wcs.crpix[4] = float(nz // 2 + 1) cf_wcs.wcs.crpix[5] = im.wcs.wcs.crpix[2] cf_wcs.wcs.crpix[6] = im.wcs.wcs.crpix[3] # The sampling on the UU and VV axes should be the same as for the image. # The sampling on the DUU and DVV axes should be oversampling times finer. cf_wcs.wcs.cdelt[0] = 1.0 / (im.shape[3] * d2r * im.wcs.wcs.cdelt[0]) cf_wcs.wcs.cdelt[1] = 1.0 / (im.shape[2] * d2r * im.wcs.wcs.cdelt[1]) cf_wcs.wcs.cdelt[2] = cf_wcs.wcs.cdelt[0] / oversampling cf_wcs.wcs.cdelt[3] = cf_wcs.wcs.cdelt[1] / oversampling cf_wcs.wcs.cdelt[4] = zstep cf_wcs.wcs.cdelt[5] = im.wcs.wcs.cdelt[2] cf_wcs.wcs.cdelt[6] = im.wcs.wcs.cdelt[3] grid_data = im.data[..., numpy.newaxis, :, :].astype('complex') grid_data[...] 
= 0.0 nchan, npol, ny, nx = im.shape fconvfunc = ConvolutionFunction() fconvfunc.polarisation_frame = im.polarisation_frame fconvfunc.data = numpy.zeros([nchan, npol, nz, oversampling, oversampling, support, support], dtype='complex') fconvfunc.grid_wcs = cf_wcs.deepcopy() fconvfunc.projection_wcs = im.wcs.deepcopy() assert isinstance(fconvfunc, ConvolutionFunction), "Type is %s" % type(fconvfunc) return fconvfunc def convert_convolutionfunction_to_image(cf): """ Convert ConvolutionFunction to an image :param cf: :return: """ return create_image_from_array(cf.data, cf.grid_wcs, cf.polarisation_frame) def apply_bounding_box_convolutionfunction(cf, fractional_level=1e-4): """Apply a bounding box to a convolution function :param cf: :param fractional_level: :return: bounded convolution function """ newcf = copy_convolutionfunction(cf) nx = newcf.data.shape[-1] ny = newcf.data.shape[-2] mask = numpy.max(numpy.abs(newcf.data), axis=(0, 1, 2, 3, 4)) coords = numpy.argwhere(mask > fractional_level * numpy.max(numpy.abs(cf.data))) crpx = int(numpy.round(cf.grid_wcs.wcs.crpix[0])) crpy = int(numpy.round(cf.grid_wcs.wcs.crpix[1])) x0, y0 = coords.min(axis=0) dx = crpx - x0 dy = crpy - y0 x0 -= 1 y0 -= 1 x1 = crpx + dx - 1 y1 = crpy + dy - 1 newcf.data = newcf.data[..., y0:y1, x0:x1] nny, nnx = newcf.data.shape[-2], newcf.data.shape[-1] newcf.grid_wcs.wcs.crpix[0] += nnx / 2 - nx / 2 newcf.grid_wcs.wcs.crpix[1] += nny / 2 - ny / 2 return newcf def calculate_bounding_box_convolutionfunction(cf, fractional_level=1e-4): """Calculate bounding boxes Returns a list of bounding boxes where each element is (z, (y0, y1), (x0, x1)) These can be used in griddata/degridding. :param cf: :param fractional_level: :return: list of bounding boxes """ bboxes = list() threshold = fractional_level * numpy.max(numpy.abs(cf.data)) for z in range(cf.data.shape[2]): mask = numpy.max(numpy.abs(cf.data[:, :, z, ...]), axis=(0, 1, 2, 3)) coords = numpy.argwhere(mask > threshold) x0, y0 = coords.min(axis=0) x1, y1 = coords.max(axis=0) bboxes.append((z, (y0, y1), (x0, x1))) return bboxes def qa_convolutionfunction(cf, context="") -> QA: """Assess the quality of a convolutionfunction :param cf: :return: QA """ assert isinstance(cf, ConvolutionFunction), cf data = {'shape': str(cf.data.shape), 'max': numpy.max(cf.data), 'min': numpy.min(cf.data), 'rms': numpy.std(cf.data), 'sum': numpy.sum(cf.data), 'medianabs': numpy.median(numpy.abs(cf.data)), 'median': numpy.median(cf.data)} qa = QA(origin="qa_image", data=data, context=context) return qa def copy_convolutionfunction(cf): """Make a copy of a convolution function :param cf: :return: """ assert isinstance(cf, ConvolutionFunction), cf fcf = ConvolutionFunction() fcf.polarisation_frame = cf.polarisation_frame fcf.data = copy.deepcopy(cf.data) fcf.projection_wcs = copy.deepcopy(cf.projection_wcs) fcf.grid_wcs = copy.deepcopy(cf.grid_wcs) if convolutionfunction_sizeof(fcf) >= 1.0: log.debug("copy_convolutionfunction: copied %s convolution function of shape %s, size %.3f (GB)" % (fcf.data.dtype, str(fcf.shape), convolutionfunction_sizeof(fcf))) assert isinstance(fcf, ConvolutionFunction), fcf return fcf
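A hedged usage sketch for the module above: the `model_image` below is a hypothetical ARL Image with the 4-dimensional [chan, pol, y, x] shape and RA---SIN/DEC--SIN WCS that the assertions in create_convolutionfunction_from_image require; in ARL it would typically come from create_image_from_array or a similar helper.

# Build an empty convolution-function template from a sky-model image and inspect it.
cf = create_convolutionfunction_from_image(model_image, nz=1, zstep=1e15,
                                           ztype='WW', oversampling=8, support=16)
print(qa_convolutionfunction(cf, context="empty CF template"))
print("size in GB:", convolutionfunction_sizeof(cf))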
license: apache-2.0

repository: alabs/petateca
file: petateca/apps/userdata/migrations/0006_auto.py
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding M2M table for field viewed_episodes on 'UserProfile' db.create_table('userdata_userprofile_viewed_episodes', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('userprofile', models.ForeignKey(orm['userdata.userprofile'], null=False)), ('episode', models.ForeignKey(orm['serie.episode'], null=False)) )) db.create_unique('userdata_userprofile_viewed_episodes', ['userprofile_id', 'episode_id']) def backwards(self, orm): # Removing M2M table for field viewed_episodes on 'UserProfile' db.delete_table('userdata_userprofile_viewed_episodes') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'serie.actor': { 'Meta': {'object_name': 'Actor'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'poster_of'", 'unique': 'True', 'null': 'True', 'to': "orm['serie.ImageActor']"}), 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'serie.episode': { 'Meta': {'object_name': 'Episode'}, 'air_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'created_time': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'description_es': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'episode': ('django.db.models.fields.IntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_time': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}), 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'poster_of'", 'unique': 'True', 'null': 'True', 'to': "orm['serie.ImageEpisode']"}), 'season': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'episodes'", 'to': "orm['serie.Season']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'title_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'title_es': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'serie.genre': { 'Meta': {'object_name': 'Genre'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'name_en': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'name_es': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'serie.imageactor': { 'Meta': {'object_name': 'ImageActor'}, 'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Actor']"}), 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'serie.imageepisode': { 'Meta': {'object_name': 'ImageEpisode'}, 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'episode': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Episode']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'serie.imageseason': { 'Meta': {'object_name': 'ImageSeason'}, 'creator': ('django.db.models.fields.CharField', [], 
{'max_length': '100', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'season': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Season']"}), 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'serie.imageserie': { 'Meta': {'object_name': 'ImageSerie'}, 'creator': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_poster': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['serie.Serie']"}), 'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'serie.network': { 'Meta': {'object_name': 'Network'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'serie.role': { 'Meta': {'unique_together': "(('serie', 'actor', 'role'),)", 'object_name': 'Role'}, 'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['serie.Actor']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'role': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'serie': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['serie.Serie']"}), 'sortorder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, 'serie.season': { 'Meta': {'object_name': 'Season'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'poster_of'", 'unique': 'True', 'null': 'True', 'to': "orm['serie.ImageSeason']"}), 'season': ('django.db.models.fields.IntegerField', [], {}), 'serie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'season'", 'to': "orm['serie.Serie']"}) }, 'serie.serie': { 'Meta': {'object_name': 'Serie'}, 'actors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['serie.Actor']", 'null': 'True', 'through': "orm['serie.Role']", 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'description_en': ('django.db.models.fields.TextField', [], {}), 'description_es': ('django.db.models.fields.TextField', [], {}), 'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'genres': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'series'", 'symmetrical': 'False', 'to': "orm['serie.Genre']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'name_en': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'name_es': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'network': ('django.db.models.fields.related.ForeignKey', 
[], {'related_name': "'series'", 'to': "orm['serie.Network']"}), 'poster': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'poster_of'", 'unique': 'True', 'null': 'True', 'to': "orm['serie.ImageSerie']"}), 'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}), 'runtime': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'slug_name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}) }, 'userdata.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'favorite_series': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_of'", 'symmetrical': 'False', 'to': "orm['serie.Serie']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}), 'viewed_episodes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'viewed_episodes'", 'symmetrical': 'False', 'to': "orm['serie.Episode']"}) }, 'userdata.usertoinvite': { 'Meta': {'object_name': 'UserToInvite'}, 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}), 'has_been_invited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mail': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}) } } complete_apps = ['userdata']
license: agpl-3.0

repository: colincsl/TemporalConvolutionalNetworks
file: code/tf_models.py
import numpy as np from keras.models import Sequential, Model from keras.layers import Input, Dense, TimeDistributed, merge, Lambda from keras.layers.core import * from keras.layers.convolutional import * from keras.layers.recurrent import * import tensorflow as tf from keras import backend as K from keras.activations import relu from functools import partial clipped_relu = partial(relu, max_value=5) def max_filter(x): # Max over the best filter score (like ICRA paper) max_values = K.max(x, 2, keepdims=True) max_flag = tf.greater_equal(x, max_values) out = x * tf.cast(max_flag, tf.float32) return out def channel_normalization(x): # Normalize by the highest activation max_values = K.max(K.abs(x), 2, keepdims=True)+1e-5 out = x / max_values return out def WaveNet_activation(x): tanh_out = Activation('tanh')(x) sigm_out = Activation('sigmoid')(x) return Merge(mode='mul')([tanh_out, sigm_out]) # ------------------------------------------------------------- def temporal_convs_linear(n_nodes, conv_len, n_classes, n_feat, max_len, causal=False, loss='categorical_crossentropy', optimizer='adam', return_param_str=False): """ Used in paper: Segmental Spatiotemporal CNNs for Fine-grained Action Segmentation Lea et al. ECCV 2016 Note: Spatial dropout was not used in the original paper. It tends to improve performance a little. """ inputs = Input(shape=(max_len,n_feat)) if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = Convolution1D(n_nodes, conv_len, input_dim=n_feat, input_length=max_len, border_mode='same', activation='relu')(inputs) if causal: model = Cropping1D((0,conv_len//2))(model) model = SpatialDropout1D(0.3)(model) model = TimeDistributed(Dense(n_classes, activation="softmax" ))(model) model = Model(input=inputs, output=model) model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal") if return_param_str: param_str = "tConv_C{}".format(conv_len) if causal: param_str += "_causal" return model, param_str else: return model def ED_TCN(n_nodes, conv_len, n_classes, n_feat, max_len, loss='categorical_crossentropy', causal=False, optimizer="rmsprop", activation='norm_relu', return_param_str=False): n_layers = len(n_nodes) inputs = Input(shape=(max_len,n_feat)) model = inputs # ---- Encoder ---- for i in range(n_layers): # Pad beginning of sequence to prevent usage of future data if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = Convolution1D(n_nodes[i], conv_len, border_mode='same')(model) if causal: model = Cropping1D((0,conv_len//2))(model) model = SpatialDropout1D(0.3)(model) if activation=='norm_relu': model = Activation('relu')(model) model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model) elif activation=='wavenet': model = WaveNet_activation(model) else: model = Activation(activation)(model) model = MaxPooling1D(2)(model) # ---- Decoder ---- for i in range(n_layers): model = UpSampling1D(2)(model) if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = Convolution1D(n_nodes[-i-1], conv_len, border_mode='same')(model) if causal: model = Cropping1D((0,conv_len//2))(model) model = SpatialDropout1D(0.3)(model) if activation=='norm_relu': model = Activation('relu')(model) model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model) elif activation=='wavenet': model = WaveNet_activation(model) else: model = Activation(activation)(model) # Output FC layer model = TimeDistributed(Dense(n_classes, activation="softmax" ))(model) model = Model(input=inputs, output=model) model.compile(loss=loss, 
optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy']) if return_param_str: param_str = "ED-TCN_C{}_L{}".format(conv_len, n_layers) if causal: param_str += "_causal" return model, param_str else: return model def ED_TCN_atrous(n_nodes, conv_len, n_classes, n_feat, max_len, loss='categorical_crossentropy', causal=False, optimizer="rmsprop", activation='norm_relu', return_param_str=False): n_layers = len(n_nodes) inputs = Input(shape=(None,n_feat)) model = inputs # ---- Encoder ---- for i in range(n_layers): # Pad beginning of sequence to prevent usage of future data if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model) if causal: model = Cropping1D((0,conv_len//2))(model) model = SpatialDropout1D(0.3)(model) if activation=='norm_relu': model = Activation('relu')(model) model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model) elif activation=='wavenet': model = WaveNet_activation(model) else: model = Activation(activation)(model) # ---- Decoder ---- for i in range(n_layers): if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = AtrousConvolution1D(n_nodes[-i-1], conv_len, atrous_rate=n_layers-i, border_mode='same')(model) if causal: model = Cropping1D((0,conv_len//2))(model) model = SpatialDropout1D(0.3)(model) if activation=='norm_relu': model = Activation('relu')(model) model = Lambda(channel_normalization, name="decoder_norm_{}".format(i))(model) elif activation=='wavenet': model = WaveNet_activation(model) else: model = Activation(activation)(model) # Output FC layer model = TimeDistributed(Dense(n_classes, activation="softmax" ))(model) model = Model(input=inputs, output=model) model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy']) if return_param_str: param_str = "ED-TCNa_C{}_L{}".format(conv_len, n_layers) if causal: param_str += "_causal" return model, param_str else: return model def TimeDelayNeuralNetwork(n_nodes, conv_len, n_classes, n_feat, max_len, loss='categorical_crossentropy', causal=False, optimizer="rmsprop", activation='sigmoid', return_param_str=False): # Time-delay neural network n_layers = len(n_nodes) inputs = Input(shape=(max_len,n_feat)) model = inputs inputs_mask = Input(shape=(max_len,1)) model_masks = [inputs_mask] # ---- Encoder ---- for i in range(n_layers): # Pad beginning of sequence to prevent usage of future data if causal: model = ZeroPadding1D((conv_len//2,0))(model) model = AtrousConvolution1D(n_nodes[i], conv_len, atrous_rate=i+1, border_mode='same')(model) # model = SpatialDropout1D(0.3)(model) if causal: model = Cropping1D((0,conv_len//2))(model) if activation=='norm_relu': model = Activation('relu')(model) model = Lambda(channel_normalization, name="encoder_norm_{}".format(i))(model) elif activation=='wavenet': model = WaveNet_activation(model) else: model = Activation(activation)(model) # Output FC layer model = TimeDistributed(Dense(n_classes, activation="softmax"))(model) model = Model(input=inputs, output=model) model.compile(loss=loss, optimizer=optimizer, sample_weight_mode="temporal", metrics=['accuracy']) if return_param_str: param_str = "TDN_C{}".format(conv_len) if causal: param_str += "_causal" return model, param_str else: return model def Dilated_TCN(num_feat, num_classes, nb_filters, dilation_depth, nb_stacks, max_len, activation="wavenet", tail_conv=1, use_skip_connections=True, causal=False, optimizer='adam', return_param_str=False): """ 
dilation_depth : number of layers per stack nb_stacks : number of stacks. """ def residual_block(x, s, i, activation): original_x = x if causal: x = ZeroPadding1D(((2**i)//2,0))(x) conv = AtrousConvolution1D(nb_filters, 2, atrous_rate=2**i, border_mode='same', name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x) conv = Cropping1D((0,(2**i)//2))(conv) else: conv = AtrousConvolution1D(nb_filters, 3, atrous_rate=2**i, border_mode='same', name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x) conv = SpatialDropout1D(0.3)(conv) # x = WaveNet_activation(conv) if activation=='norm_relu': x = Activation('relu')(conv) x = Lambda(channel_normalization)(x) elif activation=='wavenet': x = WaveNet_activation(conv) else: x = Activation(activation)(conv) #res_x = Convolution1D(nb_filters, 1, border_mode='same')(x) #skip_x = Convolution1D(nb_filters, 1, border_mode='same')(x) x = Convolution1D(nb_filters, 1, border_mode='same')(x) res_x = Merge(mode='sum')([original_x, x]) #return res_x, skip_x return res_x, x input_layer = Input(shape=(max_len, num_feat)) skip_connections = [] x = input_layer if causal: x = ZeroPadding1D((1,0))(x) x = Convolution1D(nb_filters, 2, border_mode='same', name='initial_conv')(x) x = Cropping1D((0,1))(x) else: x = Convolution1D(nb_filters, 3, border_mode='same', name='initial_conv')(x) for s in range(nb_stacks): for i in range(0, dilation_depth+1): x, skip_out = residual_block(x, s, i, activation) skip_connections.append(skip_out) if use_skip_connections: x = Merge(mode='sum')(skip_connections) x = Activation('relu')(x) x = Convolution1D(nb_filters, tail_conv, border_mode='same')(x) x = Activation('relu')(x) x = Convolution1D(num_classes, tail_conv, border_mode='same')(x) x = Activation('softmax', name='output_softmax')(x) model = Model(input_layer, x) model.compile(optimizer, loss='categorical_crossentropy', sample_weight_mode='temporal') if return_param_str: param_str = "D-TCN_C{}_B{}_L{}".format(2, nb_stacks, dilation_depth) if causal: param_str += "_causal" return model, param_str else: return model def BidirLSTM(n_nodes, n_classes, n_feat, max_len=None, causal=True, loss='categorical_crossentropy', optimizer="adam", return_param_str=False): inputs = Input(shape=(None,n_feat)) model = LSTM(n_nodes, return_sequences=True)(inputs) # Birdirectional LSTM if not causal: model_backwards = LSTM(n_nodes, return_sequences=True, go_backwards=True)(inputs) model = Merge(mode="concat")([model, model_backwards]) model = TimeDistributed(Dense(n_classes, activation="softmax"))(model) model = Model(input=inputs, output=model) model.compile(optimizer=optimizer, loss=loss, sample_weight_mode="temporal", metrics=['accuracy']) if return_param_str: param_str = "LSTM_N{}".format(n_nodes) if causal: param_str += "_causal" return model, param_str else: return model
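A hedged usage sketch for the encoder-decoder TCN defined above. All shapes and hyperparameters are placeholder values chosen for illustration, and the old Keras 1 API used throughout the file is assumed:

# Two encoder/decoder levels with 64 and 96 filters; conv_len, n_classes,
# n_feat and max_len are made-up values.
n_nodes = [64, 96]
model, param_str = ED_TCN(n_nodes, conv_len=25, n_classes=10, n_feat=128,
                          max_len=300, causal=False, activation='norm_relu',
                          return_param_str=True)
print(param_str)   # e.g. "ED-TCN_C25_L2"
model.summary()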
license: mit

repository: agarsev/grafeno
file: grafeno/transformers/concept_class.py
from grafeno.transformers.wordnet import Transformer as WNGet


class Transformer (WNGet):
    '''Finds the wordnet-defined `class' of a concept.

    Parameters
    ----------
    concept_class_hypernyms : bool
        If True, a new node is added with the class concept, related to the
        original node by an ``HYP'' edge.
    '''

    def __init__ (self, concept_class_hypernyms = True, **kwds):
        super().__init__(**kwds)
        self.__hyper = concept_class_hypernyms

    def post_process (self):
        super().post_process()
        for n in self.nodes.values():
            ss = n.get('synset')
            if ss:
                concept_class = ss.lexname().split('.')[1]
                if concept_class and concept_class != 'Tops':
                    n['class'] = concept_class
                    if self.__hyper:
                        chyp = { 'concept': concept_class }
                        if 'sempos' in n:
                            chyp['sempos'] = n['sempos']
                        self.sprout(n['id'], 'HYP', chyp)
license: agpl-3.0

repository: cvxopt/chompack
file: examples/symbolic_factorization.py
from cvxopt import spmatrix, printing, amd
import chompack as cp

printing.options['width'] = 17

# Define sparse matrix (list(...) so the concatenation also works on Python 3)
I = list(range(17)) + [2,2,3,3,4,14,4,14,8,14,15,8,15,7,8,14,8,14,14,15,10,12,13,16,12,13,16,12,13,15,16,13,15,16,15,16,15,16,16]
J = list(range(17)) + [0,1,1,2,2,2,3,3,4,4,4,5,5,6,6,6,7,7,8,8,9,9,9,9,10,10,10,11,11,11,11,12,12,12,13,13,14,14,15]
A = spmatrix(1.0, I, J, (17,17))

# Test if A is chordal
p = cp.maxcardsearch(A)
print("\nMaximum cardinality search")
print(" -- perfect elimination order:", cp.peo(A, p))

# Test if the natural ordering 0,1,2,...,16 is a perfect elimination order
p = list(range(17))
print("\nNatural ordering")
print(" -- perfect elimination order:", cp.peo(A, p))

p = amd.order(A)
print("\nAMD ordering")
print(" -- perfect elimination order:", cp.peo(A, p))

# Compute a symbolic factorization
symb = cp.symbolic(A, p)
print("\nSymbolic factorization:")
print("Fill                 :", sum(symb.fill))
print("Number of cliques    :", symb.Nsn)
print(symb)

# Compute a symbolic factorization with clique merging
symb2 = cp.symbolic(A, p, merge_function=cp.merge_size_fill(3, 3))
print("Symbolic factorization with clique merging:")
print("Fill (fact.+merging) :", sum(symb2.fill))
print("Number of cliques    :", symb2.Nsn)
print(symb2)
license: gpl-3.0

repository: mikemoorester/ESM
file: thesis/chapter_esm_modelling.py
from __future__ import division, print_function, absolute_import

import numpy as np
import matplotlib.pyplot as plt

#from scipy.stats.stats import pearsonr,kendalltau
#vel_light = 299792458.0
#fL1 = 10.23e6*77.*2.
#fL2 = 10.23e6*60.*2.
#wL1 = vel_light/fL1
#wL2 = vel_light/fL2
#lcl1 = 1./(1.-(fL2/fL1)**2)
#lcl2 = -(fL2/fL1)/(1.-(fL2/fL1)**2)


def plotFontSize(ax, fontsize=8):
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fontsize)
    return ax


def pcoBias(args):
    fig = plt.figure(figsize=(3.62, 2.76))
    fig.canvas.set_window_title('pco_pcv_correlation')
    ax = fig.add_subplot(111)
    plt.rc('text', usetex=True)

    nadir = np.linspace(0, 14, 141)
    for dr in [0.1, 0.5, 1.0]:
        dpcv = -dr * (1 - np.cos(np.radians(nadir)))
        ax.plot(nadir, dpcv)

    # \Delta wrapped in math mode so the usetex renderer accepts it
    ax.set_ylabel(r'$\Delta$ Satellite PCV (m)')
    ax.set_xlabel('Nadir angle' r'($\displaystyle^\circ$)')  # ($^\circ$)
    ax.set_xlim([0, 14])
    ax.legend([r'$\Delta r$ = 0.1 m', r'$\Delta r$ = 0.5 m', r'$\Delta r$ = 1.0 m'],
              fontsize=8, loc='best')
    ax = plotFontSize(ax, 8)
    plt.tight_layout()

    if args.plot_save:
        plt.savefig('pco_pcv_correlation.eps')
        plt.close()
    return 1


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(prog='chapter_esm_modelling',
                                     description='Create some basic plots for thesis chapter')
    parser.add_argument("--pco", dest="pco", action="store_true", default=False,
                        help="Produce a plot of pco bias appearing as a pcv bias (correlation)")
    parser.add_argument("--ps", "--plot_save", dest="plot_save", action="store_true", default=False,
                        help="Save plots in eps format")
    args = parser.parse_args()

    if args.pco:
        pcoBias(args)

    if not args.plot_save:
        plt.show()
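The quantity plotted by pcoBias is the apparent satellite PCV error produced by a radial PCO bias; in the notation of the loop above (dpcv = -dr*(1 - cos(nadir))) the relation is simply

\[
\Delta\mathrm{PCV}(\eta) \;=\; -\,\Delta r\,\bigl(1 - \cos\eta\bigr),
\]

where \(\eta\) is the nadir angle (0 to 14 degrees on the x axis) and \(\Delta r \in \{0.1, 0.5, 1.0\}\,\mathrm{m}\) are the radial offsets listed in the legend.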
license: mit

repository: scdsr/ptt-webcrawler
file: main.py
#!/usr/bin/python3
#coding:utf-8
import requests
from bs4 import BeautifulSoup
import lib.CalWords as CalWords
import lib.url2aid as url2aid

# Basic config
## Board name
searchBoard = 'https://www.ptt.cc/bbs/test/index.html'
## Pages to crawl
searchPages = 1
## Words required
lessNum = 30

res = requests.get(searchBoard)
mainSoup = BeautifulSoup(res.text, "html.parser")

targetURLs = []
for aclass in mainSoup.select('a'):
    if aclass.text == '‹ 上頁':
        targetURLs = ['https://www.ptt.cc' + aclass['href']]

startnum = int(targetURLs[0][-6])
targetURLs = [targetURLs[0][:-6] + str(i) + targetURLs[0][-5:]
              for i in range(startnum + 1, startnum - searchPages + 1, -1)]

articleURLs = []
# Get urls for searching
for url in targetURLs:
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "html.parser")
    htmltext = ''
    for entry in soup.select('.r-ent'):
        htmltext += str(entry)
    asoup = BeautifulSoup(htmltext, "html.parser")
    for a in asoup.find_all('a', href=True):
        articleURLs += ['https://www.ptt.cc' + a['href']]

# Print out results
try:
    for url in articleURLs:
        # Get article uid
        fn = url[-23:-5]
        aid = url2aid.url2aid(fn)
        if CalWords.calculate_words(url)[1] < lessNum:
            authorEnd = CalWords.calculate_words(url)[0].index(u'(')
            print(aid, CalWords.calculate_words(url)[0][0:authorEnd],
                  str(CalWords.calculate_words(url)[1]) + ' characters')
except:
    print('No more article less than', lessNum, '!')
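One fragile spot worth noting in the script above: `targetURLs[0][-6]` takes a single character, so the page-index arithmetic only works while the board index is a one-digit number. A hedged sketch of a more general extraction (the URL below is a made-up example of the '‹ 上頁' link the loop collects):

import re

# Hypothetical previous-page link and page count.
prev_url = 'https://www.ptt.cc/bbs/test/index5123.html'
searchPages = 1

# Pull the whole numeric index instead of a single character.
match = re.search(r'index(\d+)\.html$', prev_url)
startnum = int(match.group(1))
base = prev_url[:match.start(1)]
targetURLs = [base + str(i) + '.html'
              for i in range(startnum + 1, startnum - searchPages + 1, -1)]
print(targetURLs)   # ['https://www.ptt.cc/bbs/test/index5124.html']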
license: mit

repository: bartdag/ghmiles
file: ghmiles.py
''' ghmiles generates a milestones model from the list of issues in a github repository. :copyright: Copyright 2011 Barthelemy Dagenais :license: BSD, see LICENSE for details ''' # Necessary for monkey patching from github2.request import GithubRequest from github2.users import Users from github2.repositories import Repositories from github2.commits import Commits import time import sys from github2.issues import Issues, Issue from github2.client import Github import datetime import StringIO import re #### MONKEY PATCH github2 #### def list_by_label(self, project, label): """Get all issues for project' with label'. ``project`` is a string with the project owner username and repository name separated by ``/`` (e.g. ``ask/pygithub2``). ``label`` is a string representing a label (e.g., ``bug``). """ return self.get_values("list", project, "label", label, filter="issues", datatype=Issue) def list_labels(self, project): """Get all labels for project'. ``project`` is a string with the project owner username and repository name separated by ``/`` (e.g. ``ask/pygithub2``). """ return self.get_values("labels", project, filter="labels") def gh_init(self, username=None, api_token=None, debug=False, requests_per_minute=None, access_token=None): self.debug = debug self.request = GithubRequest(username=username, api_token=api_token, debug=self.debug, access_token=access_token, requests_per_minute=requests_per_minute) self.issues = Issues(self.request) self.users = Users(self.request) self.repos = Repositories(self.request) self.commits = Commits(self.request) def gr_init(self, username=None, api_token=None, url_prefix=None, debug=False, requests_per_minute=None, access_token=None): """ Make an API request. """ self.username = username self.api_token = api_token self.access_token = access_token self.url_prefix = url_prefix self.debug = debug if requests_per_minute is not None: self.requests_per_minute = requests_per_minute self.requests_count = 0 self.delay = 60.0 else: self.delay = 0 self.last_request = datetime.datetime(1900, 1, 1) if not self.url_prefix: self.url_prefix = self.url_format % { "github_url": self.github_url, "api_version": self.api_version, "api_format": self.api_format, } def gr_make_request(self, path, extra_post_data=None, method="GET"): # WARNING: THIS CODE IS NOT THREAD SAFE!!!! 
new_round = False if self.delay: since_last = (datetime.datetime.now() - self.last_request) since_last_seconds = (since_last.days * 24 * 60 * 60) + since_last.seconds + (since_last.microseconds/1000000.0) if since_last_seconds > self.delay: self.requests_count = 1 new_round = True elif self.requests_count >= self.requests_per_minute: duration = self.delay - since_last_seconds if self.debug: sys.stderr.write("delaying API call %s\n" % duration) time.sleep(duration) self.requests_count = 1 new_round = True else: self.requests_count += 1 extra_post_data = extra_post_data or {} url = "/".join([self.url_prefix, path]) result = self.raw_request(url, extra_post_data, method=method) if self.delay and new_round: self.last_request = datetime.datetime.now() return result Issues.list_by_label = list_by_label Issues.list_labels = list_labels GithubRequest.__init__ = gr_init GithubRequest.make_request = gr_make_request Github.__init__ = gh_init #### CONSTANTS #### MILESTONE_LABEL_V = re.compile(r'''^v\d+(?:\.\d+)*$''') '''Regex used to identify milestone labels of the form v0.1''' MILESTONE_LABEL_NUM = re.compile(r'''^\d+(?:\.\d+)*$''') '''Regex used to identify numerical milestone labels of the form 0.1''' MILESTONE_LABEL_V_RELAX = re.compile(r'''^v\d+(?:\.\d+)*''') '''Regex used to identify milestone labels of the form v0.1''' MILESTONE_LABEL_NUM_RELAX = re.compile(r'''^\d+(?:\.\d+)*''') '''Regex used to identify numerical milestone labels of the form 0.1''' SIMPLE_HTML_HEADER = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>{0} Roadmap</title> </head> <body> <h1>{0} Roadmap</h1> ''' SIMPLE_HTML_FOOTER = ''' <hr/> <p> Generated by <a href="https://github.com/bartdag/ghmiles">ghmiles</a> on {0}. 
</p> <p> <a href="http://validator.w3.org/check?uri=referer"><img src="http://www.w3.org/Icons/valid-xhtml10" alt="Valid XHTML 1.0 Strict" height="31" width="88" /></a> </p> </body> </html>''' FANCY_HTML_HEADER = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>{0} Roadmap</title> <link type="text/css" rel="stylesheet" href="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/themes/ui-lightness/jquery-ui.css"/> <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.5.0/jquery.min.js"></script> <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/jquery-ui.min.js"></script> <script type="text/javascript"> $(function() {{ $(".details").click(function(e) {{ $(e.target).parent().next().fadeToggle("fast", "linear"); }}); }}); </script> <style type="text/css"> #doc {{ margin:auto; width: 960px; font-family: Futura, "Century Gothic", AppleGothic, sans-serif; }} h1 {{ color: #1049A9; }} h2 {{ color: #1049A9; margin-top: 50px; }} a {{ color: #A65100; }} a:hover {{ color: #1049A9; }} .pb {{ width: 480px; font-size: 50%; }} .pb_label {{ float: right; font-size: 200%; font-weight: bold; }} .issues_list {{ list-style-type: none; display: none; }} .issues_list li a {{ font-weight: bold; }} .tickets {{ font-size: 0.7em; font-style: italic; }} .tickets dt {{ display: inline; margin-left: 1em; }} .tickets dd {{ display: inline; margin: 0 1.5em 0 0.5em; }} #hd, #ft {{ height: 40px; }} #ft {{ border-top: 1px solid #ccc; }} #hd {{ text-align: center; }} </style> </head> <body id="doc"> <div id="hd"> <h1>{0} Roadmap</h1> </div> <div id="main"> ''' FANCY_HTML_FOOTER = ''' </div> <div id="ft"> <p> Generated by <a href="https://github.com/bartdag/ghmiles">ghmiles</a> on {0}. </p> <p> <a href="http://validator.w3.org/check?uri=referer"><img src="http://www.w3.org/Icons/valid-xhtml10" alt="Valid XHTML 1.0 Strict" height="31" width="88" /></a> </p> </div> </body> </html>''' #### MILESTONE MODEL ##### class Milestone(object): def __init__(self, title, issues): self.title = title self.issues = issues self.issues.sort(key=lambda item: int(item.number)) self.total = len(issues) self.opened = sum((1 for issue in issues if issue.state == 'open')) self.closed = self.total - self.opened self.progress = float(self.closed) * 100.0 / float(self.total) def __repr__(self): return '<Milestone: {0}, {1} issues, {2:.2f}% completed>'.format( self.title, self.total, self.progress) def label_key(label, padding=5): '''Returns a padded key from a label representing a milestone number. All parts of a label that are numbers are padded so that alphabetical sorting can work as expected (e.g., '2.0' < '11.0'). For example, this function will return 'v00001.00022e-00123b' if label = 'v1.22e-123b'. :param label: the milestone label :param padding: the maximum number of characters for each numeric part. 
Default=5 :return: a key that can be used in alphabetical sorting ''' key = prefix = '' components = [] in_prefix = True current_number = current_suffix = '' for c in label: if not c.isdigit(): if in_prefix: prefix += c else: current_suffix += c else: if in_prefix: in_prefix = False if current_suffix != '': components.append((current_number, current_suffix)) current_number = current_suffix = '' current_number += c if not in_prefix and current_number != '': components.append((current_number, current_suffix)) key = prefix for component in components: key += component[0].rjust(padding,'0') + component[1] return key def get_milestone_labels(project, milestone_regex, reverse=True, github=None): if github is None: github = Github(requests_per_minute=60) labels = sorted(github.issues.list_labels(project), key=label_key, reverse=reverse) project_labels = (label for label in labels if milestone_regex.match(label)) return project_labels def get_intel_milestone_labels(project, reverse=True, github=None): if github is None: github = Github(requests_per_minute=60) labels = sorted(github.issues.list_labels(project), key=label_key, reverse=reverse) regexes = [MILESTONE_LABEL_NUM, MILESTONE_LABEL_NUM_RELAX, MILESTONE_LABEL_V, MILESTONE_LABEL_V_RELAX] max_labels = 0 limit = len(labels) project_labels = [] for regex in regexes: temp_labels = [label for label in labels if regex.match(label)] size = len(temp_labels) if size > max_labels: project_labels = temp_labels max_labels = size if size == limit: break return (project_labels, labels) def get_milestone(project, milestone_label, github=None): if github is None: github = Github(requests_per_minute=60) issues = github.issues.list_by_label(project, milestone_label) return Milestone(milestone_label, issues) def get_milestones(project, milestone_regex, reverse=True, github=None): '''Generates a list of milestones for a github project :param project: a string of the form `user/project` :param milestone_regex: a regular expression used to identify the labels representing milestones. :param reverse: If True (default), sort the milestones from the highest number to the lowest. Oppositive if False. :param github: a Github client (optional). :return: A generator (iterator) of milestones. ''' if github is None: github = Github(requests_per_minute=60) labels = get_milestone_labels(project, milestone_regex, reverse, github) milestones = (get_milestone(project, label, github) for label in labels) return milestones def get_milestones_from_labels(project, labels, github=None): '''Generates a list of milestones from the specified issue labels of a github project. This can be used to generate a milestone model for recent milestones only. :param project: a string of the form `user/project` :param labels: a list of labels used to generate milestones. :param github: a Github client (optional). :return: A generator (iterator) of milestones. 
''' if github is None: github = Github(requests_per_minute=60) milestones = (get_milestone(project, label, github) for label in labels) return milestones #### HTML GENERATION #### def write_simple_html_milestones(milestones, output): for milestone in milestones: output.write('<h2>Milestone: {0}</h2>\n'.format(milestone.title)) output.write('<p><strong>Progress: {0}%</strong></p>' .format(milestone.progress)) output.write('<p><em>Number of tickets: ') output.write('closed: {0} active: {1} total: {2}</em></p>\n' .format(milestone.closed, milestone.opened, milestone.total)) output.write('<p>Issues:</p>\n<ul>\n') for issue in milestone.issues: output.write('<li> #{0} {1} <em>{2}</em></li>\n' .format(issue.number, issue.title, issue.state)) output.write('</ul>\n') def get_simple_html_page(milestones, project_name = 'GitHub Project', save_path=None, header=SIMPLE_HTML_HEADER, footer=SIMPLE_HTML_FOOTER): '''Generates a simple HTML page similar to a Trac roadmap. :param milestones: a list (or iterator) of milestones. :param project_name: a human-readable project name. (optional) :param save_path: the output path used to save the HTML page. If None, a string containing the HTML page will be returned instead. :param header: the HTML header used to generate the HTML page. (optional) :param footer: the HTML footer used to generate the HTML page. (optional) :return: None if a save_path is provided, an HTML string otherwise. ''' return_value = None if save_path is None: output = StringIO.StringIO() else: output = open(save_path, 'w') output.write(header.format(project_name)) write_simple_html_milestones(milestones, output) output.write(footer.format(str(datetime.datetime.now()))) if save_path is None: return_value = output.getvalue() output.close() return return_value def write_fancy_html_milestones(milestones, project, output): for milestone in milestones: new_title = milestone.title.replace('.','--') progress = int(milestone.progress) output.write('<a name="{0}"></a>'.format(milestone.title)) output.write('<h2>Milestone: {0}</h2>\n'.format(milestone.title)) output.write(''' <script type="text/javascript"> $(function() {{ $("#progressbar{0}").progressbar({{value: {1} }}); }}); </script> '''.format(new_title,progress)) output.write(''' <div class="pb"> <div id="progressbar{0}"></div> <div class="pb_label">{1}%</div> </div> '''.format(new_title, progress)) output.write(''' <dl class="tickets"> <dt>Number of tickets:</dt><dd></dd> <dt>closed:</dt> <dd>{0}</dd> <dt>active:</dt> <dd>{1}</dd> <dt>total:</dt> <dd>{2}</dd> </dl> '''.format(milestone.closed, milestone.opened, milestone.total)) output.write('<p><a href="#{0}" class="details">' .format(milestone.title)) output.write('List of Issues:</a></p>\n') output.write('<ul class="issues_list">\n') for issue in milestone.issues: output.write( '<li><a href="https://github.com/{0}/issues/{1}">#{1}</a>' .format(project, issue.number)) output.write(' {0}'.format(issue.title)) output.write(' <strong>- {0}</strong></li>\n'.format(issue.state)) output.write('</ul>\n') def get_fancy_html_page(milestones, project, project_name = None, save_path=None, header=FANCY_HTML_HEADER, footer=FANCY_HTML_FOOTER): '''Generates a fancy HTML page similar to a Trac roadmap. :param milestones: a list (or iterator) of milestones. :param project: a string of the form `user/project` :param project_name: a human-readable project name. (optional) :param save_path: the output path used to save the HTML page. If None, a string containing the HTML page will be returned instead. 
:param header: the HTML header used to generate the HTML page. (optional) :param footer: the HTML footer used to generate the HTML page. (optional) :return: None if a save_path is provided, an HTML string otherwise. ''' return_value = None if project_name is None: project_name = project.split('/')[1] if save_path is None: output = StringIO.StringIO() else: output = open(save_path, 'w') output.write(header.format(project_name)) write_fancy_html_milestones(milestones, project, output) output.write(footer.format(str(datetime.datetime.now()))) if save_path is None: return_value = output.getvalue() output.close() return return_value
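The writers above only need milestone objects that expose title, progress, closed, opened, total and an iterable of issues, so the page generation can be exercised without touching the GitHub API. A minimal sketch meant to run inside the same module; FakeMilestone and FakeIssue are hypothetical stand-ins, not part of the original code:

# Minimal sketch, appended to the module above; FakeMilestone/FakeIssue are
# hypothetical objects exposing only the attributes the HTML writers use.
from collections import namedtuple

FakeIssue = namedtuple('FakeIssue', 'number title state')
FakeMilestone = namedtuple('FakeMilestone', 'title progress closed opened total issues')

demo = FakeMilestone(title='1.0', progress=50, closed=1, opened=1, total=2,
                     issues=[FakeIssue(1, 'First ticket', 'closed'),
                             FakeIssue(2, 'Second ticket', 'open')])

# With save_path=None the generated page is returned as a string instead of written to disk.
html = get_simple_html_page([demo], project_name='Demo Project')
print(html[:80])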
bsd-3-clause
4,907,401,982,529,198,000
31.017143
135
0.616277
false
Scille/parsec-cloud
tests/backend/user/test_user_create.py
1
21387
# Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS import pytest import pendulum from parsec.backend.user import INVITATION_VALIDITY, User, Device from parsec.api.data import UserCertificateContent, DeviceCertificateContent, UserProfile from parsec.api.protocol import DeviceID from tests.common import freeze_time from tests.backend.common import user_get, user_create @pytest.mark.trio @pytest.mark.parametrize( "profile,with_labels", [(profile, profile != UserProfile.STANDARD) for profile in UserProfile] ) async def test_user_create_ok( backend, backend_sock_factory, alice_backend_sock, alice, mallory, profile, with_labels ): now = pendulum.now() user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=profile, ) redacted_user_certificate = user_certificate.evolve(human_handle=None) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ) redacted_device_certificate = device_certificate.evolve(device_label=None) if not with_labels: user_certificate = redacted_user_certificate device_certificate = redacted_device_certificate user_certificate = user_certificate.dump_and_sign(alice.signing_key) device_certificate = device_certificate.dump_and_sign(alice.signing_key) redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=redacted_user_certificate, redacted_device_certificate=redacted_device_certificate, ) assert rep == {"status": "ok"} # Make sure mallory can connect now async with backend_sock_factory(backend, mallory) as sock: rep = await user_get(sock, user_id=mallory.user_id) assert rep["status"] == "ok" # Check the resulting data in the backend backend_user, backend_device = await backend.user.get_user_with_device( mallory.organization_id, mallory.device_id ) assert backend_user == User( user_id=mallory.user_id, human_handle=mallory.human_handle if with_labels else None, profile=profile, user_certificate=user_certificate, redacted_user_certificate=redacted_user_certificate, user_certifier=alice.device_id, created_on=now, ) assert backend_device == Device( device_id=mallory.device_id, device_label=mallory.device_label if with_labels else None, device_certificate=device_certificate, redacted_device_certificate=redacted_device_certificate, device_certifier=alice.device_id, created_on=now, ) @pytest.mark.trio async def test_user_create_invalid_certificate(alice_backend_sock, alice, bob, mallory): now = pendulum.now() good_user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) good_device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ).dump_and_sign(alice.signing_key) bad_user_certificate = UserCertificateContent( author=bob.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, 
profile=UserProfile.STANDARD, ).dump_and_sign(bob.signing_key) bad_device_certificate = DeviceCertificateContent( author=bob.device_id, timestamp=now, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ).dump_and_sign(bob.signing_key) for cu, cd in [ (good_user_certificate, bad_device_certificate), (bad_user_certificate, good_device_certificate), (bad_user_certificate, bad_device_certificate), ]: rep = await user_create( alice_backend_sock, user_certificate=cu, device_certificate=cd, redacted_user_certificate=good_user_certificate, redacted_device_certificate=good_device_certificate, ) assert rep == { "status": "invalid_certification", "reason": "Invalid certification data (Signature was forged or corrupt).", } # Same thing for the redacted part for cu, cd in [ (good_user_certificate, bad_device_certificate), (bad_user_certificate, good_device_certificate), (bad_user_certificate, bad_device_certificate), ]: rep = await user_create( alice_backend_sock, user_certificate=good_user_certificate, device_certificate=good_device_certificate, redacted_user_certificate=cu, redacted_device_certificate=cd, ) assert rep == { "status": "invalid_certification", "reason": "Invalid certification data (Signature was forged or corrupt).", } @pytest.mark.trio async def test_user_create_not_matching_user_device(alice_backend_sock, alice, bob, mallory): now = pendulum.now() user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=bob.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ).dump_and_sign(alice.signing_key) rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=device_certificate, ) assert rep == { "status": "invalid_data", "reason": "Device and User must have the same user ID.", } @pytest.mark.trio async def test_user_create_bad_redacted_device_certificate(alice_backend_sock, alice, mallory): now = pendulum.now() user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=None, # Can be used as regular and redacted certificate public_key=mallory.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ) good_redacted_device_certificate = device_certificate.evolve(device_label=None) device_certificate = device_certificate.dump_and_sign(alice.signing_key) for bad_redacted_device_certificate in ( good_redacted_device_certificate.evolve(timestamp=now.add(seconds=1)), good_redacted_device_certificate.evolve(device_id=alice.device_id), good_redacted_device_certificate.evolve(verify_key=alice.verify_key), ): rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=bad_redacted_device_certificate.dump_and_sign( alice.signing_key ), ) assert rep == { "status": "invalid_data", "reason": "Redacted Device certificate differs from Device 
certificate.", } # Missing redacted certificate is not allowed as well rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=None, ) assert rep == { "status": "bad_message", "reason": "Invalid message.", "errors": {"redacted_device_certificate": ["Missing data for required field."]}, } # Finally just make sure good was really good rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=good_redacted_device_certificate.dump_and_sign( alice.signing_key ), ) assert rep == {"status": "ok"} @pytest.mark.trio async def test_user_create_bad_redacted_user_certificate(alice_backend_sock, alice, mallory): now = pendulum.now() device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=mallory.device_id, device_label=None, # Can be used as regular and redacted certificate verify_key=mallory.verify_key, ).dump_and_sign(alice.signing_key) user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ) good_redacted_user_certificate = user_certificate.evolve(human_handle=None) user_certificate = user_certificate.dump_and_sign(alice.signing_key) for bad_redacted_user_certificate in ( good_redacted_user_certificate.evolve(timestamp=now.add(seconds=1)), good_redacted_user_certificate.evolve(user_id=alice.user_id), good_redacted_user_certificate.evolve(public_key=alice.public_key), good_redacted_user_certificate.evolve(profile=UserProfile.OUTSIDER), ): rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=bad_redacted_user_certificate.dump_and_sign( alice.signing_key ), redacted_device_certificate=device_certificate, ) assert rep == { "status": "invalid_data", "reason": "Redacted User certificate differs from User certificate.", } # Missing redacted certificate is not allowed as well rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=None, redacted_device_certificate=device_certificate, ) assert rep == { "status": "bad_message", "reason": "Invalid message.", "errors": {"redacted_user_certificate": ["Missing data for required field."]}, } # Finally just make sure good was really good rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=good_redacted_user_certificate.dump_and_sign(alice.signing_key), redacted_device_certificate=device_certificate, ) assert rep == {"status": "ok"} @pytest.mark.trio async def test_user_create_already_exists(alice_backend_sock, alice, bob): now = pendulum.now() user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=bob.user_id, human_handle=None, public_key=bob.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=bob.device_id, device_label=None, verify_key=bob.verify_key, ).dump_and_sign(alice.signing_key) rep = await user_create( alice_backend_sock, user_certificate=user_certificate, 
device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=device_certificate, ) assert rep == {"status": "already_exists", "reason": f"User `{bob.user_id}` already exists"} @pytest.mark.trio async def test_user_create_human_handle_already_exists(alice_backend_sock, alice, bob): now = pendulum.now() bob2_device_id = DeviceID("bob2@dev1") user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=bob2_device_id.user_id, human_handle=bob.human_handle, public_key=bob.public_key, profile=UserProfile.STANDARD, ) redacted_user_certificate = user_certificate.evolve(human_handle=None) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=bob2_device_id, device_label="dev2", verify_key=bob.verify_key, ) redacted_device_certificate = device_certificate.evolve(device_label=None) user_certificate = user_certificate.dump_and_sign(alice.signing_key) redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) device_certificate = device_certificate.dump_and_sign(alice.signing_key) redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=redacted_user_certificate, redacted_device_certificate=redacted_device_certificate, ) assert rep == { "status": "already_exists", "reason": f"Human handle `{bob.human_handle}` already corresponds to a non-revoked user", } @pytest.mark.trio async def test_user_create_human_handle_with_revoked_previous_one( alice_backend_sock, alice, bob, backend_data_binder ): # First revoke bob await backend_data_binder.bind_revocation(user_id=bob.user_id, certifier=alice) # Now recreate another user with bob's human handle now = pendulum.now() bob2_device_id = DeviceID("bob2@dev1") user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=bob2_device_id.user_id, human_handle=bob.human_handle, public_key=bob.public_key, profile=UserProfile.STANDARD, ) redacted_user_certificate = user_certificate.evolve(human_handle=None) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=bob2_device_id, device_label=bob.device_label, # Device label doesn't have to be unique verify_key=bob.verify_key, ) redacted_device_certificate = device_certificate.evolve(device_label=None) user_certificate = user_certificate.dump_and_sign(alice.signing_key) redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) device_certificate = device_certificate.dump_and_sign(alice.signing_key) redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=redacted_user_certificate, redacted_device_certificate=redacted_device_certificate, ) assert rep == {"status": "ok"} @pytest.mark.trio async def test_user_create_not_matching_certified_on(alice_backend_sock, alice, mallory): date1 = pendulum.datetime(2000, 1, 1) date2 = date1.add(seconds=1) user_certificate = UserCertificateContent( author=alice.device_id, timestamp=date1, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) device_certificate = DeviceCertificateContent( 
author=alice.device_id, timestamp=date2, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ).dump_and_sign(alice.signing_key) with freeze_time(date1): rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=device_certificate, ) assert rep == { "status": "invalid_data", "reason": "Device and User certificates must have the same timestamp.", } @pytest.mark.trio async def test_user_create_certificate_too_old(alice_backend_sock, alice, mallory): too_old = pendulum.datetime(2000, 1, 1) now = too_old.add(seconds=INVITATION_VALIDITY + 1) user_certificate = UserCertificateContent( author=alice.device_id, timestamp=too_old, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ).dump_and_sign(alice.signing_key) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=too_old, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ).dump_and_sign(alice.signing_key) with freeze_time(now): rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=device_certificate, ) assert rep == { "status": "invalid_certification", "reason": "Invalid timestamp in certificate.", } @pytest.mark.trio async def test_user_create_author_not_admin(backend, bob_backend_sock): # No need for valid certificate given given access right should be # checked before payload deserialization rep = await user_create( bob_backend_sock, user_certificate=b"<user_certificate>", device_certificate=b"<device_certificate>", redacted_user_certificate=b"<redacted_user_certificate>", redacted_device_certificate=b"<redacted_device_certificate>", ) assert rep == {"status": "not_allowed", "reason": "User `bob` is not admin"} @pytest.mark.trio async def test_redacted_certificates_cannot_contain_sensitive_data( alice_backend_sock, alice, mallory ): now = pendulum.now() user_certificate = UserCertificateContent( author=alice.device_id, timestamp=now, user_id=mallory.user_id, human_handle=mallory.human_handle, public_key=mallory.public_key, profile=UserProfile.STANDARD, ) redacted_user_certificate = user_certificate.evolve(human_handle=None) device_certificate = DeviceCertificateContent( author=alice.device_id, timestamp=now, device_id=mallory.device_id, device_label=mallory.device_label, verify_key=mallory.verify_key, ) redacted_device_certificate = device_certificate.evolve(device_label=None) user_certificate = user_certificate.dump_and_sign(alice.signing_key) device_certificate = device_certificate.dump_and_sign(alice.signing_key) redacted_user_certificate = redacted_user_certificate.dump_and_sign(alice.signing_key) redacted_device_certificate = redacted_device_certificate.dump_and_sign(alice.signing_key) with freeze_time(now): rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, redacted_user_certificate=user_certificate, redacted_device_certificate=redacted_device_certificate, ) assert rep == { "status": "invalid_data", "reason": "Redacted User certificate must not contain a human_handle field.", } rep = await user_create( alice_backend_sock, user_certificate=user_certificate, device_certificate=device_certificate, 
redacted_user_certificate=redacted_user_certificate, redacted_device_certificate=device_certificate, ) assert rep == { "status": "invalid_data", "reason": "Redacted Device certificate must not contain a device_label field.", }
agpl-3.0
2,347,862,217,895,652,000
37.05516
98
0.667275
false
neversakura/EE511_Fall2016
Basic Examples/SolvingLinearEquations.py
1
2763
# This is a simple example using numpy to solve a linear system of the form
#     A*x = B
# You can find more details on the following webpage:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.solve.html
# Author: Huo Chen
# Revision History
# ===========================================================================
# First commit     8/28/2016
# Singular matrix  9/6/2016

import numpy as np


def nullspace(A, atol=1e-13, rtol=0):
    """Compute an approximate basis for the nullspace of A.

    The algorithm used by this function is based on the singular value
    decomposition of `A`.

    Parameters
    ----------
    A : ndarray
        A should be at most 2-D. A 1-D array with length k will be treated
        as a 2-D with shape (1, k)
    atol : float
        The absolute tolerance for a zero singular value. Singular values
        smaller than `atol` are considered to be zero.
    rtol : float
        The relative tolerance. Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.

    If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::
        tol = max(atol, rtol * smax)
    Singular values smaller than `tol` are considered to be zero.

    Return value
    ------------
    ns : ndarray
        If `A` is an array with shape (m, k), then `ns` will be an array
        with shape (k, n), where n is the estimated dimension of the
        nullspace of `A`. The columns of `ns` are a basis for the
        nullspace; each element in numpy.dot(A, ns) will be approximately
        zero.
    """
    A = np.atleast_2d(A)
    u, s, vh = np.linalg.svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns


A = np.random.randint(10, size=(3, 3))
B = np.random.randint(10, size=(3,))
C = np.concatenate((A, np.transpose([B])), axis=1)

print('The linear system has the form of A*x=B, where A is:')
print(A)
print('B is:')
print(B)

# If matrix A is singular, we have two situations -- infinitely many
# solutions or no solution at all.
if np.linalg.matrix_rank(A) != 3 and np.linalg.matrix_rank(C) == np.linalg.matrix_rank(A):
    print('The matrix is singular and there are infinite number of solutions.')
    print('The homogeneous solution is:')
    print(nullspace(A))
    print('The particular solution is:')
    print(np.linalg.lstsq(A, B)[0])
elif np.linalg.matrix_rank(A) != 3 and np.linalg.matrix_rank(C) != np.linalg.matrix_rank(A):
    print('Matrix A is singular and there is no proper solution.')
else:
    print('The solution x is:')
    x = np.linalg.solve(A, B)
    print(x)
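nullspace() returns a basis that A should annihilate, so in the singular-but-consistent case any particular least-squares solution can be shifted along that basis and still solve A*x = B. A small check of that property, meant to be appended to the script above; A_demo and B_demo are made-up values, not the random system the script builds:

import numpy as np

# Illustrative check on a deliberately rank-deficient matrix; reuses nullspace() above.
A_demo = np.array([[1., 2., 3.],
                   [2., 4., 6.],   # second row is twice the first, so A_demo is singular
                   [0., 1., 1.]])
B_demo = np.array([1., 2., 1.])    # chosen to be consistent, so solutions exist

ns = nullspace(A_demo)                             # columns satisfy A_demo.dot(ns) ~ 0
x_part = np.linalg.lstsq(A_demo, B_demo)[0]        # one particular (least-squares) solution

print(np.allclose(A_demo.dot(ns), 0))                      # True
print(np.allclose(A_demo.dot(x_part + ns[:, 0]), B_demo))  # True: shifting along the nullspace keeps A*x = B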
mit
954,687,164,481,706,400
33.111111
90
0.612378
false
Marcerisson/his-dudeness
graphalama/screen.py
1
4929
# -*- coding: utf-8 -*-
import random

import pygame
import os

import graphalama
from graphalama.borg import Borg
from graphalama import fonts as f
from graphalama.color import Color
from graphalama.rectangle import Rectangle
from graphalama.text import Text
from graphalama.CONSTANTS import *

path = str(os.path.dirname(graphalama.__file__)).replace('\\', '/') + '/assets/img/'
wait_party_images = [pygame.image.load(path + 'wait_party/bunch_of_mms.png')]  # we load the wait party image
icon_image = pygame.image.load(path + 'logo.png')  # we load the logo image
borg_baby = Borg()


def new_display(name='Test with Graphalama', size=(0, 0), full_screen=False, icon_path='icon.png'):
    if not full_screen:
        display = pygame.display.set_mode(size, pygame.RESIZABLE | pygame.SRCALPHA)  # window creation w/o fullscreen
    else:
        display = pygame.display.set_mode([0, 0], pygame.FULLSCREEN | pygame.SRCALPHA)  # window creation w/ fullscreen

    pygame.display.set_caption(name + ' ' + str(Borg().version))  # the window title
    try:
        icon_image = pygame.image.load(icon_path)  # we load the logo image
        pygame.display.set_icon(icon_image)  # the logo (task bar)
    except pygame.error:
        print('Icon file not found or unreadable')

    Borg().SCREEN_SIZE = display.get_size()
    return display


def resize(display, inputs):
    """
    :param display: The pygame display, created with new_display()
    :param inputs: An Inputs object
    :return:
    """
    screen_size_before = borg_baby.SCREEN_SIZE

    if inputs['F12']['just pressed']:
        inputs['screen']['fullscreen'] = not inputs['screen']['fullscreen']
        if not inputs['screen']['fullscreen']:
            display = pygame.display.set_mode([800, 500], pygame.RESIZABLE | pygame.SRCALPHA)
        else:
            display = pygame.display.set_mode([0, 0], pygame.FULLSCREEN | pygame.SRCALPHA)
    elif inputs['screen']['size'] != borg_baby.SCREEN_SIZE:
        display = pygame.display.set_mode(inputs['screen']['size'], pygame.RESIZABLE | pygame.SRCALPHA)

    inputs['screen']['change'] = False
    borg_baby.SCREEN_SIZE = display.get_size()
    if screen_size_before != borg_baby.SCREEN_SIZE:
        inputs['screen']['size'] = borg_baby.SCREEN_SIZE


def wait_party(surface):
    max_w = surface.get_width()
    h = surface.get_height()

    def color_mode_function(color):
        color.do_rainbow(random.randint(20, 50))
        return color.rainbow

    color = Color(WHITE, my_rainbow=color_mode_function)
    color.mode = 'my_rainbow'

    bpoints = [0]
    hpoints = [0]
    allc = list()
    while bpoints[-1] < max_w or hpoints[-1] < max_w:
        bpoints.append(bpoints[-1] + random.randint(20, 100))
        hpoints.append(hpoints[-1] + random.randint(20, 100))
        allc.append(color.RGB)
        points = ((hpoints[-2], 0), (hpoints[-1], 0), (bpoints[-1], h), (bpoints[-2], h))
        pygame.draw.polygon(surface, allc[-1], points)

    for c, hx, bx in zip(allc, bpoints, hpoints):
        pygame.draw.aaline(surface, c, (bx, 0), (hx, h))
    pygame.display.update()


# TODO: move this back where it belongs, in Hubert, and rework it to use a Queue.
# For more info, text QUEUE to 06 95 40 21 62!
def error_screen(display, inputs, error, fatal_error=False):
    wait_party(display)
    log_rect = Rectangle(0.1, 0.25, 0.8, 0.5, WHITE, border=(RED, 1))

    text_1_error = 'An error occured :', 'en'
    text_2_error = 'Please press enter to continue', 'en'
    error_1_text = Text(text_1_error, (0.15, 0.28, 0.7, 0.05), f.Font(f.Calibri, 1, True),
                        color=RED, anchor=('tc', 0, 0))
    error_2_text = Text(str(error), (0.15, 0.37, 0.7, 0.05), f.Font(f.Calibri, 1, True),
                        anchor=('tc', 0, 0), color=RED)
    error_3_text = Text(text_2_error, (0.15, 0.49, 0.7, 0.05), f.Font(f.Calibri, 1, True),
                        anchor=('tc', 0, 0), color=RED)
    valid_button = Text('Ok :/', (0.4, 0.62, 0.2, 0.1, L_BLUE, ROUNDED, 0.5),
                        f.Font(f.Calibri, 0.3), RED, anchor=('cc', 0, 0))

    enter = False
    while not (enter or valid_button.mouse_click_area(1)):  # until Enter is pressed
        if inputs['enter']['is pressed']:
            enter = True

        log_rect.render()
        error_1_text.render()
        error_2_text.render()
        error_3_text.render()
        valid_button.render()

        display.blit(log_rect, log_rect.real_topleft)
        display.blit(error_1_text, error_1_text.real_topleft)
        display.blit(error_2_text, error_2_text.real_topleft)
        display.blit(error_3_text, error_3_text.real_topleft)
        display.blit(valid_button, valid_button.real_topleft)

        yield "APOUAAAAAAAAAAAAAAAAAAAAAL nick fury ta maire de Strasbourg #rolandries"

    if fatal_error:
        quit()


__all__ = ['new_display', 'resize', 'wait_party', 'error_screen']
mit
-1,188,802,242,255,722,000
39.04878
119
0.627893
false
micktwomey/gamecraft-mk-iii
gamecraft/settings_local_development.py
1
1218
from gamecraft.settings import *

import os

DJANGO_SECRET_KEY = "foo"

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.environ.get("GAMECRAFT_SQLITE_DB", "/tmp/gamecraft.sqlite"),
    }
}

INSTALLED_APPS = INSTALLED_APPS + (
    'debug_toolbar.apps.DebugToolbarConfig',
)

INTERNAL_IPS = ['127.0.0.1', 'localhost', '::1']

DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": "gamecraft.utils.debug_toolbar_callback",
}

STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'

PIPELINE_COMPILERS = (
    'pipeline.compilers.less.LessCompiler',
)

PIPELINE_CSS = {
    'gamecraft': {
        'source_filenames': (
            'css/gamecraft.less',
            'css/leaflet.css',
        ),
        'output_filename': 'css/gamecraft.css',
    },
}

PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'

PIPELINE_JS = {
    'gamecraft': {
        'source_filenames': (
            'js/holder.js',
            'js/jquery.js',
            'js/bootstrap.js',
            'js/leaflet.js',
            # 'js/react-with-addons.js',  # bug in processing the '\uFEFF' in the file
        ),
        'output_filename': 'js/gamecraft.js',
    },
}
mit
-4,015,671,678,422,743,000
21.981132
86
0.595238
false
mozilla/peekaboo
peekaboo/settings/base.py
1
3762
# This is your project's main settings file that can be committed to your # repo. If you need to override a setting locally, use settings_local.py from funfactory.settings_base import * # NOQA # Name of the top-level module where you put all your apps. # If you did not install Playdoh with the funfactory installer script # you may need to edit this value. See the docs about installing from a # clone. PROJECT_MODULE = 'peekaboo' USE_TZ = True TIME_ZONE = 'US/Pacific' # Defines the views served for root URLs. ROOT_URLCONF = '%s.urls' % PROJECT_MODULE INSTALLED_APPS = ( 'funfactory', 'compressor', 'django_browserid', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.staticfiles', 'commonware.response.cookies', 'session_csrf', # Application base, containing global templates. '%s.base' % PROJECT_MODULE, '%s.main' % PROJECT_MODULE, '%s.sheet' % PROJECT_MODULE, '%s.authentication' % PROJECT_MODULE, '%s.users' % PROJECT_MODULE, '%s.locations' % PROJECT_MODULE, 'sorl.thumbnail', 'bootstrapform', 'cronjobs', 'django.contrib.admin', 'raven.contrib.django.raven_compat', 'django_nose', # deliberately making this the last one ) LOCALE_PATHS = ( os.path.join(ROOT, PROJECT_MODULE, 'locale'), ) TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' # Because Jinja2 is the default template loader, add any non-Jinja templated # apps here: JINGO_EXCLUDE_APPS = [ 'admin', 'bootstrapform', 'browserid', ] # BrowserID configuration AUTHENTICATION_BACKENDS = [ 'django_browserid.auth.BrowserIDBackend', 'django.contrib.auth.backends.ModelBackend', ] SITE_URL = 'http://localhost:8000' LOGIN_URL = '/auth/login/' LOGIN_REDIRECT_URL = '/' LOGIN_REDIRECT_URL_FAILURE = '/auth/login/' TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.media', 'django.core.context_processors.request', 'session_csrf.context_processor', 'django.contrib.messages.context_processors.messages', 'funfactory.context_processors.globals', 'peekaboo.main.context_processors.main', ) # Should robots.txt deny everything or disallow a calculated list of URLs we # don't want to be crawled? Default is false, disallow everything. # Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710 ENGAGE_ROBOTS = False # Always generate a CSRF token for anonymous users. 
ANON_ALWAYS = True MIDDLEWARE_CLASSES = ( 'funfactory.middleware.LocaleURLMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'session_csrf.CsrfMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'commonware.middleware.FrameOptionsHeader', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware' ) # We're never storing any passwords so this can be anything HMAC_KEYS = {'something': 'anything'} PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',) LOGGING = dict(loggers=dict(playdoh={'level': logging.DEBUG})) SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' # Whether the picture taking part of sign in process should be enabled DEFAULT_TAKE_PICTURE = True BROWSERID_REQUEST_ARGS = {'siteName': 'Peek-a-boo!'} RECYCLE_MINIMUM_HOURS = 30 # days # Set to True if you want to keep the components that are made to generate # PDFs when printing badges DEBUG_PDF_PROGRAM = False # Default in Django is 2 weeks (1209600 = 60 * 60 * 24 * 7 * 2) SESSION_COOKIE_AGE = 60 * 60 * 24 * 365 # 1 year
mpl-2.0
6,753,622,614,826,250,000
30.35
78
0.723551
false
chadhs/idonethis-spectator
spectator.py
1
1732
#!/usr/bin/env python

from flask import Flask
from flask import render_template
import requests
import datetime

import config

## configuration
token = config.token
team = config.team
user = config.user
idt_url = "https://idonethis.com"
api_dones_url = "%s/api/v0.1/dones/?owner=%s&team=%s&page_size=100" % (idt_url, user, team)

## the app
app = Flask(__name__)


### helpers
def get_json_data(url):
    """
    fetch dones from the iDoneThis api,
    return list of dones from the json response
    """
    headers = {'content-type': 'application/json',
               'authorization': 'token %s' % (token)}
    r = requests.get(url, headers=headers)
    data = r.json()
    dones = data['results']
    return dones


def fix_rel_url(dones):
    """
    replace relative urls in markedup_text with absolute urls
    """
    for done in dones:
        done['markedup_text'] = done['markedup_text'].replace("/hashtags", "%s/hashtags" % (idt_url))
        done['markedup_text'] = done['markedup_text'].replace("/cal", "%s/cal" % (idt_url))
    return dones


### urls
@app.route("/")
def display_dones():
    startdate = datetime.date.today() - datetime.timedelta(1)
    enddate = datetime.date.today() - datetime.timedelta(7)

    url_today = "%s&done_date=today" % (api_dones_url)
    dones_today = get_json_data(url_today)
    dones_today = fix_rel_url(dones_today)

    url_lastweek = "%s&done_date_after=%s&done_date_before=%s" % (api_dones_url, enddate, startdate)
    dones_lastweek = get_json_data(url_lastweek)
    dones_lastweek = fix_rel_url(dones_lastweek)

    return render_template('dones.html', team=team, user=user,
                           dones_today=dones_today, dones_lastweek=dones_lastweek)


## run app
if __name__ == "__main__":
    app.run(debug=True)
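fix_rel_url() only rewrites the two known relative prefixes in markedup_text, which is easy to confirm without calling the API. A small sketch with a fabricated done entry (the HTML below is made up, not real iDoneThis data), assuming the module above imports cleanly since it reads token, team and user from config at import time:

# Illustrative only: exercise fix_rel_url() from the module above with fake data.
fake_dones = [{'markedup_text': 'Shipped <a href="/hashtags/release">#release</a> '
                                '(<a href="/cal/today">today</a>)'}]
fixed = fix_rel_url(fake_dones)
print(fixed[0]['markedup_text'])
# -> both links now start with https://idonethis.com/...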
bsd-3-clause
8,088,948,709,020,566,000
27.393443
118
0.658776
false
berjc/code-complete
code_complete/code_snippet_providers/github_code_snippet_provider.py
1
5232
# -*- coding: utf-8 -*- """ Encapsulates Functionality for Gathering Relevant Code Snippets from Github. """ import httplib from lxml import html import requests from abstract_code_snippet_provider import AbstractCodeSnippetProvider from utils.request_builder import RequestBuilder class GithubCodeSnippetProvider(AbstractCodeSnippetProvider): """ Encapsulates Functionality for Gathering Relevant Code Snippets from Github. """ PATH_DELIM = '/' SPACE_DELIM = '+' # The number of search pages to iterate through on Github. NUM_PAGES_TO_CHECK = 2 GITHUB_DOMAIN = 'github.com' GITHUB_SEARCH_PATH = '/search' RAW_GITHUB_USER_CONTENT_DOMAIN = 'raw.githubusercontent.com' BLOB_INDEX = 3 # Github Search Request Parameters. GITHUB_LANGUAGE_KEY = 'l' GITHUB_QUERY_KEY = 'q' GITHUB_PAGE_KEY = 'p' GITHUB_TYPE_KEY = 'type' GITHUB_TYPE_VALUE = 'Code' # The xpath for parsing snippet URLs from the Github search results page. XPATH_SNIPPET_URLS = '//div[contains(@class, "code-list-item") and contains(@class, "code-list-item-public")]' \ '//p[@class="title"]//a[@title]/@href' def __init__(self, task_description, language): """ Initializes the `GithubCodeSnippetProvider` object. :param task_description: A description of the task to complete. :type task_description: str :param language: The programming language the code snippets should be in. :type language: str """ AbstractCodeSnippetProvider.__init__(self, task_description, language) @staticmethod def _construct_raw_user_content_url_path(code_snippet_url): """ Returns the raw user content URL for the given code snippet URL. :return: The raw user content URL for the given code snippet URL. :rtype: str .. code-block:: python code_snippet_url = '/username/reponame/blob/hashvalue/path/to/file' # Returns ... '/username/reponame/hashvalue/path/to/file' """ parts_of_path = code_snippet_url.split(GithubCodeSnippetProvider.PATH_DELIM) return GithubCodeSnippetProvider.PATH_DELIM.join( parts_of_path[:GithubCodeSnippetProvider.BLOB_INDEX] + parts_of_path[GithubCodeSnippetProvider.BLOB_INDEX + 1:] ) @staticmethod def _get_code_snippets_from_snippet_urls(code_snippet_urls): """ Returns the code snippets resident at the given snippet URls. :param code_snippet_urls: A list of the URLs of code snippets related to the given task description and language. :type code_snippet_urls: list :return: A list of the code snippets resident at the given snippet URLs. :rtype: list """ code_snippets = [] for code_snippet_url in code_snippet_urls: raw_user_content_url_path = GithubCodeSnippetProvider._construct_raw_user_content_url_path(code_snippet_url) request_url = RequestBuilder( GithubCodeSnippetProvider.RAW_GITHUB_USER_CONTENT_DOMAIN, path=raw_user_content_url_path, ).build() page = requests.get(request_url) code_snippets.append(page.content) return code_snippets def _get_code_snippet_urls(self): """ Returns the URLs of all code snippets related to the given task description and language. :return: A list of the URLs of code snippets related to the given task description and language. 
:rtype: list """ code_snippet_urls = [] for page_number in xrange(GithubCodeSnippetProvider.NUM_PAGES_TO_CHECK): request_url = RequestBuilder( GithubCodeSnippetProvider.GITHUB_DOMAIN, path=GithubCodeSnippetProvider.GITHUB_SEARCH_PATH, params={ GithubCodeSnippetProvider.GITHUB_LANGUAGE_KEY: self._language, GithubCodeSnippetProvider.GITHUB_PAGE_KEY: page_number + 1, GithubCodeSnippetProvider.GITHUB_QUERY_KEY: GithubCodeSnippetProvider.SPACE_DELIM.join( self._task_description.split() ), GithubCodeSnippetProvider.GITHUB_TYPE_KEY: GithubCodeSnippetProvider.GITHUB_TYPE_VALUE, }, ).build() page = requests.get(request_url) if page.status_code != httplib.OK: # This occurs if the page number exceeds the the number of pages for the available search results. break tree = html.fromstring(page.content) code_snippet_urls[0:0] = tree.xpath(GithubCodeSnippetProvider.XPATH_SNIPPET_URLS) return code_snippet_urls def get_code_snippets(self): """ Returns the code snippets related to the given task description and language. :return: A list of code snippets related to the given task description and language. :rtype: list """ code_snippet_urls = self._get_code_snippet_urls() self._code_snippets = GithubCodeSnippetProvider._get_code_snippets_from_snippet_urls(code_snippet_urls) return self._code_snippets
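The blob-to-raw rewrite documented in _construct_raw_user_content_url_path simply drops the path segment at BLOB_INDEX. A standalone sketch of that transformation; the path below is illustrative, not a real repository:

# Illustrative path rewrite, mirroring _construct_raw_user_content_url_path above.
PATH_DELIM = '/'
BLOB_INDEX = 3

code_snippet_url = '/username/reponame/blob/hashvalue/path/to/file'
parts = code_snippet_url.split(PATH_DELIM)
raw_path = PATH_DELIM.join(parts[:BLOB_INDEX] + parts[BLOB_INDEX + 1:])
print(raw_path)  # /username/reponame/hashvalue/path/to/file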
mit
2,548,377,350,738,495,000
39.875
120
0.648891
false
pkhorrami4/make_chen_dataset
code/detect_faces.py
1
6022
import argparse import os import shutil import sys from time import time import numpy import skimage.transform import dlib from ffvideo import VideoStream def detect_crop_all_faces(X): num_frames = X.shape[0] all_cropped_faces = numpy.zeros((num_frames, 3, 96, 96), dtype=numpy.uint8) all_landmarks = numpy.zeros((num_frames, 2*68), dtype=numpy.float32) fail_vec = numpy.zeros(num_frames, dtype=numpy.uint8) print all_cropped_faces.shape for i in range(num_frames): #for i in range(100): img = X[i, :, :, :] # Detect face / landmarks with dlib time_start = time() detect_flag, landmarks = detect_face_dlib(img) # If face detected: if detect_flag != 0: # Crop it (using landmarks) and convert to grayscale crop_frame, bb = crop_frame_using_landmarks(img, landmarks) crop_frame = skimage.transform.resize(crop_frame, (96, 96)) crop_frame = numpy.uint8(crop_frame*255.0) # skimage.io.imsave('./img_%.4d.jpg' % i, crop_frame) # Re-adjust the landmarks landmarks = normalize_landmarks(landmarks, bb, 96) # Save cropped image all_cropped_faces[i, :, :, :] = crop_frame.transpose(2, 0, 1) all_landmarks[i, :] = landmarks fail_vec[i] = 0 time_elapsed = time() - time_start print 'Processing frame (%d/%d) -- %.2f sec.' % (i, num_frames, time_elapsed) else: print 'Face missed in frame (%d/%d)' % (i, num_frames) fail_vec[i] = 1 return all_cropped_faces, all_landmarks, fail_vec def detect_face_dlib(frame): num_landmarks = 68 predictor_path = '/var/research/Code/dlib-18.17/python_examples/' \ 'shape_predictor/shape_predictor_68_face_landmarks.dat' detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor(predictor_path) det = detector(frame, 1) if det: det = det[0] detect_flag = 1 landmarks = [] shape = predictor(frame, det) for i in range(num_landmarks): part = shape.part(i) landmarks.append(part.x) landmarks.append(part.y) landmarks = numpy.array(landmarks, dtype='float32') else: detect_flag = 0 landmarks = numpy.zeros((2*num_landmarks), dtype='float32') # print detect_flag, landmarks return detect_flag, landmarks def normalize_landmarks(landmarks, face_bb, new_img_size): """ Function to readjust the detected facial landmarks when the face is cropped out of the frame.""" # Subtract upper left corner of face bounding box rep_face_bb = numpy.tile(face_bb[0:2], len(landmarks)/2) landmarks -= rep_face_bb # Scale x,y coordinates from face_w, face_h to be in 96x96 image scale_vec = numpy.tile([new_img_size/face_bb[2], new_img_size/face_bb[3]], len(landmarks)/2) landmarks *= scale_vec return landmarks def crop_frame_using_landmarks(frame, landmarks): """ Function to crop the face using the detected facial landmarks (courtesy of dlib).""" landmarks = numpy.reshape(landmarks, (2, len(landmarks)/2), 'F') min_x = numpy.min(landmarks[0, :]) min_y = numpy.min(landmarks[1, :])-30 # include more of the brow max_x = numpy.max(landmarks[0, :]) max_y = numpy.max(landmarks[1, :]) # print min_x, max_x # print min_y, max_y crop_frame = frame[min_y:max_y, min_x:max_x, :] bb = (min_x, min_y, max_x-min_x, max_y-min_y) return crop_frame, bb def save_out_data(save_path, save_filename, data): """Save data as .npy file to location given by save_path.""" if not os.path.exists(save_path): os.makedirs(save_path) save_file_path = os.path.join(save_path, save_filename) numpy.save(save_file_path, data) def parse_args(): parser = argparse.ArgumentParser(description='Detect and extract faces in ' 'specified .npy and save it.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--npy_file_path', 
dest='npy_file_path', default='/data/Expr_Recog/Chen_Huang_avdata_python/npy_files_raw/', help='Path to .npy file containing un-cropped faces.') parser.add_argument('--save_path', dest='save_path', default='./npy_cropped_faces/', help='Folder to save output .npy files.') parser.add_argument('--subj_id', dest='subj_id', help='Subject to extract cropped faces.') if len(sys.argv) == 1: parser.print_help() sys.exit(1) args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() print 'Args: ', args time_start = time() npy_file_path = args.npy_file_path save_path = args.save_path subj_id = args.subj_id # Load data input_X_filename = 'X_'+subj_id+'.npy' X = numpy.load(os.path.join(npy_file_path, input_X_filename)) # Detect and crop faces all_cropped_faces, all_landmarks, fail_vec = detect_crop_all_faces(X) # Save data to .npy files output_X_filename = 'X_'+subj_id+'.npy' output_landmark_filename = 'landmarks_'+subj_id+'.npy' output_fail_vec_filename = 'fail_vec_'+subj_id+'.npy' save_out_data(save_path, output_X_filename, all_cropped_faces) save_out_data(save_path, output_landmark_filename, all_landmarks) save_out_data(save_path, output_fail_vec_filename, fail_vec) # Copy label file y_src_file = os.path.join(npy_file_path, 'y_'+subj_id+'.npy') y_dest_file = os.path.join(save_path, 'y_'+subj_id+'.npy') shutil.copyfile(y_src_file, y_dest_file) time_elapsed = time() - time_start print 'Total Execution Time: %.2f sec.' % time_elapsed
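normalize_landmarks() above shifts each (x, y) by the bounding-box corner and rescales it into the 96x96 crop, i.e. x' = (x - bb_x) * 96 / bb_w and likewise for y. A tiny worked example with made-up numbers, meant to run alongside the script above (Python 2, like the rest of the file):

import numpy

# Illustrative only: one landmark at (106, 116) inside a fake 192x192 face box at (10, 20).
demo_landmarks = numpy.array([106.0, 116.0], dtype='float32')  # x, y in full-frame coordinates
demo_bb = (10.0, 20.0, 192.0, 192.0)                           # (x, y, w, h) as floats

print normalize_landmarks(demo_landmarks, demo_bb, 96)
# prints roughly [ 48.  48.] since (106 - 10) * 96 / 192 = 48 and (116 - 20) * 96 / 192 = 48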
gpl-3.0
1,692,513,392,116,138,500
33.022599
92
0.599469
false
praekelt/ummeli
ummeli/base/migrations/0009_auto__add_skill__add_field_curriculumvitae_preferred_skill.py
1
12779
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Skill' db.create_table('base_skill', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('skill', self.gf('django.db.models.fields.CharField')(max_length=45)), )) db.send_create_signal('base', ['Skill']) # Adding field 'CurriculumVitae.preferred_skill' db.add_column('base_curriculumvitae', 'preferred_skill', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='profiles_preferred', null=True, to=orm['base.Skill']), keep_default=False) # Adding M2M table for field skills on 'CurriculumVitae' db.create_table('base_curriculumvitae_skills', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('curriculumvitae', models.ForeignKey(orm['base.curriculumvitae'], null=False)), ('skill', models.ForeignKey(orm['base.skill'], null=False)) )) db.create_unique('base_curriculumvitae_skills', ['curriculumvitae_id', 'skill_id']) def backwards(self, orm): # Deleting model 'Skill' db.delete_table('base_skill') # Deleting field 'CurriculumVitae.preferred_skill' db.delete_column('base_curriculumvitae', 'preferred_skill_id') # Removing M2M table for field skills on 'CurriculumVitae' db.delete_table('base_curriculumvitae_skills') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'base.article': { 'Meta': {'object_name': 'Article'}, 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 3, 29, 18, 14, 13, 829532)', 'blank': 'True'}), 'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'text': ('django.db.models.fields.TextField', [], {}) }, 'base.category': { 'Meta': {'object_name': 'Category'}, 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.Article']", 'null': 'True', 'blank': 'True'}), 'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'province': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['base.Province']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'user_submitted_job_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.UserSubmittedJobArticle']", 'null': 'True', 'blank': 'True'}) }, 'base.certificate': { 'Meta': {'object_name': 'Certificate'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}) }, 'base.curriculumvitae': { 'Meta': {'object_name': 'CurriculumVitae'}, 'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Certificate']", 'symmetrical': 'False', 'blank': 'True'}), 'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'connection_requests'", 'blank': 'True', 'to': "orm['auth.User']"}), 'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'house_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Language']", 'symmetrical': 'False', 'blank': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': 
"'profiles_preferred'", 'null': 'True', 'to': "orm['base.Skill']"}), 'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Reference']", 'symmetrical': 'False', 'blank': 'True'}), 'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'profiles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['base.Skill']"}), 'street_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}), 'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.WorkExperience']", 'symmetrical': 'False', 'blank': 'True'}) }, 'base.language': { 'Meta': {'object_name': 'Language'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'base.province': { 'Meta': {'object_name': 'Province'}, 'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'search_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}) }, 'base.reference': { 'Meta': {'object_name': 'Reference'}, 'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}) }, 'base.skill': { 'Meta': {'object_name': 'Skill'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'}) }, 'base.usersubmittedjobarticle': { 'Meta': {'object_name': 'UserSubmittedJobArticle'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'text': ('django.db.models.fields.TextField', [], {'default': "''"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_submitted_job_article_user'", 'to': "orm['auth.User']"}) }, 'base.workexperience': { 'Meta': {'object_name': 'WorkExperience'}, 'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}), 'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '45'}) }, 
'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['base']
bsd-3-clause
3,870,404,524,602,337,300
73.296512
219
0.555677
false
gnarph/DIRT
utilities/path.py
1
1652
import os
import shutil


def iter_files_in(directory):
    """
    Iterate over all filenames in a directory
    Does not descend into sub-directories
    :param directory: directory to look for
    :return: generator
    """
    for item_name in os.listdir(directory):
        full_name = os.path.join(directory, item_name)
        print full_name, directory
        if should_use_file(full_name):
            yield full_name


def should_use_file(name):
    """
    Should DIRT use the file?
    :param name: name of file
    :return: boolean
    """
    if is_hidden_file(name):
        return False
    return os.path.isfile(name)


def is_hidden_file(full_name):
    """
    Is a file hidden?
    :param full_name: name of file
    :return: True or False
    """
    name = get_name(full_name)
    return name[0] == '.'


def get_name(filename, extension=True):
    """
    Get name of a file without the path portion
    :param filename: path of file
    :param extension: include the extension?
    :return: name of file without path
    """
    fn = filename if extension else os.path.splitext(filename)[0]
    return os.path.split(fn)[1]


def delete_folder(name):
    """
    Deletes folder, if folder does not exist, fails silently
    :param name: name/path of folder to delete
    """
    try:
        shutil.rmtree(name)
    except OSError:
        pass


def create_folder(name):
    """
    Creates a folder
    :param name: folder name/path
    """
    os.makedirs(name)


def reset_folder(name):
    """
    Cleanout a folder
    :param name: folder to clean out
    :return:
    """
    delete_folder(name)
    create_folder(name)
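A short usage sketch for the helpers above, meant to run in the same (Python 2) module; the scratch path is hypothetical:

import os

# Illustrative usage of the helpers above on a throwaway folder.
scratch = '/tmp/dirt_path_demo'     # hypothetical scratch location
reset_folder(scratch)               # delete_folder + create_folder

open(os.path.join(scratch, 'a.txt'), 'w').close()
open(os.path.join(scratch, '.hidden'), 'w').close()

for name in iter_files_in(scratch):
    print(get_name(name))           # only 'a.txt'; '.hidden' is filtered out by should_use_file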
mit
8,095,127,568,173,375,000
20.179487
65
0.621671
false
valdergallo/mock_django_orm
app/models.py
1
1142
from django.db import models

from app.managers import AppSecondManager


class AppOne(models.Model):
    name = models.CharField(max_length=50)
    description = models.TextField()

    def get_full_description(self):
        return u'%s / %s' % (self.name, self.description)

    def __unicode__(self):
        return self.name


class AppSecond(models.Model):
    name = models.CharField(max_length=50)
    description = models.TextField()

    objects = AppSecondManager()

    def __unicode__(self):
        return self.name


class AppThird(models.Model):
    name = models.CharField(max_length=50)
    app_one = models.ForeignKey(AppOne)
    app_second = models.ForeignKey(AppSecond)

    def get_extra(self):
        if self.app_one.name == '1':
            return u'%s-%s' % (self.app_one.name, self.name)
        elif self.app_second.name == '1':
            return u'%s-%s' % (self.app_second.name, self.name)
        elif self.app_one.name == '2' and self.app_second.name == '2':
            return u'%s%s-%s' % (self.app_one.name, self.app_second.name, self.name)
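get_extra() above falls through to an implicit None when none of its branches match, which can be seen with unsaved instances. A quick sketch, assuming Django is configured so the models module imports; the field values are made up and nothing is saved to the database:

# Illustrative only: unsaved instances, relations read from the in-memory cache.
one = AppOne(name='1', description='first app')
second = AppSecond(name='0', description='second app')
third = AppThird(name='demo', app_one=one, app_second=second)

print(one.get_full_description())   # u'1 / first app'
print(third.get_extra())            # u'1-demo' -- the first branch matches
one.name = '3'
print(third.get_extra())            # None -- no branch matches, implicit fall-through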
mit
-6,667,769,444,941,581,000
28.282051
70
0.595447
false
amsqr/k-Met
kMet.py
1
5656
#!/usr/local/bin/python # -*- coding: utf-8 -*- import os,sys,glob import re from decimal import * import re, collections from phonetic_algorithms import PhoneticAlgorithms import difflib #from zlib import compress # kMet Phonetic Clustering Algorithm # Copyright (C) 2012 Alejandro Mosquera <[email protected]> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. print "kMet 0.1" print "Support: [email protected]\n\n" class kMet(object): """ kMet Phonetic Clustering Algorithm """ def __init__(self): self.groups={} self.phon_dict={} def save_phon_dict(self,words,dict_name): f = open(words,'w') for k in dict_name: f.write(str(k) + '|||' + str(dict_name[k]) + '\n') f.close() def text2phon(self,word,pword): phon = PhoneticAlgorithms().double_metaphone(pword) phon = str(phon).split(',') phon1= str(phon[0])[1:] phon2= str(phon[1])[1:-1] #print phon1,phon2 if phon1 in self.phon_dict: lista=self.phon_dict[phon1] lista=lista.split('###') found=0 if word not in lista: self.phon_dict[phon1]=self.phon_dict[phon1] + word +'###' else: self.phon_dict[phon1]=word+'###' if phon2 in self.phon_dict: lista=self.phon_dict[phon2] lista=lista.split('###') found=0 if word not in lista: self.phon_dict[phon2]=self.phon_dict[phon2] + word +'###' else: if phon2!='None': self.phon_dict[phon2]=word+'###' ##def zip_sim(stringA, stringB): ## a = len(compress(stringA)) ## b = len(compress(stringB)) ## c = len(compress(stringA + stringB)) ## return 1.0 -(0.0 +a +b -c )/max (a ,b ) def replace_numbers(self,foo): vocals=['a','e','i','o','u'] if (foo.isdigit()==False): foo2=foo foo=foo.replace('0','o') foo=foo.replace('3','e') foo=foo.replace('5','s') foo=foo.replace('6','g') foo=foo.replace('7','t') foo=foo.replace('9','g') foo=foo.replace('8','eight') foo=foo.replace('4','for') foo=foo.replace('2','to') foo=foo.replace('1','one') return foo def cluster(self,met, words): clustered = False for key in self.groups: # Check for similarity seq=difflib.SequenceMatcher(a=key,b=met) dis=seq.ratio() if dis>0.80: if words!='' and words not in self.groups[key]: self.groups[key].append(words) clustered = True break if not clustered: if not self.groups.has_key(met): self.groups[met] = [] self.groups[met].append(words) def process_text(self,text): punt_list=['.',',','!','?',';',':'] s=list(text) texto=''.join([ o for o in s if not o in punt_list ]).split() for word in texto: if word in punt_list or word.find('http:')>-1 or word.find('www.')>-1: pass else: pattern = re.compile('[\W_]+') word=pattern.sub('',word) if word!='': self.text2phon(word,self.replace_numbers(word)) for met in self.phon_dict: #print met lista = self.phon_dict[met].split('###') for l in lista: self.cluster(met,l) def main(): kcluster=kMet() kcluster.process_text('praises prices precious, process, presses, precise, purses, growing, grunge, grunge, carrying, crying, caring, carnage, crank, grinch, chronic, crank, to, the, do, they, day, needed, nudity, noted, knitted, knotted, that, thought, they, those, this, thought, without') #print kcluster.groups 
for g in kcluster.groups: print str(g).replace("'",'') + '###' + str(kcluster.groups[g]).replace("['",'').replace("']",'').replace("', '",'|||') if __name__ == '__main__': main()
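A minimal, self-contained sketch of the clustering idea above: words are grouped whenever their keys score above 0.80 under difflib.SequenceMatcher, as in kMet.cluster. The real script keys on PhoneticAlgorithms().double_metaphone; the vowel-stripping rough_phonetic_key below is only a hypothetical stand-in for it.

import difflib

def rough_phonetic_key(word):
    # Hypothetical stand-in for double metaphone: uppercase consonant skeleton.
    return ''.join(c for c in word.upper() if c not in 'AEIOU')

def cluster(words, threshold=0.80):
    groups = {}  # phonetic key -> list of similar words
    for word in words:
        key = rough_phonetic_key(word)
        for existing in groups:
            # Same similarity test that kMet.cluster applies to metaphone keys.
            if difflib.SequenceMatcher(a=existing, b=key).ratio() > threshold:
                groups[existing].append(word)
                break
        else:
            groups[key] = [word]
    return groups

if __name__ == '__main__':
    sample = ['praises', 'prices', 'precise', 'purses', 'crying', 'caring', 'carrying']
    for key, members in cluster(sample).items():
        print(key, '->', members)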
gpl-3.0
3,341,968,527,875,160,600
34.797468
299
0.455622
false
BD2KGenomics/slugflow
src/toil/jobStores/utils.py
1
12719
import errno import logging import os from abc import ABC, abstractmethod from toil.lib.threading import ExceptionalThread log = logging.getLogger(__name__) class WritablePipe(ABC): """ An object-oriented wrapper for os.pipe. Clients should subclass it, implement :meth:`.readFrom` to consume the readable end of the pipe, then instantiate the class as a context manager to get the writable end. See the example below. >>> import sys, shutil >>> class MyPipe(WritablePipe): ... def readFrom(self, readable): ... shutil.copyfileobj(codecs.getreader('utf-8')(readable), sys.stdout) >>> with MyPipe() as writable: ... _ = writable.write('Hello, world!\\n'.encode('utf-8')) Hello, world! Each instance of this class creates a thread and invokes the readFrom method in that thread. The thread will be join()ed upon normal exit from the context manager, i.e. the body of the `with` statement. If an exception occurs, the thread will not be joined but a well-behaved :meth:`.readFrom` implementation will terminate shortly thereafter due to the pipe having been closed. Now, exceptions in the reader thread will be reraised in the main thread: >>> class MyPipe(WritablePipe): ... def readFrom(self, readable): ... raise RuntimeError('Hello, world!') >>> with MyPipe() as writable: ... pass Traceback (most recent call last): ... RuntimeError: Hello, world! More complicated, less illustrative tests: Same as above, but proving that handles are closed: >>> x = os.dup(0); os.close(x) >>> class MyPipe(WritablePipe): ... def readFrom(self, readable): ... raise RuntimeError('Hello, world!') >>> with MyPipe() as writable: ... pass Traceback (most recent call last): ... RuntimeError: Hello, world! >>> y = os.dup(0); os.close(y); x == y True Exceptions in the body of the with statement aren't masked, and handles are closed: >>> x = os.dup(0); os.close(x) >>> class MyPipe(WritablePipe): ... def readFrom(self, readable): ... pass >>> with MyPipe() as writable: ... raise RuntimeError('Hello, world!') Traceback (most recent call last): ... RuntimeError: Hello, world! >>> y = os.dup(0); os.close(y); x == y True """ @abstractmethod def readFrom(self, readable): """ Implement this method to read data from the pipe. This method should support both binary and text mode output. :param file readable: the file object representing the readable end of the pipe. Do not explicitly invoke the close() method of the object, that will be done automatically. """ raise NotImplementedError() def _reader(self): with os.fdopen(self.readable_fh, 'rb') as readable: # TODO: If the reader somehow crashes here, both threads might try # to close readable_fh. Fortunately we don't do anything that # should be able to fail here. self.readable_fh = None # signal to parent thread that we've taken over self.readFrom(readable) self.reader_done = True def __init__(self, encoding=None, errors=None): """ The specified encoding and errors apply to the writable end of the pipe. :param str encoding: the name of the encoding used to encode the file. Encodings are the same as for encode(). Defaults to None which represents binary mode. :param str errors: an optional string that specifies how encoding errors are to be handled. Errors are the same as for open(). Defaults to 'strict' when an encoding is specified. 
""" super(WritablePipe, self).__init__() self.encoding = encoding self.errors = errors self.readable_fh = None self.writable = None self.thread = None self.reader_done = False def __enter__(self): self.readable_fh, writable_fh = os.pipe() self.writable = os.fdopen(writable_fh, 'wb' if self.encoding == None else 'wt', encoding=self.encoding, errors=self.errors) self.thread = ExceptionalThread(target=self._reader) self.thread.start() return self.writable def __exit__(self, exc_type, exc_val, exc_tb): # Closeing the writable end will send EOF to the readable and cause the reader thread # to finish. # TODO: Can close() fail? If so, whould we try and clean up after the reader? self.writable.close() try: if self.thread is not None: # reraises any exception that was raised in the thread self.thread.join() except Exception as e: if exc_type is None: # Only raise the child exception if there wasn't # already an exception in the main thread raise else: log.error('Swallowing additional exception in reader thread: %s', str(e)) finally: # The responsibility for closing the readable end is generally that of the reader # thread. To cover the small window before the reader takes over we also close it here. readable_fh = self.readable_fh if readable_fh is not None: # Close the file handle. The reader thread must be dead now. os.close(readable_fh) class ReadablePipe(ABC): """ An object-oriented wrapper for os.pipe. Clients should subclass it, implement :meth:`.writeTo` to place data into the writable end of the pipe, then instantiate the class as a context manager to get the writable end. See the example below. >>> import sys, shutil >>> class MyPipe(ReadablePipe): ... def writeTo(self, writable): ... writable.write('Hello, world!\\n'.encode('utf-8')) >>> with MyPipe() as readable: ... shutil.copyfileobj(codecs.getreader('utf-8')(readable), sys.stdout) Hello, world! Each instance of this class creates a thread and invokes the :meth:`.writeTo` method in that thread. The thread will be join()ed upon normal exit from the context manager, i.e. the body of the `with` statement. If an exception occurs, the thread will not be joined but a well-behaved :meth:`.writeTo` implementation will terminate shortly thereafter due to the pipe having been closed. Now, exceptions in the reader thread will be reraised in the main thread: >>> class MyPipe(ReadablePipe): ... def writeTo(self, writable): ... raise RuntimeError('Hello, world!') >>> with MyPipe() as readable: ... pass Traceback (most recent call last): ... RuntimeError: Hello, world! More complicated, less illustrative tests: Same as above, but proving that handles are closed: >>> x = os.dup(0); os.close(x) >>> class MyPipe(ReadablePipe): ... def writeTo(self, writable): ... raise RuntimeError('Hello, world!') >>> with MyPipe() as readable: ... pass Traceback (most recent call last): ... RuntimeError: Hello, world! >>> y = os.dup(0); os.close(y); x == y True Exceptions in the body of the with statement aren't masked, and handles are closed: >>> x = os.dup(0); os.close(x) >>> class MyPipe(ReadablePipe): ... def writeTo(self, writable): ... pass >>> with MyPipe() as readable: ... raise RuntimeError('Hello, world!') Traceback (most recent call last): ... RuntimeError: Hello, world! >>> y = os.dup(0); os.close(y); x == y True """ @abstractmethod def writeTo(self, writable): """ Implement this method to write data from the pipe. This method should support both binary and text mode input. :param file writable: the file object representing the writable end of the pipe. 
Do not explicitly invoke the close() method of the object, that will be done automatically. """ raise NotImplementedError() def _writer(self): try: with os.fdopen(self.writable_fh, 'wb') as writable: self.writeTo(writable) except IOError as e: # The other side of the pipe may have been closed by the # reading thread, which is OK. if e.errno != errno.EPIPE: raise def __init__(self, encoding=None, errors=None): """ The specified encoding and errors apply to the readable end of the pipe. :param str encoding: the name of the encoding used to encode the file. Encodings are the same as for encode(). Defaults to None which represents binary mode. :param str errors: an optional string that specifies how encoding errors are to be handled. Errors are the same as for open(). Defaults to 'strict' when an encoding is specified. """ super(ReadablePipe, self).__init__() self.encoding = encoding self.errors = errors self.writable_fh = None self.readable = None self.thread = None def __enter__(self): readable_fh, self.writable_fh = os.pipe() self.readable = os.fdopen(readable_fh, 'rb' if self.encoding == None else 'rt', encoding=self.encoding, errors=self.errors) self.thread = ExceptionalThread(target=self._writer) self.thread.start() return self.readable def __exit__(self, exc_type, exc_val, exc_tb): # Close the read end of the pipe. The writing thread may # still be writing to the other end, but this will wake it up # if that's the case. self.readable.close() try: if self.thread is not None: # reraises any exception that was raised in the thread self.thread.join() except: if exc_type is None: # Only raise the child exception if there wasn't # already an exception in the main thread raise class ReadableTransformingPipe(ReadablePipe): """ A pipe which is constructed around a readable stream, and which provides a context manager that gives a readable stream. Useful as a base class for pipes which have to transform or otherwise visit bytes that flow through them, instead of just consuming or producing data. Clients should subclass it and implement :meth:`.transform`, like so: >>> import sys, shutil >>> class MyPipe(ReadableTransformingPipe): ... def transform(self, readable, writable): ... writable.write(readable.read().decode('utf-8').upper().encode('utf-8')) >>> class SourcePipe(ReadablePipe): ... def writeTo(self, writable): ... writable.write('Hello, world!\\n'.encode('utf-8')) >>> with SourcePipe() as source: ... with MyPipe(source) as transformed: ... shutil.copyfileobj(codecs.getreader('utf-8')(transformed), sys.stdout) HELLO, WORLD! The :meth:`.transform` method runs in its own thread, and should move data chunk by chunk instead of all at once. It should finish normally if it encounters either an EOF on the readable, or a :class:`BrokenPipeError` on the writable. This means tat it should make sure to actually catch a :class:`BrokenPipeError` when writing. See also: :class:`toil.lib.misc.WriteWatchingStream`. """ def __init__(self, source, encoding=None, errors=None): """ :param str encoding: the name of the encoding used to encode the file. Encodings are the same as for encode(). Defaults to None which represents binary mode. :param str errors: an optional string that specifies how encoding errors are to be handled. Errors are the same as for open(). Defaults to 'strict' when an encoding is specified. 
""" super(ReadableTransformingPipe, self).__init__(encoding=encoding, errors=errors) self.source = source @abstractmethod def transform(self, readable, writable): """ Implement this method to ship data through the pipe. :param file readable: the input stream file object to transform. :param file writable: the file object representing the writable end of the pipe. Do not explicitly invoke the close() method of the object, that will be done automatically. """ raise NotImplementedError() def writeTo(self, writable): self.transform(self.source, writable)
apache-2.0
4,686,543,333,265,231,000
38.623053
131
0.629766
false
gary-pickens/HouseMonitor
housemonitor/inputs/zigbeeinput/xbeeinputthread.py
1
1775
''' Created on Oct 10, 2012 @author: Gary ''' import threading import os from housemonitor.inputs.zigbeeinput.beagleboneblackxbeecommunications import BeagleboneBlackXbeeCommunications from windowsxbeecommunications import WindowsXbeeCommunications from housemonitor.inputs.dataenvelope import DataEnvelope from housemonitor.lib.constants import Constants from housemonitor.lib.base import Base class UnsupportedSystemError( Exception ): pass class XBeeInputThread( Base, threading.Thread ): ''' classdocs ''' input_queue = None zigbee = None exit_flag = False communication_module = {'posix': BeagleboneBlackXbeeCommunications, 'nt': WindowsXbeeCommunications} @property def logger_name( self ): return Constants.LogKeys.inputsZigBee def __init__( self, queue ): ''' Constructor args: queue is the InputQueue ''' super( XBeeInputThread, self ).__init__() threading.Thread.__init__( self ) self.input_queue = queue def startCorrectZigbee( self, os_name=os.name ): if ( os_name in self.communication_module ): self.logger.debug( 'connect to zigbee on {}'.format( os_name ) ) self.zigbee = self.communication_module[os_name]() else: raise UnsupportedSystemError( "System {} not supported".format( os_name ) ) def run( self ): self.startCorrectZigbee() self.zigbee.connect() while True: packet = self.zigbee.read() env = DataEnvelope( Constants.EnvelopeTypes.XBEE, **packet ) self.input_queue.transmit( env, self.input_queue.MID_PRIORITY ) if ( self.exit_flag ): break
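The communication_module dictionary above picks the transport class by os.name at runtime. A small sketch of that dispatch-table pattern with placeholder classes; PosixXbee and WindowsXbee below are invented stand-ins, not the real housemonitor communication classes:

import os

class PosixXbee(object):
    def connect(self):
        print('connecting via a POSIX serial device')

class WindowsXbee(object):
    def connect(self):
        print('connecting via a Windows COM port')

COMMUNICATION_MODULE = {'posix': PosixXbee, 'nt': WindowsXbee}

class UnsupportedSystemError(Exception):
    pass

def start_correct_zigbee(os_name=os.name):
    try:
        return COMMUNICATION_MODULE[os_name]()
    except KeyError:
        raise UnsupportedSystemError('System {} not supported'.format(os_name))

if __name__ == '__main__':
    start_correct_zigbee().connect()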
mit
8,195,371,330,058,601,000
27.629032
111
0.642817
false
bobslee/orbeon-xml-api
orbeon_xml_api/tests/controls/test_currency.py
1
2103
# -*- coding: utf-8 -*- # Copyright 2017-2018 Bob Leers (http://www.novacode.nl) # See LICENSE file for full licensing details. from . import CommonTestCase from ..controls import DecimalControl class CurrencyTestCase(CommonTestCase): def setUp(self): super(CurrencyTestCase, self).setUp() self.control = self.builder.controls['currency'] def test_control(self): self.assertIsInstance(self.control, DecimalControl) def test_builder_bind(self): self.assertEqual(self.control._bind.id, 'currency-bind') self.assertEqual(self.control._bind.name, 'currency') def test_builder_parent(self): self.assertEqual(self.control._parent._bind.id, 'typed-controls-bind') self.assertEqual(self.control._parent._bind.name, 'typed-controls') self.assertEqual(self.control._parent._resource_element.label, 'Typed Controls') def test_builder_form(self): self.assertEqual(self.control.label, 'Currency') self.assertEqual(self.control.hint, 'Currency field') self.assertEqual(self.control.alert, None) self.assertEqual(self.control._resource_element.label, 'Currency') self.assertEqual(self.control._resource_element.hint, 'Currency field') # Doesn't exist, but shouldn't raise Exception self.assertEqual(self.control._resource_element.alert, None) def test_builder_form_default_value(self): self.assertEqual(self.control.default_raw_value, '10.99') self.assertEqual(self.control.default_value, 10.99) self.assertIsInstance(self.control.default_value, float) def test_runner_value(self): self.assertEqual(self.runner.get_value('currency'), 101.33) self.assertIsInstance(self.runner.get_value('currency'), float) def test_runner_form(self): self.assertEqual(self.runner.form.currency.label, 'Currency') self.assertEqual(self.runner.form.currency.value, 101.33) self.assertEqual(self.runner.form.currency.raw_value, '101.33') self.assertIsInstance(self.runner.form.currency.value, float)
mit
402,809,001,055,917,440
39.442308
88
0.700428
false
kaspermunch/CoalhmmPipeline
CoalhmmPipeline/MafJunkContentQualityFilter.py
1
1480
class MafJunkContentQualityFilter:

    def __init__(self, acceptableNpercentage, junkCharacters):
        self.acceptableNpercentage = acceptableNpercentage
        self.junkchars = junkCharacters

    def accept(self, maf):
        junkchars = self.junkchars
        for i in range(maf.count()):
            ns = 0
            total = 0
            for j in maf.data(i):
                if j in junkchars:
                    ns += 1
                total += 1
            if ns > self.acceptableNpercentage*total:
                return False
        return True


class MafIngroupJunkContentQualityFilter:

    def __init__(self, ingroup, acceptableNpercentage, junkCharacters):
        self.acceptableNpercentage = acceptableNpercentage
        self.junkchars = junkCharacters
        self.ingroup = ingroup

    def accept(self, maf):
        junkchars = self.junkchars
        for i in range(maf.count()):
            if maf.name(i) not in self.ingroup:
                continue
            ns = 0
            total = 0
            for j in maf.data(i):
                if j in junkchars:
                    ns += 1
                total += 1
            if ns > self.acceptableNpercentage*total:
                return False
        return True
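The acceptance rule above rejects an alignment block as soon as any sequence exceeds the allowed junk fraction. A worked version of that per-sequence check on plain strings (real MAF blocks expose count()/data()/name(); the plain list of strings here is an assumption for illustration):

def sequence_is_acceptable(sequence, acceptable_fraction, junk_chars='N-'):
    # Count junk characters and compare against the allowed fraction of the length.
    junk = sum(1 for base in sequence if base in junk_chars)
    return junk <= acceptable_fraction * len(sequence)

def accept_block(sequences, acceptable_fraction=0.2):
    return all(sequence_is_acceptable(s, acceptable_fraction) for s in sequences)

if __name__ == '__main__':
    print(accept_block(['ACGTACGT', 'ACGTNAGT']))   # True: worst sequence is 1/8 junk
    print(accept_block(['ACGTACGT', 'NNNNACGT']))   # False: 4/8 junk exceeds 20%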
gpl-2.0
858,707,482,881,620,200
30.489362
71
0.462162
false
clld/lexibank
lexibank/scripts/util.py
1
5246
from __future__ import unicode_literals from itertools import groupby import transaction from six import text_type from clld.db.meta import DBSession from clld.db.models.common import ValueSet from clld.scripts.util import Data from clld.lib.bibtex import EntryType, FIELDS from clldutils.dsv import reader from pycldf.dataset import Dataset from pycldf.util import MD_SUFFIX from tqdm import tqdm from lexibank.models import ( LexibankLanguage, Concept, Counterpart, Provider, CounterpartReference, LexibankSource, Cognateset, CognatesetCounterpart, ) def unique_id(contrib, local_id): return '%s-%s' % (contrib.id, local_id) def cldf2clld(source, contrib, id_): name = source.id if source.get('author'): name = source['author'] if source.get('year'): name += ' %s' % source['year'] description = source.get('title') return LexibankSource( id=unique_id(contrib, id_), provider=contrib, bibtex_type=getattr(EntryType, source.genre, EntryType.misc), name=name, description=description, **{k: v for k, v in source.items() if k in FIELDS and k not in ['institution']}) def import_dataset(ds, contrib, languoids, conceptsets, sources, values): data = Data() concepts = {p.id: p for p in DBSession.query(Concept)} langs = {l.id: l for l in DBSession.query(LexibankLanguage)} for i, row in enumerate(ds.rows): if not row['Value'] or not row['Parameter_ID'] or not row['Language_ID']: continue lid = row['Language_ID'].lower() if lid == 'none': continue if not row['Parameter_ID'].strip(): continue language = langs.get(lid) if language is None: languoid = languoids.get(lid) if not languoid: continue langs[lid] = language = LexibankLanguage( id=lid, name=languoid.name, level=text_type(languoid.level.name), latitude=languoid.latitude if languoid.id != 'plau1238' else -10, longitude=languoid.longitude) concept = concepts.get(row['Parameter_ID']) if concept is None: cs = conceptsets[row['Parameter_ID']] concepts[row['Parameter_ID']] = concept = Concept( id=row['Parameter_ID'], name=cs.gloss, description=cs.definition, semanticfield=cs.semanticfield) vsid = unique_id(contrib, '%s-%s-%s' % (ds.name, language.id, concept.id)) vid = unique_id(contrib, row['ID']) vs = data['ValueSet'].get(vsid) if vs is None: vs = data.add( ValueSet, vsid, id=vsid, parameter=concept, language=language, contribution=contrib, source=None) # FIXME: add sources! 
counterpart = values.add( Counterpart, row['ID'], id=vid, valueset=vs, name=row['Form'], description=row.get('Comment'), context=row['Value'], variety_name=row.get('Language_name'), loan=row.get('Loan', False), ) for ref in row.refs: CounterpartReference( counterpart=counterpart, source=sources[ref.source.id], description=ref.description) def import_cldf(srcdir, md, languoids, conceptsets): with transaction.manager: contrib = Provider( id=srcdir.name, name=md['dc:title'], description=md.get('dc:bibliographicCitation'), url=md.get('dc:identifier'), license=md.get('dc:license'), aboutUrl=md.get('aboutUrl'), ) DBSession.add(contrib) sources = {} cldfdir = srcdir.joinpath('cldf') values = Data() for fname in tqdm(list(cldfdir.glob('*' + MD_SUFFIX)), leave=False): ds = Dataset.from_metadata(fname) for src in ds.sources.items(): if src.id not in sources: sources[src.id] = cldf2clld(src, contrib, len(sources) + 1) import_dataset(ds, contrib, languoids, conceptsets, sources, values) DBSession.flush() # import cognates: if cldfdir.joinpath('cognates.csv').exists(): for csid, cognates in groupby( reader(cldfdir.joinpath('cognates.csv'), dicts=True), lambda i: i['Cognate_set_ID']): cs = Cognateset(id=unique_id(contrib, csid), contribution=contrib) for cognate in cognates: cp = values['Counterpart'].get(cognate['Word_ID']) if cp: DBSession.add(CognatesetCounterpart( cognateset=cs, counterpart=cp, cognate_detection_method=cognate['Cognate_detection_method'], alignment=cognate['Alignment'], alignment_method=cognate['Alignment_method'], doubt=cognate['Doubt'] == 'True'))
apache-2.0
3,760,867,208,592,166,000
35.17931
89
0.559665
false
Syncano/syncano-cli
syncano_cli/parse_to_syncano/processors/klass.py
1
7049
# -*- coding: utf-8 -*- import json import requests import six from syncano_cli.base.formatters import Formatter from syncano_cli.parse_to_syncano.migrations.aggregation import ClassAggregate from syncano_cli.parse_to_syncano.parse.constants import ParseFieldTypeE class SyncanoSchema(object): def __init__(self, class_name, schema, relations): self.class_name = class_name self.schema = schema self.relations = relations def process_relations(self): pass @property def has_relations(self): return bool(self.relations) class ClassProcessor(object): map = { 'Number': 'integer', 'Date': 'datetime', 'Boolean': 'boolean', 'String': 'string', 'Array': 'array', 'Object': 'object', 'Pointer': 'reference', 'File': 'file', 'GeoPoint': 'geopoint', 'Relation': 'relation', } original_datetime_label = 'original_{}' @classmethod def handle_value(cls, value): return value @classmethod def handle_json_value(cls, value): return json.dumps(value) @classmethod def get_fields(cls, parse_fields): fields_to_skip = ['ACL', 'self'] # TODO: handle ACL later on fields = [] for field in parse_fields: if field in fields_to_skip: continue fields.append(field.lower()) return fields @classmethod def process_object(cls, parse_object, reference_map): syncano_fields = ClassProcessor.get_fields(parse_object.keys()) processed_object = {} files = {} for key, value in six.iteritems(parse_object): if isinstance(value, dict): if '__type' in value: if value['__type'] == ParseFieldTypeE.RELATION: continue # will be handled in RelationProcessor cls._process_field_with_type(key, value, processed_object, files, reference_map) else: # and 'Object' case processed_object[key.lower()] = json.dumps(value) elif isinstance(value, list): cls._process_array_field(key, value, processed_object) else: cls._process_other_fields(key, value, processed_object, syncano_fields) return processed_object, files @classmethod def _process_field_with_type(cls, key, value, processed_object, files, reference_map): if value['__type'] == ParseFieldTypeE.DATE: processed_object[key.lower()] = value['iso'] elif value['__type'] == ParseFieldTypeE.POINTER: processed_object[key.lower()] = reference_map.get(value['objectId']) elif value['__type'] == ParseFieldTypeE.FILE: file_data = requests.get(value['url']) file_path = '/tmp/{}'.format(value['name']) with open(file_path, 'wb+') as file_d: file_d.write(file_data.content) file_descriptor = open(file_path, 'rb') files[key] = file_descriptor elif value['__type'] == ParseFieldTypeE.GEO_POINT: processed_object[key.lower()] = {'longitude': value['longitude'], 'latitude': value['latitude']} @classmethod def _process_array_field(cls, key, value, processed_object): for i, item in enumerate(value): if isinstance(item, dict): if item.get('__type') == ParseFieldTypeE.POINTER: Formatter().write('Array of pointers not supported, writing: {}'.format(item.get('objectId'))) value[i] = item['objectId'] values_list = json.dumps(value) processed_object[key.lower()] = values_list @classmethod def _process_other_fields(cls, key, value, processed_object, syncano_fields): if key.lower() in syncano_fields: if key in ['createdAt', 'updatedAt']: processed_object[cls.original_datetime_label.format(key.lower())] = value else: processed_object[key.lower()] = value @classmethod def create_schema(cls, parse_schema): """ Return syncano schema for a Class; :param parse_schema: the schema from parse; :return: the Class name and the schema used in Syncano; """ fields_to_skip = ['ACL'] # TODO: handle ACL later on class_name = 
cls.normalize_class_name(parse_schema['className']) schema = [] relations = [] for field, field_meta in six.iteritems(parse_schema['fields']): if field not in fields_to_skip: type = field_meta['type'] new_type = ClassProcessor.map[type] if type == 'Relation': if class_name == cls.normalize_class_name(field_meta['targetClass']): target = 'self' else: target = cls.normalize_class_name(field_meta['targetClass']) schema.append({ 'name': field.lower(), 'type': new_type, 'target': target }) relations.append({field: field_meta}) continue if field == 'objectId': schema.append({ 'name': field.lower(), 'type': new_type, 'filter_index': True }) continue if field in ['updatedAt', 'createdAt']: schema.append({ 'name': cls.original_datetime_label.format(field.lower()), 'type': new_type, 'filter_index': True, 'order_index': True, }) continue if new_type == 'reference': schema.append({ 'name': field.lower(), 'type': new_type, 'target': cls.normalize_class_name(field_meta['targetClass'])} ) continue schema.append({'name': field.lower(), 'type': new_type}) return SyncanoSchema(class_name=class_name, schema=schema, relations=relations) @classmethod def normalize_class_name(cls, class_name): name = class_name if name.startswith('_'): name = 'internal_' + name[1:].lower() return name @classmethod def show_class_name(cls, klass): """ Displays Class name in click progress bar. :param klass: the Class name; :return: Formatted Class name; """ if klass is not None: if isinstance(klass, ClassAggregate): return u"Class: {}".format(klass.syncano_name) elif isinstance(klass, tuple): return u"Class: {}".format(klass[0]) return u'Done.'
mit
-1,686,309,550,870,439,400
34.964286
114
0.533976
false
softak/webfaction_demo
apps/stores/models.py
1
5850
import math import decimal from django.contrib.gis.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User from django.db.models import Q from django.db.models.query import QuerySet from utils import QuerySetManager from stores.fields import ColorField class Category(models.Model): name = models.CharField(_('name'), max_length=50) icon = models.ImageField(_('icon'), upload_to='category-icons', null=True) marker = models.ImageField(_('marker'), upload_to='category-markers', null=True) class Meta: verbose_name_plural = "categories" def __unicode__(self): return self.name class Store(models.Model): user = models.OneToOneField(User, related_name='store') name = models.CharField(_('business name'), max_length=100) category = models.ForeignKey(Category, related_name='stores') location = models.PointField(_('location'), srid=4326) address = models.CharField(_('address'), max_length=1000) is_active = models.BooleanField(_('active'), default=False) window_image = models.ImageField(_('window image'), upload_to='store-images', null=True, blank=True) phone = models.CharField(_('phone'), max_length=50) paypal_email = models.EmailField(_('PayPal e-mail'), null=True, blank=True, max_length=100) objects = models.GeoManager() def __unicode__(self): return self.name @models.permalink def get_absolute_url(self): return ('stores.view_store', [str(self.id)]) def get_buyer_ids(self): from cart.models import SocialTag, PersonalTag r1 = SocialTag.objects.filter(buy__store=self) \ .values_list('user__id', flat=True).distinct() r2 = PersonalTag.objects.filter(item__store=self) \ .values_list('user__id', flat=True).distinct() return list(r1) + list(r2) def get_buyers(self): return User.objects.filter(id__in=self.get_buyer_ids()) class StoreDesign(models.Model): store = models.OneToOneField(Store, related_name='design') background_image = models.ImageField(_('Image'), upload_to='store_desings', null=True, blank=True) is_repeated = models.BooleanField(_('Repeat'), default=False) background_color = ColorField(_('Color'), default='#ffffff') class ItemQuerySet(QuerySet): def in_stock(self): return self.exclude(is_out_of_stock=True) \ .filter(Q(quantity__gt=0) | Q(quantity__isnull=True)) class Item(models.Model): store = models.ForeignKey(Store, related_name='items') name = models.CharField(_('name'), max_length=100) description = models.CharField(_('description'), max_length=1000) price = models.DecimalField(_('price'), max_digits=10, decimal_places=2) discount = models.PositiveSmallIntegerField(_('advertised discount'), default=0) # TODO make NOT NULL quantity = models.PositiveIntegerField(_('quantity'), null=True, blank=True) is_out_of_stock = models.BooleanField(_('is out of stock'), default=False) discount_group = models.ForeignKey('DiscountGroup', null=True, blank=True, on_delete=models.SET_NULL, related_name='items') objects = QuerySetManager(qs_class=ItemQuerySet) def save(self, *args, **kwargs): if not self.discount_group is None: if self.store != self.discount_group.discount.store: raise ValidationError('Item can\'t belong to specified discount group!') return super(Item, self).save(*args, **kwargs) def get_default_image(self): default_images = self.images.filter(is_default=True) if default_images.exists(): return default_images[0].image elif self.images.exists(): return self.images.all()[0].image else: return None @models.permalink def get_absolute_url(self): return ('stores.item', [str(self.id)]) def __unicode__(self): return self.name class 
DiscountGroup(models.Model): # TODO add store field? name = models.CharField(_('discount name'), max_length=100) discount = models.ForeignKey('Discount', related_name='discount_groups') class Discount(models.Model): store = models.ForeignKey(Store, related_name='discount_models') name = models.CharField(_('discount name'), max_length=100) for_additional_item = models.DecimalField(_('discount for each additional item'), max_digits=4, decimal_places=2) for_additional_buyer = models.DecimalField(_('discount for each additional buyer'), max_digits=4, decimal_places=2) lower_bound = models.DecimalField(_('lower bound'), max_digits=4, decimal_places=2) def __unicode__(self): return 'Discount: %s' % self.name def apply(self, items_number=None, buyers_number=None): discount = math.pow((100 - self.for_additional_buyer) / decimal.Decimal(100), buyers_number) * \ math.pow((100 - self.for_additional_item) / decimal.Decimal(100), items_number) return max(decimal.Decimal(discount), (100 - self.lower_bound) / decimal.Decimal(100)) class ItemImage(models.Model): item = models.ForeignKey(Item, related_name='images') image = models.ImageField(_('image'), upload_to='item-images') is_default = models.BooleanField(_('is default'), default=False) def __unicode__(self): return 'Image of %s' % self.item.name
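Discount.apply above compounds a per-extra-item and a per-extra-buyer percentage and then clamps the result at lower_bound. The same arithmetic as a standalone function with made-up example numbers (the Decimal/float mixing of the original is simplified to plain floats here):

def apply_discount(for_additional_item, for_additional_buyer, lower_bound,
                   items_number, buyers_number):
    # Each extra item/buyer multiplies the price by (100 - discount) / 100.
    factor = (((100 - for_additional_buyer) / 100.0) ** buyers_number
              * ((100 - for_additional_item) / 100.0) ** items_number)
    # Never drop below the configured floor.
    return max(factor, (100 - lower_bound) / 100.0)

if __name__ == '__main__':
    # 2% off per extra item, 3% off per extra buyer, never more than 30% off in total.
    print(apply_discount(2, 3, 30, items_number=4, buyers_number=5))    # ~0.79
    print(apply_discount(2, 3, 30, items_number=20, buyers_number=20))  # clamped at 0.70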
bsd-3-clause
-5,412,624,703,133,597,000
31.142857
104
0.621368
false
danzek/nlhbi-malware-extractor
getNLindicators.py
1
3176
#!/usr/bin/env python """ Dan O'Day, Purdue University CNIT581 Cyber Forensics of Malware Extract natural language host-based indicators from malware sample objects """ __author__ = "Dan O'Day" __credits__ = ["Dan O'Day", "Sam Liles"] __license__ = "GNU General Public License" __version__ = "0.1" __maintainer__ = "Dan O'Day" __email__ = "[email protected]" __status__ = "Development" import os import sys from malware import MalwareSample from nltk.tokenize import WordPunctTokenizer from nltk.corpus import wordnet def enumerate_files(folders): """ Iterates through supplied folder(s) for PE32 files (.dll or .exe), creates malware sample objects :param folders: folder(s) containing PE32 files (.dll or .exe) :return: list of (parsed) malware sample objects """ msl = [] # list of malware sample objects for folder in folders: for root, dirs, files in os.walk(folder): for fn in files: if fn.lower().endswith(".exe") or fn.lower().endswith(".dll"): print "---------------------------------------------------------------------------" print "analyzing", os.path.join(root, fn), "..." ms = MalwareSample(os.path.join(root, fn)) msl.append(ms) # add processed malware sample to list (msl) return msl def extract_nl_text(ms): """ Extracts and tokenizes text from malware sample object :param ms: MalwareSample object :return: list of tokenized strings found in malware sample object's internal strings list """ wpt = WordPunctTokenizer() all_tokenized_strings_in_ms = [] inside_xml_privileges = False for s in ms.strings: if 'requestedPrivileges' in s or 'This program cannot be run in DOS mode' in s: continue elif inside_xml_privileges: continue elif '<assembly xmlns' in s: inside_xml_privileges = True continue elif '</assembly>' in s: inside_xml_privileges = False continue tokenized_string = [] tokens = wpt.tokenize(s) if tokens: for t in tokens: if wordnet.synsets(t) and len(t) > 3: # had to use length to eliminate false positives tokenized_string.extend(tokens) break if tokenized_string: all_tokenized_strings_in_ms.append(tokenized_string) return all_tokenized_strings_in_ms def process_malware_sample(ms): """ Central function for calling other functions used in processing malware sample objects :param ms: MalwareSample object """ tokenized_strings_in_ms = extract_nl_text(ms) print 'Strings from', str(ms), tokenized_strings_in_ms def main(): args = sys.argv[1:] if not args: print 'No arguments specified.\nusage: ./malwareNLIndicators.py {folder(s) containing malware sample(s)}\n' sys.exit(1) malware_samples = enumerate_files(args) # returns list of malware sample objects for ms in malware_samples: process_malware_sample(ms) if __name__ == '__main__': main()
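The extraction step above keeps a string when at least one token both maps to a WordNet synset and is longer than three characters. A minimal sketch of that test, assuming NLTK is installed and the wordnet corpus has been downloaded (nltk.download('wordnet')):

from nltk.tokenize import WordPunctTokenizer
from nltk.corpus import wordnet

def looks_like_natural_language(s, min_len=3):
    tokens = WordPunctTokenizer().tokenize(s)
    # Keep the string if any token is a dictionary word of useful length.
    return any(len(t) > min_len and wordnet.synsets(t) for t in tokens)

if __name__ == '__main__':
    for s in [r'Software\Microsoft\Windows\CurrentVersion\Run', 'x86_64 0xDEADBEEF kern32']:
        print(looks_like_natural_language(s), repr(s))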
gpl-2.0
-2,411,974,937,378,561,500
30.147059
115
0.609572
false
tensorflow/agents
tf_agents/train/interval_trigger.py
1
2315
# coding=utf-8 # Copyright 2020 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Utility that Triggers every n calls.""" from typing import Callable from absl import logging class IntervalTrigger(object): """Triggers on every fixed interval. Note that as long as the >= `interval` number of steps have passed since the last trigger, the event gets triggered. The current value is not necessarily `interval` steps away from the last triggered value. """ def __init__(self, interval: int, fn: Callable[[], None], start: int = 0): """Constructs the IntervalTrigger. Args: interval: The triggering interval. fn: callable with no arguments that gets triggered. start: An initial value for the trigger. """ self._interval = interval self._original_start_value = start self._last_trigger_value = start self._fn = fn if self._interval <= 0: logging.info( 'IntervalTrigger will not be triggered because interval is set to %d', self._interval) def __call__(self, value: int, force_trigger: bool = False) -> None: """Maybe trigger the event based on the interval. Args: value: the value for triggering. force_trigger: If True, the trigger will be forced triggered unless the last trigger value is equal to `value`. """ if self._interval <= 0: return if (force_trigger and value != self._last_trigger_value) or ( value >= self._last_trigger_value + self._interval): self._last_trigger_value = value self._fn() def reset(self) -> None: """Resets the trigger interval.""" self._last_trigger_value = self._original_start_value def set_start(self, start: int) -> None: self._last_trigger_value = start
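IntervalTrigger fires whenever at least `interval` steps have passed since the value it last fired at, not only on exact multiples. A tiny standalone re-implementation of just that comparison, to make the behaviour concrete (a sketch, not the tf_agents class itself):

class SimpleIntervalTrigger(object):
    def __init__(self, interval, fn, start=0):
        self.interval = interval
        self.fn = fn
        self.last = start

    def __call__(self, value):
        # Fire once the value has moved at least `interval` past the last trigger value.
        if self.interval > 0 and value >= self.last + self.interval:
            self.last = value
            self.fn()

if __name__ == '__main__':
    fired = []
    trigger = SimpleIntervalTrigger(10, lambda: fired.append('fired'))
    for step in [3, 9, 12, 15, 23, 24]:
        trigger(step)
    print(len(fired))  # 2: once at step 12, once at step 23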
apache-2.0
1,041,133,332,050,366,700
31.605634
80
0.684233
false
kiip/statsite
tests/helpers.py
1
1340
""" Contains helper classes and methods for tests. """ from statsite.aggregator import Aggregator from statsite.collector import Collector from statsite.metrics_store import MetricsStore class DumbAggregator(Aggregator): def __init__(self, *args, **kwargs): super(DumbAggregator, self).__init__(*args, **kwargs) self.flushed = False self.metrics = [] def add_metrics(self, metrics): self.metrics.extend(metrics) def flush(self): self.flushed = True class DumbCollector(Collector): # Note that the host/port arguments are to avoid exceptions when # setting the settings in the "servers" funcarg def __init__(self, host=None, port=None, aggregator=None): super(DumbCollector, self).__init__(aggregator) pass class DumbMetricsStore(MetricsStore): # Note that the host/port arguments are to avoid exceptions when # setting the settings in the "servers" funcarg def __init__(self, host=None, port=None, prefix=None): self.data = [] def flush(self, data): self.data.extend(data) def statsite_settings(settings): """ Decorator to set the settings for Statsite for the "servers" funcarg. """ def decorator(func): func.func_dict["statsite_settings"] = settings return func return decorator
bsd-3-clause
3,805,028,084,313,234,400
26.916667
68
0.673134
false
hainm/ambertools-binary-test
devtools/ci/download_circleci_AmberTools.py
1
4060
#!/usr/bin/env python # Note: This program is for internal use (developers) and # should be never included in release tar file. # (It has our circleci private token) # require: requests (python), wget # How? # - get info without downloading # python download_circleci_AmberTools.py --info # - download # python download_circleci_AmberTools.py # What does this script do? Will download latest succesful AmberTools build on circleci # https://circleci.com/gh/Amber-MD/ambertools-ci/481 # (Note: If you are in Amber-MD organization on github, you can change settings) # Why does this matter? Collaborators (e.g: phenix) can download, untar and just use the binary distribution (dev) import os import argparse import requests import json import subprocess def get_circle_info(url): # info: List[Dict] print('url', url) x = requests.get(url) json.loads(x.content.decode()) info = json.loads(x.content.decode()) return info def download_non_conda_install_tarfiles(url_artifact_info, dry_run=False, exclude_conda=False): # require: wget info = get_circle_info(url_artifact_info) for path_dict in info: url = path_dict['url'] if exclude_conda: if 'non-conda-install' in path_dict['pretty_path']: print('Downloading ', url) if not dry_run: subprocess.check_output(['wget', url]) else: print('Downloading ', url) if not dry_run: subprocess.check_output(['wget', url]) def get_latest_build_info(url_info): info_collection = get_circle_info(url_info) info = {} for info in info_collection: if info['status'] == 'success': break keys = [ 'username', 'branch', 'author_name', 'committer_date', 'has_artifacts', 'build_url', 'vcs_url', 'status', 'build_num', 'all_commit_details', ] for k in keys: if k == 'all_commit_details': details = info.get(k)[0] print(details['body']) else: print(k, info.get(k)) print("") def main(): parser = argparse.ArgumentParser( description='Download/get-info binary builds from circleci', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-r', '--repo', default='ambertools-ci', help="(default: %(default)s)") parser.add_argument( '-b', '--branch', default='nightly', help="(default: %(default)s)") parser.add_argument( '--build-num', default='latest', help="(default: %(default)s)") parser.add_argument( '--info', action='store_true', help='Only show information without downloading') parser.add_argument( '--exclude-conda', action='store_true', help='Exclude conda build') parser.add_argument('-d', '--dry-run', action='store_true', help='dry run') args = parser.parse_args() repo = args.repo branch = args.branch build_num = args.build_num url_info = "https://circleci.com/api/v1.1/project/github/Amber-MD/{repo}/tree/{branch}".format( repo=repo, branch=branch) # token was generated by visiting: https://circleci.com/account/api # (private env) token = os.getenv('AMBERTOOLS_TOKEN') base_dir = 'https://circleci.com/api/v1.1/project/github/Amber-MD/' my_branch = '{repo}/{build_num}/artifacts?circle-token={token}&branch={branch}&filter=successful'.format( token=token, repo=repo, branch=branch, build_num=build_num) url_artifact_info = base_dir + my_branch get_latest_build_info(url_info) if args.dry_run: print('Dry run') if not args.info: download_non_conda_install_tarfiles(url_artifact_info, dry_run=args.dry_run, exclude_conda=args.exclude_conda) else: print('skip downloading since --info is given') if __name__ == '__main__': main()
mit
-244,840,541,640,465,920
29.757576
114
0.608374
false
Ecpy/ecpy_hqc_legacy
exopy_hqc_legacy/tasks/tasks/instr/apply_mag_field_task.py
1
3238
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details. # # Distributed under the terms of the BSD license. # # The full license is in the file LICENCE, distributed with this software. # ----------------------------------------------------------------------------- """Task to apply a magnetic field. """ from time import sleep import numbers from atom.api import (Unicode, Float, Bool, set_default) from exopy.tasks.api import InstrumentTask, validators class ApplyMagFieldTask(InstrumentTask): """Use a supraconducting magnet to apply a magnetic field. Parallel task. """ # Target magnetic field (dynamically evaluated) field = Unicode().tag(pref=True, feval=validators.SkipLoop(types=numbers.Real)) # Rate at which to sweep the field. rate = Float(0.01).tag(pref=True) # Whether to stop the switch heater after setting the field. auto_stop_heater = Bool(True).tag(pref=True) # Time to wait before bringing the field to zero after closing the switch # heater. post_switch_wait = Float(30.0).tag(pref=True) parallel = set_default({'activated': True, 'pool': 'instr'}) database_entries = set_default({'field': 0.01}) def check_for_interruption(self): """Check if the user required an interruption. """ return self.root.should_stop.is_set() def perform(self, target_value=None): """Apply the specified magnetic field. """ # make ready if (self.driver.owner != self.name or not self.driver.check_connection()): self.driver.owner = self.name if target_value is None: target_value = self.format_and_eval_string(self.field) driver = self.driver normal_end = True if (abs(driver.read_persistent_field() - target_value) > driver.output_fluctuations): job = driver.sweep_to_persistent_field() if job.wait_for_completion(self.check_for_interruption, timeout=60, refresh_time=1): driver.heater_state = 'On' else: return False # set the magnetic field job = driver.sweep_to_field(target_value, self.rate) normal_end = job.wait_for_completion(self.check_for_interruption, timeout=60, refresh_time=10) # Always close the switch heater when the ramp was interrupted. if not normal_end: job.cancel() driver.heater_state = 'Off' self.write_in_database('field', driver.read_persistent_field()) return False # turn off heater if self.auto_stop_heater: driver.heater_state = 'Off' sleep(self.post_switch_wait) job = driver.sweep_to_field(0) job.wait_for_completion(self.check_for_interruption, timeout=60, refresh_time=1) self.write_in_database('field', target_value)
bsd-3-clause
-6,838,290,255,176,113,000
34.582418
79
0.564237
false
subuk/xtnews
xtnews/views.py
1
2322
import transaction from pyramid.view import view_config from pyramid.exceptions import NotFound from pyramid.httpexceptions import HTTPNoContent, HTTPCreated from forms import NewsForm from xtnews import models class BaseView(object): def __init__(self, request): self.request = request self.request.response.content_type = 'application/xml' self.db = models.DBSession() class Collection(BaseView): def get_object_list(self): return self.db.query(models.News).all() @view_config(route_name='list', renderer='object_list.xml.jinja2', request_method='GET') def get(self): return { 'object_list': (x.as_dict() for x in self.get_object_list()) } @view_config(route_name='list', renderer='object_form_errors.xml.jinja2', request_method='POST') def post(self): form = NewsForm(**self.request.POST) if not form.validate(): self.request.response.status = 400 return {'form': form} obj = models.News() form.populate_obj(obj) self.db.add(obj) transaction.commit() return HTTPCreated() class Item(BaseView): def get_object(self, object_id): obj = self.db.query(models.News).get(object_id) if not obj: raise NotFound("Object with id %s not found" % object_id) return obj @view_config(route_name='item', renderer='object.xml.jinja2', request_method='GET') def get(self): obj = self.get_object(self.request.matchdict['id']) return { 'object': obj.as_dict(), } @view_config(route_name='item', renderer='object.xml.jinja2', request_method='DELETE') def delete(self): obj = self.get_object(self.request.matchdict['id']) self.db.delete(obj) transaction.commit() return HTTPNoContent() @view_config(route_name='item', renderer='object_form_errors.xml.jinja2', request_method='PUT') def put(self): form = NewsForm(**self.request.POST) if not form.validate(): self.request.response.status = 400 return {'form': form} obj = self.get_object(self.request.matchdict['id']) form.populate_obj(obj) self.db.add(obj) transaction.commit() return HTTPNoContent()
mit
-6,245,548,743,591,391,000
30.808219
100
0.624462
false
akshayparopkari/phylotoast
bin/filter_ambiguity.py
2
3638
''' Created on Dec 6, 2012 Author: Shareef M. Dabdoub ''' import sys try: from Bio import SeqIO except ImportError as ie: sys.exit('Import Error. Please install missing module: {}'.format(ie)) import argparse from Bio import SeqIO from Bio.Seq import Seq def filter_ambiguity(records, percent=0.5): # , repeats=6) """ Filters out sequences with too much ambiguity as defined by the method parameters. :type records: list :param records: A list of sequences :type repeats: int :param repeats: Defines the number of repeated N that trigger truncating a sequence. :type percent: float :param percent: Defines the overall percentage of N in a sequence that will cause the sequence to be filtered out. """ seqs = [] # Ns = ''.join(['N' for _ in range(repeats)]) count = 0 for record in records: if record.seq.count('N')/float(len(record)) < percent: # pos = record.seq.find(Ns) # if pos >= 0: # record.seq = Seq(str(record.seq)[:pos]) seqs.append(record) count += 1 return seqs, count def handle_program_options(): """ Uses the built-in argparse module to handle command-line options for the program. :return: The gathered command-line options specified by the user :rtype: argparse.ArgumentParser """ parser = argparse.ArgumentParser(description="Filter an input \ FASTA-formatted file to \ remove or truncate sequences\ based on ambiguous base (N) \ content.") parser.add_argument('--version', action='version', version='Sequence Ambiguity Filter v0.1') parser.add_argument('fasta_input', help="QIIME-formatted mapping file \ linking Sample IDs with barcodes \ and primers.") parser.add_argument('-o', '--output', default='output.fna', help='The name of the file to output the set of \ filtered sequences. Default: \'output.fna\'.') # parser.add_argument('-r', '--repeats', type=int, default=6, # help='Truncates a sequence when a string of ambiguous \ # bases (N) of REPEATS or longer is found. \ # Default: REPEATS=6.') parser.add_argument('-p', '--percent', type=int, default=5, help='Removes any sequence containing the specified \ percentage (or greater) of ambiguous bases (N).\ Default: PERCENT=5') parser.add_argument('-v', '--verbose', action='store_true') return parser.parse_args() def main(): args = handle_program_options() try: with open(args.fasta_input): pass except IOError as ioe: sys.exit('\nError with QIIME formatted mapping file:{}\n'.format(ioe)) with open(args.fasta_input, 'rU') as inF: in_records = SeqIO.parse(inF, 'fasta') records, count = filter_ambiguity(in_records, args.percent/100.0) # , args.repeats) SeqIO.write(records, args.output, "fasta") if args.verbose: print '%i sequences found.' % count print '%i sequences kept.' % len(records) print print 'Output written to: %s' % args.output if __name__ == '__main__': main()
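The filter keeps a record only while its fraction of ambiguous bases (N) stays below the threshold. The same rule as a worked example on plain strings, leaving out the Biopython record handling the script uses:

def keep_sequence(seq, max_n_fraction=0.05):
    # Reject the read when N content reaches the allowed fraction.
    return seq.count('N') / float(len(seq)) < max_n_fraction

if __name__ == '__main__':
    reads = ['ACGTACGTACGTACGTACGT',    # 0% N -> kept
             'ACGTNNNNACGTACGTACGT']    # 20% N -> filtered out
    kept = [r for r in reads if keep_sequence(r)]
    print('%i sequences found.' % len(reads))
    print('%i sequences kept.' % len(kept))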
mit
-8,652,287,533,092,176,000
35.38
92
0.551127
false
8devices/IoTPy
IoTPy/transport.py
1
1376
import socket

from IoTPy.detect_sfp_serial import detect_sfp_serial
from IoTPy.errors import IoTPy_IOError


class SocketTransport(object):

    def __init__(self, host='127.0.0.1', port=7777):
        self.host = host
        self.port = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print(self.host, self.port)
        self.socket.connect((self.host, self.port))

    def read(self):
        return self.socket.recv(1024)

    def write(self, data):
        self.socket.send(data)

    def close(self):
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()


class SerialTransport(object):

    def __init__(self, serial_port=None, uid=None):
        self.serial_port = serial_port
        if not self.serial_port:
            self.serial_port = detect_sfp_serial(uid)

    def read(self):
        try:
            data = self.serial_port.read(1)  # read one, blocking
            n = self.serial_port.inWaiting()  # look if there is more
            if n:
                data = data + self.serial_port.read(n)  # and get as much as possible
            return data
        except IOError:
            pass
        except TypeError:
            pass
        return None

    def write(self, data):
        self.serial_port.write(data)

    def close(self):
        self.serial_port.close()
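Both transports expose the same small read/write/close surface. One way to exercise that surface without hardware or a running server is a connected socket pair standing in for the remote end; socket.socketpair and the PairTransport class below exist purely for this demonstration:

import socket

class PairTransport(object):
    # Same read/write/close surface as SocketTransport, wired to one end of a socketpair.
    def __init__(self, sock):
        self.socket = sock

    def read(self):
        return self.socket.recv(1024)

    def write(self, data):
        self.socket.send(data)

    def close(self):
        self.socket.close()

if __name__ == '__main__':
    local, remote = socket.socketpair()
    transport = PairTransport(local)
    transport.write(b'ping')
    print(remote.recv(1024))   # the stand-in device sees b'ping'
    remote.send(b'pong')
    print(transport.read())    # and the transport reads b'pong' back
    transport.close()
    remote.close()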
mit
7,456,966,032,519,541,000
27.081633
86
0.585029
false
faddai/newfies-dialer
newfies/survey/urls.py
1
1222
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.conf.urls.defaults import *
from django.conf import settings

from survey.views import *


urlpatterns = patterns('',
    # Survey urls
    (r'^survey/$', 'survey.views.survey_list'),
    (r'^survey_grid/$', 'survey.views.survey_grid'),
    (r'^survey/add/$', 'survey.views.survey_add'),
    (r'^survey/del/(.+)/$', 'survey.views.survey_del'),
    (r'^survey/(.+)/$', 'survey.views.survey_change'),
    (r'^survey_finestatemachine/$', 'survey.views.survey_finestatemachine'),
    (r'^survey_report/$', 'survey.views.survey_report'),

    # Audio urls
    (r'^audio/$', 'survey.views.audio_list'),
    (r'^audio_grid/$', 'survey.views.audio_grid'),
    (r'^audio/add/$', 'survey.views.audio_add'),
    (r'^audio/del/(.+)/$', 'survey.views.audio_del'),
    (r'^audio/(.+)/$', 'survey.views.audio_change'),
)
mpl-2.0
3,954,223,356,471,408,000
30.333333
76
0.6473
false
analphagamma/SorosTracker9000
SCrawler.py
1
4913
#!/usr/bin/env python import re import json import requests from datetime import date from bs4 import BeautifulSoup class SorosCrawler(object): '''This object is a crawler that -opens the website -gets all links from it -picks out the links that have "soros" in them To instantiate the object provide a name (i.e. the website name) and an URL Example: obj = SorosCrawler('Best News Site', 'http://www.bestnewssite.com')''' def __init__(self, source_name, url): self.source_name = source_name self.url = url def get_links(self): '''opens the website and gets all links from the html source Returns a set of the links to prevent duplication''' html = requests.get(self.url).content soup = BeautifulSoup(html, 'html.parser') all_links = [] for link in soup.find_all('a'): all_links.append(link.get('href')) return set(all_links) #all the hyperlinks from the website + duplicates removed def keyword_filter(self, keyword, href_link): '''uses regex to find the keyword in the link then transform the link to a valid hyperlink returns an url in string format''' if re.search(keyword, href_link.lower()) and not re.search('cth', href_link.lower()): if href_link[0] == '/': #no double slashes href_link = href_link.strip('/') if href_link[:3] == 'www': href_link = 'http://' + href_link if not re.search(self.url, href_link): #sometimes the href doesn't have the full, absolute path href_link = self.url + href_link return href_link else: return None def parse_links(self): '''Picks all links that have 'soros' in them If the href is incomplete it adds 'http:' to the front''' links = [] for link in self.get_links(): try: re.search('soros', link) except TypeError: pass else: #search for the word soros in link link = self.keyword_filter('soros', link) if link != None: print(link) try: html = requests.get(link).content except: print('Requests encountered an error with the link:\n', link) else: soup = BeautifulSoup(html, 'html.parser') links.append((soup.title.string.strip('\n').strip(), link)) return links #list of tuples (article title, article link) def simple_log(source_website, links): '''updates the JSON log with today's articles''' with open('tweet_log.json', 'r+') as f: tweet_log = json.load(f) try: #if there's no entry for today it creates key tweet_log[str(date.today())] except KeyError: tweet_log[str(date.today())] = {} tweet_log[str(date.today())][source_website] = links with open('tweet_log.json', 'w+') as f: json.dump(tweet_log, f) print('Links for {} logged for {}'.format(source_website, date.today())) f.close() def crawl_websites(websites): '''collects all the articles from all sources then picks out the sources that has the most articles [In] -> takes a dictionary as an argument {source name: link to source's main page} [Out] -> dict {source: list of links}''' todays_articles = {} for name, url in websites.items(): '''Iterating through all the news sources Scraping links Logging links to date''' obj = SorosCrawler(name, url) print('\nGetting news from {}'.format(name)) todays_links = obj.parse_links() print('Number of articles found: ', len(todays_links)) #logging links with open('links.txt', 'r+') as f: linkdb = f.read().split('\n') rejected_links = [] for link in todays_links: print(link) #removing the ones that are already in the db if link[1] in linkdb: rejected_links.append(link) print('Link already in list') else: print('New article found.') for link in rejected_links: todays_links.remove(link) simple_log(name, todays_links) todays_articles[name] = todays_links return todays_articles if __name__ == 
'__main__': NEWS_SOURCES = {'Magyar Hírlap': 'http://magyarhirlap.hu/', 'Hirado.hu': 'http://www.hirado.hu/', 'Magyar Idők': 'http://magyaridok.hu/', 'Origo.hu': 'http://www.origo.hu/', '888.hu': 'http://888.hu/'} print(crawl_websites(NEWS_SOURCES))
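keyword_filter above both matches the keyword and normalizes relative or schemeless hrefs into absolute URLs. A self-contained sketch of that normalization with a hypothetical base URL (the real method also drops hrefs matching 'cth'):

import re

def normalize_href(href, base_url, keyword='soros'):
    # Only keep links that mention the keyword (case-insensitive).
    if not re.search(keyword, href.lower()):
        return None
    if href.startswith('/'):
        href = href.lstrip('/')
    if href.startswith('www'):
        href = 'http://' + href
    if base_url not in href:
        # Relative path: anchor it to the site's main page.
        href = base_url + href
    return href

if __name__ == '__main__':
    base = 'http://www.example-news-site.hu/'   # hypothetical source, not one of NEWS_SOURCES
    for link in ['/belfold/soros-cikk',
                 'http://www.example-news-site.hu/soros-terv',
                 '/sport/meccs']:
        print(normalize_href(link, base))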
bsd-2-clause
-5,828,904,765,428,295,000
33.342657
93
0.556302
false
lungsi/cerebellum-unit
cerebunit/validation_tests/cells/PurkinjeCell/test_for_quasilinear_behavior.py
1
15037
# ============================================================================ # test_for_quasilinear_behavior.py # # created 06 September 2017 Lungsi # modified 09 October 2017 Lungsi # # ============================================================================ # import sciunit import quantities as pq from elephant.statistics import mean_firing_rate as mfr # from cerebunit.capabilities.cells.response import ProducesSpikeTrain from cerebunit.score_manager import BinaryScore, OverallBinaryScore # # class QuasiLinearTest(sciunit.Test, BinaryScore, OverallBinaryScore): ''' The QuasiLinear Test is a test where the model is injected with currents. First the model is injected with increasing currents in steps. This is followed by decreasing currents (same amplitudes). For each respective amplitude current injection the mean spiking frequencies are compared. The Binary score is 1 if the frequencies are different. The OverallBinary score is 1 if this is the case for all the amplitudes. ''' required_capabilities = (ProducesSpikeTrain,) score_type = OverallBinaryScore # # def generate_prediction(self, model, verbose=False): ''' Generates spike train from "vm_soma", cell region. The function is automatically called by sciunit.Test whic this test is a child of. Therefore as part of sciunit generate_prediction is mandatory. ''' # ============Ramp Up and then Down Step Currents (nA)============== self.ramp_up_down_currents = \ { #"current1": {"amp": 0.6, "dur": 100.0, "delay": 100.0}, #"current2": {"amp": 0.8, "dur": 100.0, "delay": 200.0}, #"current3": {"amp": 1.0, "dur": 100.0, "delay": 300.0}, #"current4": {"amp": 0.8, "dur": 100.0, "delay": 400.0}, #"current5": {"amp": 0.6, "dur": 100.0, "delay": 500.0}, "current1": {"amp": 0.4, "dur": 250.0, "delay": 250.0}, "current2": {"amp": 0.8, "dur": 250.0, "delay": 500.0}, "current3": {"amp": 1.2, "dur": 250.0, "delay": 750.0}, "current4": {"amp": 1.6, "dur": 250.0, "delay": 1000.0}, "current5": {"amp": 1.2, "dur": 250.0, "delay": 1250.0}, "current6": {"amp": 0.8, "dur": 250.0, "delay": 1500.0}, "current7": {"amp": 0.4, "dur": 250.0, "delay": 1750.0} } stimulus = \ model.set_stimulation_properties( self.ramp_up_down_currents ) # below line is necessary for the simulation to run "correctly" [ stimulus[i].loc(0.5, sec=model.cell.soma) \ for i in range(len(stimulus)) ] # ============================================================= self.setup_parameters = { "dt": 0.025, "celsius": 37, "tstop": 2250, "v_init": -65 } #"tstop": 600, "v_init": -65 } model.set_simulation_properties(self.setup_parameters) # ============================================================= model.produce_spike_train() return model # # def get_spike_train_for_each_current(self, model): ''' The model.produce_spike_train() results in spike train for all the current inject in one place model.predictions["spike_train"]["vm_soma"] This function slices the spike train for ramp-up current phase and ramp-down current phases. And for each ramp spike trains for each respective current amplitude is stored in a dictionary such that ramp_currents = {"currentid": sliced_spike_train, ... 
} =======================Use Case================================ ramp_up_train, ramp_down_train = \ self.get_spike_train_for_each_current(model) =============================================================== This function is called by process_prediction ''' last_I_id = len(self.ramp_up_down_currents) # get all the spike train for desired cell region cell_region = "vm_soma" response_type = "spike_train" all_spike_train = model.predictions[response_type][cell_region] # # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # +++++++Spike trains for current0 => no current injection++++++ # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # # ====================Setup For Ramp-Up========================= # set the time boundaries spike_start = 0.0 spike_stop = self.ramp_up_down_currents["current1"]["delay"] # get the spike train for the time boundaries ramp_up_spike_train_for = \ { "current0": all_spike_train.time_slice(spike_start, spike_stop) } # # ====================Setup For Ramp-Down====================== # set the time boundaries spike_start = \ self.ramp_up_down_currents["current"+str(last_I_id)]["delay"] \ + self.ramp_up_down_currents["current"+str(last_I_id)]["dur"] spike_stop = self.setup_parameters["tstop"] # get the spike train for the time boundaries ramp_down_spike_train_for = \ { "current0": all_spike_train.time_slice(spike_start, spike_stop) } # # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # ++Spike trains for currenti for each ith current injections++ # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # # ==========Setup indices for Ramp-Up and Ramp-Down============ # total number of injections no_of_Iclamps = len(self.ramp_up_down_currents) ramp_up_start_idx = 1 # first current is current1 ramp_down_stop_idx = no_of_Iclamps # last current is currentN if no_of_Iclamps % 2 == 0: # there is no largest current in the middle of ramp-up/down # that is, all the currents are repeated in ramp-down # so the middle current is the last current in ramp-up ramp_up_stop_idx = no_of_Iclamps / 2 # so ramp-down current starts from ramp-up last current + 1 ramp_down_start_idx = ramp_up_stop_idx + 1 else: # the largest current is the middle of ramp-up & ramp-down # so the last ramp-up current is the one before the largest ramp_up_stop_idx = (no_of_Iclamps - 1) / 2 # so ramp-down current starts from ramp-up last current + 1 ramp_down_start_idx = ramp_up_stop_idx + 2 # # create list of current indices from current1 for both ramps ramp_down_indices = \ [ k for k in range(ramp_down_stop_idx+1) if k >= ramp_down_start_idx and k <= ramp_down_stop_idx ] # # Note: ramp_down_stop_idx is not the last currentID in ramp-down # The last currentID in ramp-down = first currentID in ramp-up ramp_down_indices.reverse() # This is done as follows: # ============Loop through each current injection============== no_of_I_per_ramp = len(ramp_down_indices) # ramp-up = ramp-down for i in range(no_of_Iclamps): # currentID in self.ramp_up_down_currents start from current1 idx = i+1 # get current stimulation parameters for currenti inj_times = self.ramp_up_down_currents["current"+str(idx)] # lower bound of the time boundary spike_start = inj_times["delay"] # upper bound of the time boundary spike_stop = spike_start + inj_times["dur"] # if the current stimulation is during ramp-up phase # i.e idx in ramp_up_indices if idx <= ramp_up_stop_idx: # slice the spike train from total spike train into a # dictionary with respective currenti tag spike_train = \ { 
"current"+str(idx): all_spike_train.time_slice(spike_start, spike_stop) } # add the dictionary into the dictionary for ramp-up trains ramp_up_spike_train_for.update(spike_train) # on the other hand if the stimulation is during ramp-down # do the above and add the dictionary inot ramp-down trains elif idx in ramp_down_indices: dwn_idx = ramp_down_indices.index(idx)+1 spike_train = \ { "current"+str(dwn_idx): # 0 is reserved for no injection all_spike_train.time_slice(spike_start, spike_stop) } ramp_down_spike_train_for.update(spike_train) # ============================================================ # return the dictionaries for both ramp-up and ramp-down phases return ramp_up_spike_train_for, ramp_down_spike_train_for # # def get_prediction_for_each_current(self, ramp_spike_train): ''' For a given ramp (up or down) dictionary of spike trains tagged with respective current id their mean frequencies are calculated and its magnitude is stored in a dictionary of the form {currentid: {mean_freq: magnitude}} for all the currents in A ramp. ========================Use Case=============================== ramp_up_mean_spike_freq = \ self.get_prediction_for_each_current(ramp_up_spike_train) =============================================================== This function is called by process_prediction ''' ramp_mean_spike_freq = {} for current_id, spike_array in ramp_spike_train.iteritems(): x = mfr(spike_array) y = {current_id: {"mean_freq": x.rescale(pq.Hz).item()} } # just the magnitude ramp_mean_spike_freq.update(y) return ramp_mean_spike_freq # # def process_prediction(self, model): ''' Once the model has run, this function can be used to process the spike_train prediction to get the prediction of interest, mean firing rate. =======================Use Case=============================== ramp_up_freq, ramp_down_freq = process_prediction(model) ============================================================== This function is called by compute_score ''' # First, # get spike trains for respective currents during both # ramp Up and ramp Down stages ramp_up_spike_train, ramp_down_spike_train = \ self.get_spike_train_for_each_current(model) # # Now for each ramps get the spike frequencies # For Ramp-Up stage # compute and store mean firing rate for each spike train ramp_up_mean_spike_freq = \ self.get_prediction_for_each_current(ramp_up_spike_train) # For Ramp-Down stage # compute and store mean firing rate for each spike train ramp_down_mean_spike_freq = \ self.get_prediction_for_each_current(ramp_down_spike_train) # For both Ramp-Up and Ramp-Down # Return the mean firing rates (respective currents) return ramp_up_mean_spike_freq, ramp_down_mean_spike_freq # # def compute_score(self, observation, model, verbose=False): ''' This function is like generate_prediction. It is therefore called automatically by sciunit which this test is a child of. This function with the same name compute_score is also therefore mandatory. 
''' # Since the model has already run, call process_prediction # to get the spike freqs for ramp up and ramp down phases ramp_up_mean_spike_freq_for, ramp_down_mean_spike_freq_for = \ self.process_prediction(model) score_breakdown = {} # store here the score breakdowns list_of_scores = [] # store here the list of scores # =======Loop through each current id in ramp up phase====== # Note: this includes current0, no injection for current_id in ramp_up_mean_spike_freq_for.keys(): # take corresponding freq at ramp up as observation raw_observation = ramp_up_mean_spike_freq_for[current_id] observation = \ { "inequality": "!= " + str(raw_observation["mean_freq"]) } # if this current id is also in ramp down phase if current_id in ramp_down_mean_spike_freq_for.keys(): # take corresponding freq at ramp down as prediction a_prediction = ramp_down_mean_spike_freq_for[current_id] # get their Binary score x = BinaryScore.compute( observation, a_prediction ) y = BinaryScore(x) # Create details to be added in score_breakdown dict step_up_freq = \ "stepUp = "+str(raw_observation["mean_freq"])+" Hz" step_down_freq = \ "stepDown = "+str(a_prediction["mean_freq"])+" Hz" if current_id=="current0": score_detail = { current_id: [ "0 nA", step_up_freq, step_down_freq, y ] } else: amp = \ self.ramp_up_down_currents[current_id]["amp"] score_detail = { current_id: [ str(amp)+" nA", step_up_freq, step_down_freq, y ] } # For the respective current id # Store the score breakdown in the dictionary score_breakdown.update(score_detail) # Store the score in the list list_of_scores.append(y.score) # Send all the scores and its breakdown to get OverallBinary score x2 = OverallBinaryScore.compute( list_of_scores, score_breakdown ) score = OverallBinaryScore(x2) if score.score==1: score.description = "The model " + model.name + " passed the " + self.__class__.__name__ + ". The mean spike frequencies of a given amplitude of injection during ramp-up phase is different from those during ramp-down phase." else: score.description = "The model " + model.name + " failed the " + self.__class__.__name__ + ". The mean spike frequencies of an (or many) amplitude of injection are similar for ramp-up phase versus ramp-down phase." print score.description return score
bsd-3-clause
1,648,019,135,874,678,500
50.67354
236
0.525304
false
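The QuasiLinearTest above slices one long spike train into per-current windows and compares the mean firing rate of each injection amplitude between the ramp-up and ramp-down phases. A minimal sketch of that comparison, assuming spike times are available as a plain list of floats in milliseconds (no sciunit, elephant or NEURON objects); the window layout mirrors ramp_up_down_currents from the test, everything else is an assumption:

def mean_rate(spike_times, start, stop):
    """Mean firing rate (Hz) of spikes falling in [start, stop), times in ms."""
    n = sum(1 for t in spike_times if start <= t < stop)
    return n / ((stop - start) / 1000.0)

ramp_up_down_currents = {
    "current1": {"amp": 0.4, "dur": 250.0, "delay": 250.0},
    "current2": {"amp": 0.8, "dur": 250.0, "delay": 500.0},
    "current3": {"amp": 1.2, "dur": 250.0, "delay": 750.0},
    "current4": {"amp": 1.6, "dur": 250.0, "delay": 1000.0},
    "current5": {"amp": 1.2, "dur": 250.0, "delay": 1250.0},
    "current6": {"amp": 0.8, "dur": 250.0, "delay": 1500.0},
    "current7": {"amp": 0.4, "dur": 250.0, "delay": 1750.0},
}

def compare_ramps(spike_times, currents=ramp_up_down_currents):
    """Return {amplitude: (rate during ramp-up, rate during ramp-down)} for repeated amplitudes."""
    rates = {}
    for params in currents.values():
        rate = mean_rate(spike_times, params["delay"], params["delay"] + params["dur"])
        rates.setdefault(params["amp"], []).append(rate)
    # Amplitudes injected twice appear once on the way up and once on the way down.
    return {amp: (r[0], r[-1]) for amp, r in rates.items() if len(r) == 2}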
lneuhaus/pyrpl
pyrpl/test/test_registers.py
1
7911
import logging logger = logging.getLogger(name=__name__) from pyrpl.modules import Module from pyrpl.attributes import * from .test_redpitaya import TestRedpitaya class TestRegisters(TestRedpitaya): """ This test verifies that all registers behave as expected. The test is not only useful to test the python interface, but also checks that the fpga is not behaving stragely, i.e. loosing data or writing the wrong data. Thus, it is the principal test to execute on new fpga designs. """ def test_generator(self): if self.r is None: assert False for modulekey, module in self.r.__dict__.items(): if isinstance(module, Module): logger.info("Scanning module %s...", modulekey) for regkey, regclass in type(module).__dict__.items(): if isinstance(regclass, BaseRegister): logger.info("Scanning register %s...", regkey) yield self.register_validation, module, modulekey, \ regclass, regkey def register_validation(self, module, modulekey, reg, regkey): logger.debug("%s %s", modulekey, regkey) if type(reg) is BaseRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents an int if not isinstance(value, int): assert False, 'wrong type: int != %s' % str(type(value)) # write back to it to test setter module.__setattr__(regkey, value) newvalue = module.__getattribute__(regkey) assert value == newvalue, \ "Mismatch: value=" + str(value) + " new value = " + str( newvalue) if type(reg) is LongRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents an int if not isinstance(value, int) and not isinstance(value, long): assert False, 'wrong type: int/long != %s' % str(type(value)) # write back to it to test setter module.__setattr__(regkey, value) newvalue = module.__getattribute__(regkey) if regkey not in ["current_timestamp"]: assert value == newvalue, "Mismatch: value=" + str(value) \ + " new value = " + str(newvalue) if type(reg) is BoolRegister or type(reg) is IORegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents an int if type(value) != bool: assert False # exclude read-only registers if regkey in ['_reset_writestate_machine', '_trigger_armed', '_trigger_delay_running', 'pretrig_ok', 'armed', 'on']: return # write opposite value and confirm it has changed module.__setattr__(regkey, not value) if value == module.__getattribute__(regkey): assert False # write back original value and check for equality module.__setattr__(regkey, value) if value != module.__getattribute__(regkey): assert False if type(reg) is FloatRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents a float if not isinstance(value, float): assert False # exclude read-only registers if regkey in ['pfd_integral', 'ch1_firstpoint', 'ch2_firstpoint', 'voltage_out1', 'voltage_out2', 'voltage_in1', 'voltage_in2', 'firstpoint', 'lastpoint' ] or modulekey == 'sampler': return # write something different and confirm change if value == 0: write = 1e10 else: write = 0 module.__setattr__(regkey, write) if value == module.__getattribute__(regkey): assert False # write sth negative write = -1e10 module.__setattr__(regkey, write) if module.__getattribute__(regkey) >= 0: if reg.signed: assert False else: # unsigned registers should use absolute value and # therefore not be zero when assigned large negative values if module.__getattribute__(regkey) == 0: assert False # set back original value module.__setattr__(regkey, value) if value != module.__getattribute__(regkey): assert False if type(reg) is 
PhaseRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents a float if not isinstance(value, float): assert False # make sure any random phase has an error below 1e-6 degrees ! if regkey not in ['scopetriggerphase']: for phase in np.linspace(-1234, 5678, 90): module.__setattr__(regkey, phase) diff = abs(module.__getattribute__(regkey) - (phase % 360)) bits = getattr(module.__class__, regkey).bits thr = 360.0/2**bits/2 # factor 2 because rounding is used if diff > thr: assert False, \ "at phase " + str(phase) + ": diff = " + str(diff) # set back original value module.__setattr__(regkey, value) if value != module.__getattribute__(regkey): assert False if type(reg) is FrequencyRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents a float if not isinstance(value, float): assert False # make sure any frequency has an error below 100 mHz! if regkey not in []: for freq in [0, 1, 10, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 125e6 / 2]: # FrequencyRegisters are now limited. module.__setattr__(regkey, freq) diff = abs(module.__getattribute__(regkey) - freq) if diff > 0.1: assert False, \ "at freq " + str(freq) + ": diff = " + str(diff) # set back original value module.__setattr__(regkey, value) if value != module.__getattribute__(regkey): assert False if type(reg) is SelectRegister: # try to read value = module.__getattribute__(regkey) # make sure Register represents an int if not isinstance((sorted(reg.options(module))[0]), type(value)): assert False # exclude read-only registers if regkey in ["id"]: return # try all options and confirm change that they are saved for option in sorted(reg.options(module)): module.__setattr__(regkey, option) if option != module.__getattribute__(regkey): assert False # set back original value module.__setattr__(regkey, value) if value != module.__getattribute__(regkey): assert False return
gpl-3.0
-5,935,855,645,894,125,000
44.734104
79
0.502591
false
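Most branches of register_validation above follow the same round-trip pattern: read the register, write the value back, and assert that the readback matches, with extra tolerance for phase and frequency quantisation. A stripped-down sketch of that pattern, using an ordinary Python object in place of a RedPitaya module (FakeModule and its attribute names are assumptions made for illustration):

def roundtrip_check(module, attribute_names):
    """Read each attribute, write the value back, and verify it is unchanged."""
    failures = []
    for name in attribute_names:
        value = getattr(module, name)
        setattr(module, name, value)          # exercise the setter
        if getattr(module, name) != value:    # the readback must match
            failures.append((name, value, getattr(module, name)))
    return failures

class FakeModule(object):
    gain = 1.0
    output_direct = "off"

assert roundtrip_check(FakeModule(), ["gain", "output_direct"]) == []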
davidhawkes11/p3w
p3w_06.0d.3.py
1
1072
# Code to ask for user's two favourite numbers, to operate on them and to display the output to the screen.
# Input: two numbers, integer variables
# Output: the result of a mathematical operation on the two numbers
# The above text is commentary. The actual program starts below:
print("\n\n\nPython 3.0 Workbook\nStudent Work Booklet\nStudent Activity p3w_06.0d.3\n\n")
print("A program to demonstrate string selection and to display the output to the screen.\n\n")
number1 = int(input("What is your first favourite number? "))
number2 = int(input("What is your second favourite number? "))
menu = "What would you like to do with these two numbers? Enter 1, 2, 3, 4 or 9:\n\
1. Add +\n\
2. Subtract -\n\
3. Multiply *\n\
4. Divide /\n\
9. Quit\n"
x = int(input(menu))
if x == 1:
    print("", number1 + number2)
elif x == 2:
    print("", number1 - number2)
elif x == 3:
    print("", number1 * number2)
elif x == 4:
    print("", number1 / number2)
elif x == 9:
    print("Goodbye!")
mit
1,125,062,174,275,446,800
29.529412
107
0.632463
false
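The workbook exercise above selects the arithmetic operation with an if/elif chain. An equivalent formulation keeps the operations in a dictionary, which avoids repeating the print call for every branch; the sketch below is an illustration, not part of the workbook, and the prompt strings are shortened:

import operator

# Map menu choices to operator functions; 9 (Quit) is handled separately.
operations = {1: operator.add, 2: operator.sub, 3: operator.mul, 4: operator.truediv}

number1 = int(input("What is your first favourite number? "))
number2 = int(input("What is your second favourite number? "))
x = int(input("Enter 1 (+), 2 (-), 3 (*), 4 (/) or 9 (quit): "))
if x == 9:
    print("Goodbye!")
elif x in operations:
    print("", operations[x](number1, number2))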
orf/websocket_stdout_example
runner.py
1
3081
from twisted.internet import reactor, protocol
from autobahn.websocket import WebSocketServerFactory, \
    WebSocketServerProtocol, \
    listenWS
from twisted.python.log import startLogging, msg
import sys

startLogging(sys.stdout)

# Examples:
# runner.py /bin/sh -c "tail -f /var/log/nginx/access.log | grep -v secret_admin_page" --line-buffered | awk '{\$1=\"\"; print}'"
# runner.py tail tail -F /var/log/nginx/access.log

COMMAND_NAME = sys.argv[1]
COMMAND_ARGS = sys.argv[1:]
LOCAL_ONLY = False
DEBUG = True


class ProcessProtocol(protocol.ProcessProtocol):
    """
    I handle a child process launched via reactor.spawnProcess. I just buffer
    the output into a list and call
    WebSocketProcessOutputterThingFactory.broadcast when any new output is read
    """
    def __init__(self, websocket_factory):
        self.ws = websocket_factory
        self.buffer = []

    def outReceived(self, message):
        self.ws.broadcast(message)
        self.buffer.append(message)
        self.buffer = self.buffer[-10:]  # Last 10 messages please

    def errReceived(self, data):
        print "Error: %s" % data


# http://autobahn.ws/python
class WebSocketProcessOutputterThing(WebSocketServerProtocol):
    """
    I handle a single connected client. We don't need to do much here, simply
    call the register and un-register functions when needed.
    """
    def onOpen(self):
        self.factory.register(self)
        for line in self.factory.process.buffer:
            self.sendMessage(line)

    def connectionLost(self, reason):
        WebSocketServerProtocol.connectionLost(self, reason)
        #super(WebSocketProcessOutputterThing, self).connectionLost(self, reason)
        self.factory.unregister(self)


class WebSocketProcessOutputterThingFactory(WebSocketServerFactory):
    """
    I maintain a list of connected clients and provide a method for pushing a
    single message to all of them.
    """
    protocol = WebSocketProcessOutputterThing

    def __init__(self, *args, **kwargs):
        WebSocketServerFactory.__init__(self, *args, **kwargs)
        #super(WebSocketProcessOutputterThingFactory, self).__init__(self, *args, **kwargs)
        self.clients = []
        self.process = ProcessProtocol(self)
        reactor.spawnProcess(self.process, COMMAND_NAME, COMMAND_ARGS, {}, usePTY=True)

    def register(self, client):
        msg("Registered client %s" % client)
        if not client in self.clients:
            self.clients.append(client)

    def unregister(self, client):
        msg("Unregistered client %s" % client)
        if client in self.clients:
            self.clients.remove(client)

    def broadcast(self, message):
        for client in self.clients:
            client.sendMessage(message)


if __name__ == "__main__":
    print "Running process %s with args %s" % (COMMAND_NAME, COMMAND_ARGS)
    factory = WebSocketProcessOutputterThingFactory("ws://%s:9000" % ("localhost" if LOCAL_ONLY else "0.0.0.0"), debug=False)
    listenWS(factory)
    reactor.run()
mit
2,425,071,918,070,515,700
35.678571
129
0.668939
false
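runner.py above spawns the given command, buffers its last ten lines of output, and broadcasts every new line to all WebSocket clients connected on port 9000. A minimal client for manual testing might look like the following sketch; it assumes the third-party websocket-client package is installed and that the server was started as shown in the comments at the top of the file:

# Sketch of a test client for runner.py; assumes `pip install websocket-client`
# and a server started with e.g.: python runner.py tail tail -F /var/log/nginx/access.log
from websocket import create_connection

ws = create_connection("ws://localhost:9000")
try:
    # The buffered backlog (up to 10 lines) arrives first, then live output.
    for _ in range(20):
        print(ws.recv())
finally:
    ws.close()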
googleapis/googleapis-gen
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/ad_group_bid_modifier_service/client.py
1
23635
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.ads.googleads.v8.common.types import criteria from google.ads.googleads.v8.enums.types import bid_modifier_source from google.ads.googleads.v8.resources.types import ad_group_bid_modifier from google.ads.googleads.v8.services.types import ad_group_bid_modifier_service from google.rpc import status_pb2 # type: ignore from .transports.base import AdGroupBidModifierServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import AdGroupBidModifierServiceGrpcTransport class AdGroupBidModifierServiceClientMeta(type): """Metaclass for the AdGroupBidModifierService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = OrderedDict() # type: Dict[str, Type[AdGroupBidModifierServiceTransport]] _transport_registry['grpc'] = AdGroupBidModifierServiceGrpcTransport def get_transport_class(cls, label: str = None, ) -> Type[AdGroupBidModifierServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class AdGroupBidModifierServiceClient(metaclass=AdGroupBidModifierServiceClientMeta): """Service to manage ad group bid modifiers.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = 'googleads.googleapis.com' DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AdGroupBidModifierServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AdGroupBidModifierServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename) kwargs['credentials'] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> AdGroupBidModifierServiceTransport: """Return the transport used by the client instance. Returns: AdGroupBidModifierServiceTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def ad_group_path(customer_id: str,ad_group_id: str,) -> str: """Return a fully-qualified ad_group string.""" return "customers/{customer_id}/adGroups/{ad_group_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, ) @staticmethod def parse_ad_group_path(path: str) -> Dict[str,str]: """Parse a ad_group path into its component segments.""" m = re.match(r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$", path) return m.groupdict() if m else {} @staticmethod def ad_group_bid_modifier_path(customer_id: str,ad_group_id: str,criterion_id: str,) -> str: """Return a fully-qualified ad_group_bid_modifier string.""" return "customers/{customer_id}/adGroupBidModifiers/{ad_group_id}~{criterion_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, criterion_id=criterion_id, ) @staticmethod def parse_ad_group_bid_modifier_path(path: str) -> Dict[str,str]: """Parse a ad_group_bid_modifier path into its component segments.""" m = re.match(r"^customers/(?P<customer_id>.+?)/adGroupBidModifiers/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str, ) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str,str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str, ) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder, ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str,str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str, ) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization, ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str,str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str, ) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project, ) @staticmethod def parse_common_project_path(path: str) -> Dict[str,str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str, ) -> str: """Return a fully-qualified location string.""" return "projects/{project}/locations/{location}".format(project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str,str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__(self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, AdGroupBidModifierServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: 
"""Instantiate the ad group bid modifier service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.AdGroupBidModifierServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, AdGroupBidModifierServiceTransport): # transport is a AdGroupBidModifierServiceTransport instance. 
if credentials: raise ValueError('When providing a transport instance, ' 'provide its credentials directly.') self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = AdGroupBidModifierServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, ) def get_ad_group_bid_modifier(self, request: ad_group_bid_modifier_service.GetAdGroupBidModifierRequest = None, *, resource_name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> ad_group_bid_modifier.AdGroupBidModifier: r"""Returns the requested ad group bid modifier in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Args: request (:class:`google.ads.googleads.v8.services.types.GetAdGroupBidModifierRequest`): The request object. Request message for [AdGroupBidModifierService.GetAdGroupBidModifier][google.ads.googleads.v8.services.AdGroupBidModifierService.GetAdGroupBidModifier]. resource_name (:class:`str`): Required. The resource name of the ad group bid modifier to fetch. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v8.resources.types.AdGroupBidModifier: Represents an ad group bid modifier. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([resource_name]): raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') # Minor optimization to avoid making a copy if the user passes # in a ad_group_bid_modifier_service.GetAdGroupBidModifierRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, ad_group_bid_modifier_service.GetAdGroupBidModifierRequest): request = ad_group_bid_modifier_service.GetAdGroupBidModifierRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if resource_name is not None: request.resource_name = resource_name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_ad_group_bid_modifier] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('resource_name', request.resource_name), )), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. 
return response def mutate_ad_group_bid_modifiers(self, request: ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest = None, *, customer_id: str = None, operations: Sequence[ad_group_bid_modifier_service.AdGroupBidModifierOperation] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> ad_group_bid_modifier_service.MutateAdGroupBidModifiersResponse: r"""Creates, updates, or removes ad group bid modifiers. Operation statuses are returned. List of thrown errors: `AdGroupBidModifierError <>`__ `AuthenticationError <>`__ `AuthorizationError <>`__ `ContextError <>`__ `CriterionError <>`__ `DatabaseError <>`__ `DistinctError <>`__ `FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__ `IdError <>`__ `InternalError <>`__ `MutateError <>`__ `NewResourceCreationError <>`__ `NotEmptyError <>`__ `OperatorError <>`__ `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__ `ResourceCountLimitExceededError <>`__ `SizeLimitError <>`__ `StringFormatError <>`__ `StringLengthError <>`__ Args: request (:class:`google.ads.googleads.v8.services.types.MutateAdGroupBidModifiersRequest`): The request object. Request message for [AdGroupBidModifierService.MutateAdGroupBidModifiers][google.ads.googleads.v8.services.AdGroupBidModifierService.MutateAdGroupBidModifiers]. customer_id (:class:`str`): Required. ID of the customer whose ad group bid modifiers are being modified. This corresponds to the ``customer_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. operations (:class:`Sequence[google.ads.googleads.v8.services.types.AdGroupBidModifierOperation]`): Required. The list of operations to perform on individual ad group bid modifiers. This corresponds to the ``operations`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v8.services.types.MutateAdGroupBidModifiersResponse: Response message for ad group bid modifiers mutate. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([customer_id, operations]): raise ValueError('If the `request` argument is set, then none of ' 'the individual field arguments should be set.') # Minor optimization to avoid making a copy if the user passes # in a ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest): request = ad_group_bid_modifier_service.MutateAdGroupBidModifiersRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if customer_id is not None: request.customer_id = customer_id if operations is not None: request.operations = operations # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_ad_group_bid_modifiers] # Certain fields should be provided within the metadata header; # add these here. 
metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(( ('customer_id', request.customer_id), )), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response __all__ = ( 'AdGroupBidModifierServiceClient', )
apache-2.0
-5,977,166,958,312,141,000
45.162109
175
0.629236
false
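A hedged usage sketch for the generated client above, built only from methods visible in this file: construct the client from a service-account key, build the ad group bid modifier resource name, and fetch it. The import path is inferred from the file location, all IDs and the key path are placeholders, and the bid_modifier field is assumed from the resource type; in practice calls also need the developer-token and login-customer-id headers that the higher-level google-ads wrapper client normally supplies.

# Illustrative only; placeholder IDs, key path inferred import, and no Google Ads headers.
from google.ads.googleads.v8.services.services.ad_group_bid_modifier_service import (
    AdGroupBidModifierServiceClient,
)

client = AdGroupBidModifierServiceClient.from_service_account_file("service_account.json")

resource_name = client.ad_group_bid_modifier_path(
    customer_id="1234567890", ad_group_id="111", criterion_id="222",
)
modifier = client.get_ad_group_bid_modifier(resource_name=resource_name)
print(modifier.bid_modifier)  # bid_modifier field assumed from the AdGroupBidModifier resource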
JohnVillalovos/hardlinkpy
hardlinkpy/hardlink.py
1
24410
#!/usr/bin/python3 -ttu # hardlink - Goes through a directory structure and creates hardlinks for # files which are identical. # # Copyright (C) 2003 - 2019 John L. Villalovos, Hillsboro, Oregon # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., 59 # Temple Place, Suite 330, Boston, MA 02111-1307, USA. # # # ------------------------------------------------------------------------ # John Villalovos # email: [email protected] # http://www.sodarock.com/ # # Inspiration for this program came from the hardlink.c code. I liked what it # did but did not like the code itself, to me it was very unmaintainable. So I # rewrote in C++ and then I rewrote it in python. In reality this code is # nothing like the original hardlink.c, since I do things quite differently. # Even though this code is written in python the performance of the python # version is much faster than the hardlink.c code, in my limited testing. This # is mainly due to use of different algorithms. # # Original inspirational hardlink.c code was written by: Jakub Jelinek # <[email protected]> # # ------------------------------------------------------------------------ # # TODO(jlvillal): # * Thinking it might make sense to walk the entire tree first and collect # up all the file information before starting to do comparisons. Thought # here is we could find all the files which are hardlinked to each other # and then do a comparison. If they are identical then hardlink # everything at once. import argparse import logging import os import re import stat import sys import time from typing import Dict, List, NamedTuple, Optional, Tuple class FileInfo(NamedTuple): filename: str stat_info: os.stat_result # MAX_HASHES must be a power of 2, so that MAX_HASHES - 1 will be a value with # all bits set to 1 MAX_HASHES = 2 ** 17 assert (MAX_HASHES & (MAX_HASHES - 1)) == 0, "MAX_HASHES must be a power of 2" MAX_HASHES_MINUS_1 = MAX_HASHES - 1 # Hash functions # Create a hash from a file's size and time values def hash_size_time(*, size: int, time: float) -> int: return (size ^ int(time)) & (MAX_HASHES_MINUS_1) def hash_size(size: int) -> int: return (size) & (MAX_HASHES_MINUS_1) def hash_value(*, size: int, time: float, notimestamp: bool) -> int: if notimestamp: return hash_size(size) else: return hash_size_time(size=size, time=time) # If two files have the same inode and are on the same device then they are # already hardlinked. def is_already_hardlinked(*, st1: os.stat_result, st2: os.stat_result) -> bool: result = (st1.st_ino == st2.st_ino) and (st1.st_dev == st2.st_dev) return result # Determine if a file is eligibile for hardlinking. Files will only be # considered for hardlinking if this function returns true. 
def eligible_for_hardlink( *, st1: os.stat_result, st2: os.stat_result, args: argparse.Namespace ) -> bool: # Must meet the following # criteria: # * NOT already hard linked to each other # * sizes are equal # * size is greater than or equal to args.min_size # * file modes are equal OR we are comparing content only # * owner user ids are equal OR we are comparing content only # * owner group ids are equal OR we are comparing content only # * modified times are equal OR date hashing is off OR we are comparing # content only # * device is the same # * sizes are equal if not (st1.st_size == st2.st_size): return False # * size is greater than or equal to args.min_size # The size should always be greater than or equal to the min size as the # caller should ensure that, but to be safe we check anyway. if st1.st_size < args.min_size: return False if not args.content_only: # * file modes are equal if not (st1.st_mode == st2.st_mode): return False # * owner user ids are equal if not (st1.st_uid == st2.st_uid): return False # * owner group ids are equal if not (st1.st_gid == st2.st_gid): return False if not args.content_only and not args.notimestamp: # * modified times are equal if not (st1.st_mtime == st2.st_mtime): return False # * device is the same if not (st1.st_dev == st2.st_dev): return False # * NOT already hard linked to each other # The files should not be hardlinked to each other as the caller should # ensure that, but to be safe we check anyway. if is_already_hardlinked(st1=st1, st2=st2): return False return True def are_file_contents_equal( *, filename1: str, filename2: str, args: argparse.Namespace ) -> bool: """Determine if the contents of two files are equal. **!! This function assumes that the file sizes of the two files are equal. """ try: # Open our two files with open(filename1, "rb") as file1: with open(filename2, "rb") as file2: gStats.did_comparison() if args.show_progress: print(f"Comparing: {filename1}") print(f" to : {filename2}") buffer_size = 1024 * 1024 while True: buffer1 = file1.read(buffer_size) buffer2 = file2.read(buffer_size) if buffer1 != buffer2: return False if not buffer1: return True except (OSError, PermissionError) as exc: print("Error opening file in are_file_contents_equal()") print("Was attempting to open:") print(f"file1: {filename1}") print(f"file2: {filename2}") print("When an exception occurred: {}".format(exc)) return False # Determines if two files should be hard linked together. 
def are_files_hardlinkable( *, file_info_1: FileInfo, file_info_2: FileInfo, args: argparse.Namespace ) -> bool: # See if the files are eligible for hardlinking if not eligible_for_hardlink( st1=file_info_1.stat_info, st2=file_info_2.stat_info, args=args ): return False if args.samename: # Check if the base filenames are the same basename1 = os.path.basename(file_info_1.filename) basename2 = os.path.basename(file_info_2.filename) if basename1 != basename2: return False return are_file_contents_equal( filename1=file_info_1.filename, filename2=file_info_2.filename, args=args ) # Hardlink two files together def hardlink_files( *, sourcefile: str, destfile: str, stat_info: os.stat_result, args: argparse.Namespace, ) -> bool: # rename the destination file to save it temp_name = destfile + ".$$$___cleanit___$$$" try: if not args.dry_run: os.rename(destfile, temp_name) except OSError as error: print(f"Failed to rename: {destfile} to {temp_name}") print(error) result = False else: # Now link the sourcefile to the destination file try: if not args.dry_run: os.link(sourcefile, destfile) except: # noqa TODO(fix this bare except) logging.exception(f"Failed to hardlink: {sourcefile} to {destfile}") # Try to recover try: os.rename(temp_name, destfile) except: # noqa TODO(fix this bare except) logging.exception( "BAD BAD - failed to rename back {} to {}".format( temp_name, destfile ) ) result = False else: # hard link succeeded # Delete the renamed version since we don't need it. if not args.dry_run: try: os.unlink(temp_name) except FileNotFoundError: # If our temporary file disappears under us, ignore it. # Probably an rsync is running and deleted it. logging.warning(f"Temporary file vanished: {temp_name}") pass # update our stats gStats.did_hardlink(sourcefile, destfile, stat_info) if args.show_progress: if args.dry_run: print("Did NOT link. Dry run") size = stat_info.st_size print(f"Linked: {sourcefile}") print(f" to: {destfile}, saved {size}") result = True return result def hardlink_identical_files( *, dir_entry: os.DirEntry, args: argparse.Namespace ) -> None: """hardlink identical files The purpose of this function is to hardlink files together if the files are the same. To be considered the same they must be equal in the following criteria: * file size * file contents * file mode (default) * owner user id (default) * owner group id (default) * modified time (default) Also, files will only be hardlinked if they are on the same device. This is because hardlink does not allow you to hardlink across file systems. The basic idea on how this is done is as follows: Walk the directory tree building up a list of the files. For each file, generate a simple hash based on the size and modified time. For any other files which share this hash make sure that they are not identical to this file. If they are identical then hardlink the files. Add the file info to the list of files that have the same hash value. """ for exclude in args.excludes: if re.search(exclude, dir_entry.path): return stat_info = dir_entry.stat(follow_symlinks=False) # Is it a regular file? if stat.S_ISREG(stat_info.st_mode): # Create the hash for the file. file_hash = hash_value( size=stat_info.st_size, time=stat_info.st_mtime, notimestamp=(args.notimestamp or args.content_only), ) # Bump statistics count of regular files found. 
gStats.found_regular_file() if args.verbose >= 2: print(f"File: {dir_entry.path}") work_file_info = (dir_entry.path, stat_info) work_file_info = FileInfo(filename=dir_entry.path, stat_info=stat_info) if file_hash in file_hashes: # We have file(s) that have the same hash as our current file. # Let's go through the list of files with the same hash and see if # we are already hardlinked to any of them. for temp_file_info in file_hashes[file_hash]: if is_already_hardlinked(st1=stat_info, st2=temp_file_info.stat_info): gStats.found_hardlink( temp_file_info.filename, dir_entry.path, temp_file_info.stat_info, ) break else: # We did not find this file as hardlinked to any other file # yet. So now lets see if our file should be hardlinked to any # of the other files with the same hash. for temp_file_info in file_hashes[file_hash]: if are_files_hardlinkable( file_info_1=work_file_info, # file_info_2=(temp_filename, temp_stat_info), file_info_2=temp_file_info, args=args, ): hardlink_files( sourcefile=temp_file_info.filename, destfile=dir_entry.path, stat_info=temp_file_info.stat_info, args=args, ) break else: # The file should NOT be hardlinked to any of the other # files with the same hash. So we will add it to the list # of files. file_hashes[file_hash].append(work_file_info) else: # There weren't any other files with the same hash value so we will # create a new entry and store our file. file_hashes[file_hash] = [work_file_info] class cStatistics(object): def __init__(self) -> None: self.dircount = 0 # how many directories we find self.regularfiles = 0 # how many regular files we find self.comparisons = 0 # how many file content comparisons self.hardlinked_thisrun = 0 # hardlinks done this run self.hardlinked_previously = 0 # hardlinks that are already existing self.bytes_saved_thisrun = 0 # bytes saved by hardlinking this run self.bytes_saved_previously = 0 # bytes saved by previous hardlinks self.hardlinkstats: List[ Tuple[str, str] ] = [] # list of files hardlinked this run self.starttime = time.time() # track how long it takes self.previouslyhardlinked: Dict[ str, Tuple[os.stat_result, List[str]] ] = {} # list of files hardlinked previously def found_directory(self) -> None: self.dircount = self.dircount + 1 def found_regular_file(self) -> None: self.regularfiles = self.regularfiles + 1 def did_comparison(self) -> None: self.comparisons = self.comparisons + 1 def found_hardlink( self, sourcefile: str, destfile: str, stat_info: os.stat_result ) -> None: filesize = stat_info.st_size self.hardlinked_previously = self.hardlinked_previously + 1 self.bytes_saved_previously = self.bytes_saved_previously + filesize if sourcefile not in self.previouslyhardlinked: self.previouslyhardlinked[sourcefile] = (stat_info, [destfile]) else: self.previouslyhardlinked[sourcefile][1].append(destfile) def did_hardlink( self, sourcefile: str, destfile: str, stat_info: os.stat_result ) -> None: filesize = stat_info.st_size self.hardlinked_thisrun = self.hardlinked_thisrun + 1 self.bytes_saved_thisrun = self.bytes_saved_thisrun + filesize self.hardlinkstats.append((sourcefile, destfile)) def print_stats(self, args: argparse.Namespace) -> None: if args.show_progress: print("") print("Hard linking Statistics:") # Print out the stats for the files we hardlinked, if any if self.previouslyhardlinked and args.printprevious: keys = self.previouslyhardlinked.keys() print("Files Previously Hardlinked:") for key in sorted(keys): stat_info, file_list = self.previouslyhardlinked[key] size = stat_info.st_size print(f"Hardlinked 
together: {key}") for filename in file_list: print(f" : {filename}") print( "Size per file: {} Total saved: {}".format( size, size * len(file_list) ) ) print() if self.hardlinkstats: if args.dry_run: print("Statistics reflect what would have happened if not a dry run") print("Files Hardlinked this run:") for (source, dest) in self.hardlinkstats: print(f"Hardlinked: {source}") print(f" to: {dest}") print() print(f"Directories : {self.dircount:,}") print(f"Regular files : {self.regularfiles:,}") print(f"Comparisons : {self.comparisons:,}") print(f"Hardlinked this run : {self.hardlinked_thisrun:,}") print( "Total hardlinks : {:,}".format( self.hardlinked_previously + self.hardlinked_thisrun ) ) print( "Bytes saved this run : {:,} ({})".format( self.bytes_saved_thisrun, humanize_number(self.bytes_saved_thisrun) ) ) totalbytes = self.bytes_saved_thisrun + self.bytes_saved_previously print( "Total bytes saved : {:,} ({})".format( totalbytes, humanize_number(totalbytes) ) ) run_time = time.time() - self.starttime print( "Total run time : {:,.2f} seconds ({})".format( run_time, humanize_time(run_time) ) ) def humanize_time(seconds: float) -> str: if seconds > 3600: # 3600 seconds = 1 hour return "{:0.2f} hours".format(seconds / 3600.0) if seconds > 60: return "{:0.2f} minutes".format(seconds / 60.0) return f"{seconds:,.2f} seconds" def humanize_number(number: int) -> str: if number > 1024 ** 4: return "{:0.3f} tibibytes".format(number / (1024.0 ** 4)) if number > 1024 ** 3: return "{:0.3f} gibibytes".format(number / (1024.0 ** 3)) if number > 1024 ** 2: return "{:0.3f} mebibytes".format(number / (1024.0 ** 2)) if number > 1024: return "{:0.3f} kibibytes".format(number / 1024.0) return f"{number} bytes" def parse_args(passed_args: Optional[List[str]] = None) -> argparse.Namespace: parser = argparse.ArgumentParser() # usage=usage) parser.add_argument( "directories", nargs="+", metavar="DIRECTORY", help="Directory name" ) parser.add_argument("--version", action="version", version=VERSION) parser.add_argument( "-f", "--filenames-equal", help="Filenames have to be identical", action="store_true", dest="samename", ) parser.add_argument( "-n", "--dry-run", help="Do NOT actually hardlink files", action="store_true" ) parser.add_argument( "-p", "--print-previous", help="Print previously created hardlinks", action="store_true", dest="printprevious", ) parser.add_argument( "--no-progress", help="Don't print progress information during execution", action="store_false", dest="show_progress", ) parser.add_argument( "-q", "--no-stats", help="Do not print the final statistics", action="store_false", dest="printstats", ) parser.add_argument( "-t", "--timestamp-ignore", "--ignore-timestamp", help="File modification times do NOT have to be identical", action="store_true", dest="notimestamp", ) parser.add_argument( "-c", "--content-only", help="Only file contents have to match", action="store_true", ) parser.add_argument( "-s", "--min-size", help="Minimum file size to perform a hard link. Must be 1 or greater", type=int, default=1, ) parser.add_argument( "-x", "--exclude", help=( "Regular expression used to exclude files/dirs (may specify multiple " "times" ), metavar="REGEX", action="append", dest="excludes", default=[], ) verbosity_group = parser.add_mutually_exclusive_group() verbosity_group.add_argument( "-v", "--verbose", help="Verbosity level. 
Can be used multiple times.", action="count", default=1, ) verbosity_group.add_argument( "--quiet", help="Minimizes output", action="store_true" ) args = parser.parse_args(args=passed_args) if args.quiet: args.verbose = 0 args.show_progress = False args.printstats = False if args.min_size < 1: parser.error("-s/--min-size must be 1 or greater") args.directories = [ os.path.abspath(os.path.expanduser(dirname)) for dirname in args.directories ] for dirname in args.directories: if not os.path.isdir(dirname): parser.print_help() print() print(f"Error: {dirname} is NOT a directory") sys.exit(1) return args def check_python_version() -> None: # Make sure we have the minimum required Python version if sys.version_info < (3, 6, 0): sys.exit("ERROR: This program requires Python 3.6 or higher to run") def setup_logger(verbose_level: int) -> None: log_level = logging.INFO if verbose_level >= 1: log_level = logging.DEBUG # Setup logging format. logging.basicConfig( format="%(levelname)s:%(filename)s:%(funcName)s():L%(lineno)d %(message)s", level=log_level, ) # Start of global declarations debug = None debug1 = None gStats = cStatistics() file_hashes: Dict[int, List[FileInfo]] = {} VERSION = "0.7.0 - 2020-05-13 (13-May-2020)" def main(passed_args: Optional[List[str]] = None) -> int: check_python_version() # Parse our argument list and get our list of directories args = parse_args(passed_args=passed_args) # Compile up our regexes ahead of time MIRROR_PL_REGEX = re.compile(r"^\.in\.") RSYNC_TEMP_REGEX = re.compile((r"^\..*\.\?{6,6}$")) # Now go through all the directories that have been added. # NOTE: hardlink_identical_files() will add more directories to the # directories list as it finds them. directories = args.directories.copy() while directories: # Get the last directory in the list directory = directories.pop() + "/" if not os.path.isdir(directory): print(f"{directory} is NOT a directory!") else: gStats.found_directory() # Loop through all the files in the directory try: dir_entries = os.scandir(directory) except (OSError, PermissionError) as exc: print( f"Error: Unable to do an os.scandir on: {directory} Skipping...", exc, ) continue directories_found = [] for dir_entry in sorted(dir_entries, key=lambda x: x.name): pathname = dir_entry.path # Look at files/dirs beginning with "." if dir_entry.name.startswith("."): # Ignore any mirror.pl files. These are the files that # start with ".in." if MIRROR_PL_REGEX.match(dir_entry.name): continue # Ignore any RSYNC files. These are files that have the # format .FILENAME.?????? if RSYNC_TEMP_REGEX.match(dir_entry.name): continue if dir_entry.is_symlink(): if debug1: print(f"{pathname}: is a symbolic link, ignoring") continue if dir_entry.is_dir(): directories_found.append(pathname) continue if dir_entry.stat(follow_symlinks=False).st_size < args.min_size: if debug1: print(f"{pathname}: Size is not large enough, ignoring") continue hardlink_identical_files(dir_entry=dir_entry, args=args) # Add our found directories in reverse order because we pop them # off the end. Goal is to go through our directories in # alphabetical order. directories.extend(reversed(directories_found)) if args.printstats: gStats.print_stats(args) return 0 if __name__ == "__main__": main()
gpl-2.0
8,132,395,972,646,693,000
34.739385
86
0.578165
false
cynnyx/doormat
utils/measurement_convergence.py
1
1267
#!/usr/bin/python
from __future__ import division, print_function

import math
import sys


def parse_measurement_line(line):
    (name, value) = line.split(':')
    return name, float(value)


all_values = {}
averages = {}
m2 = {}
samples = {}
acceptancy = 0.95

# Welford's online update: maintain a running mean and M2 per measurement name.
for line in sys.stdin.readlines():
    (name, value) = parse_measurement_line(line)
    if name in all_values:
        all_values[name].append(value)
        samples[name] += 1
        oldavg = averages[name]
        delta = value - oldavg
        averages[name] += delta / samples[name]
        m2[name] += delta * (value - averages[name])
    else:
        all_values[name] = [value]
        averages[name] = value
        samples[name] = 1
        m2[name] = 0

# A measurement has converged when at least `acceptancy` of its samples fall
# within three standard deviations of the running mean.
for name, values_list in all_values.items():
    retained = 0
    for v in values_list:
        if abs(v - averages[name]) < 3 * math.sqrt(m2[name] / samples[name]):
            retained += 1
    if retained / samples[name] < acceptancy:
        accepted = retained / samples[name]
        print(name, "has not reached convergence. Only", retained,
              "samples were valid over", samples[name], "[", accepted * 100, "%]")
        # Exit status 0 signals that convergence has not been reached yet.
        exit(0)

# All measurements converged: write "name<TAB>mean<TAB>stddev" lines to the output file.
output_file = open(sys.argv[1], 'w+')
for name, value in averages.items():
    st = name
    st += "\t" + str(value) + "\t" + str(math.sqrt(m2[name] / samples[name])) + "\n"
    output_file.write(st)
output_file.close()
exit(1)
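The running mean/variance update in the script above is Welford's online algorithm. A tiny self-contained sketch of the same update, with illustrative values only:

values = [10.0, 12.0, 11.0, 13.0]
n, mean, m2_acc = 0, 0.0, 0.0
for x in values:
    n += 1
    delta = x - mean
    mean += delta / n
    m2_acc += delta * (x - mean)
# mean == 11.5; population variance == m2_acc / n, matching m2[name] / samples[name] above
stddev = (m2_acc / n) ** 0.5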
mit
2,172,345,804,804,676,400
24.877551
129
0.658248
false
mozilla/ichnaea
ichnaea/api/tests.py
1
8473
import json import time from unittest import mock import colander import pytest from pyramid.request import Request from ichnaea.api.key import get_key, Key from ichnaea.api import exceptions as api_exceptions from ichnaea.api.rate_limit import rate_limit_exceeded from ichnaea.api.schema import RenamingMapping from ichnaea.tests.factories import ApiKeyFactory, KeyFactory class TestKey(object): def test_empty(self, session_tracker): key = Key() assert isinstance(key, Key) assert key.valid_key is None session_tracker(0) def test_get(self, session, session_tracker): api_key = ApiKeyFactory() session.flush() session_tracker(1) result = get_key(session, api_key.valid_key) assert isinstance(result, Key) session_tracker(2) # Test get cache result2 = get_key(session, api_key.valid_key) assert isinstance(result2, Key) session_tracker(2) def test_get_miss(self, session, session_tracker): result = get_key(session, "unknown") assert result is None session_tracker(1) # Test get cache result2 = get_key(session, "unknown") assert result2 is None session_tracker(1) def test_allowed(self): def one(**kw): return KeyFactory(**kw) key = one(allow_locate=True, allow_region=True) assert key.allowed("locate") assert key.allowed("region") assert key.allowed("submit") assert key.allowed("unknown") is None assert not one(allow_locate=None).allowed("locate") assert not one(allow_locate=False).allowed("locate") assert not one(allow_region=None).allowed("region") assert not one(allow_region=False).allowed("region") def test_store_sample(self): key = KeyFactory(store_sample_locate=None, store_sample_submit=None) assert key.store_sample("locate") is False assert key.store_sample("submit") is False assert key.store_sample("region") is False key = KeyFactory(store_sample_locate=0, store_sample_submit=100) assert key.store_sample("locate") is False assert key.store_sample("submit") is True # A global_locate_sample_rate can turn off samples assert key.store_sample("locate", global_locate_sample_rate=0.0) is False # And can raise a sample rate key = KeyFactory(store_sample_locate=50, store_sample_submit=None) assert key.store_sample("locate", global_locate_sample_rate=200.0) is True @mock.patch("ichnaea.api.key.random") def test_store_sample_mock_random(self, mock_random): key = KeyFactory(store_sample_locate=50) mock_random.return_value = 0.1 assert key.store_sample("locate") is True mock_random.return_value = 0.5 assert key.store_sample("locate") is True mock_random.return_value = 0.51 assert key.store_sample("locate") is False mock_random.return_value = 0.9 assert key.store_sample("locate") is False @pytest.mark.parametrize( "global_rate, q1, q2, q3, q4", [ (100.0, 0.1, 0.5, 0.501, 0.7), (50.0, 0.1, 0.25, 0.251, 0.5), (1.0, 0.004, 0.005, 0.006, 1.0), ], ) @mock.patch("ichnaea.api.key.random") def test_store_sample_mock_random_with_global_rate( self, mock_random, global_rate, q1, q2, q3, q4 ): assert 0.0 < (q3 - q2) < 0.1 key = KeyFactory(store_sample_locate=50) mock_random.return_value = q1 assert key.store_sample("locate", global_rate) is True mock_random.return_value = q2 assert key.store_sample("locate", global_rate) is True mock_random.return_value = q3 assert key.store_sample("locate", global_rate) is False mock_random.return_value = q4 assert key.store_sample("locate", global_rate) is False def test_can_fallback(self): def one(**kw): return KeyFactory(**kw) assert one(allow_fallback=True).can_fallback() assert not one(allow_fallback=False).can_fallback() assert not one(allow_fallback=None).can_fallback() assert not 
(one(allow_fallback=True, fallback_name=None).can_fallback()) assert not (one(allow_fallback=True, fallback_url=None).can_fallback()) assert not (one(allow_fallback=True, fallback_ratelimit=None).can_fallback()) assert one(allow_fallback=True, fallback_ratelimit=0).can_fallback() assert not ( one(allow_fallback=True, fallback_ratelimit_interval=None).can_fallback() ) assert not ( one(allow_fallback=True, fallback_ratelimit_interval=0).can_fallback() ) assert one(allow_fallback=True, fallback_cache_expire=None).can_fallback() assert one(allow_fallback=True, fallback_cache_expire=0).can_fallback() class TestRenamingMapping(object): def test_to_name(self): class SampleSchema(colander.MappingSchema): schema_type = RenamingMapping input_name = colander.SchemaNode(colander.String(), to_name="output_name") name = colander.SchemaNode(colander.String()) def __init__(self, *args, **kwargs): super(SampleSchema, self).__init__(*args, **kwargs) input_data = {"input_name": "foo", "name": "bar"} output_data = SampleSchema().deserialize(input_data) assert output_data["output_name"] == "foo" assert output_data["name"] == "bar" assert "input_name" not in output_data class TestExceptions(object): def _check(self, error, status, json=True, content_type="application/json"): response = Request.blank("/").get_response(error) if content_type: assert response.content_type == content_type assert response.status_code == status if json: assert response.json == error.json_body() return response def test_str(self): error = api_exceptions.LocationNotFound() assert str(error) == "<LocationNotFound>: 404" def test_daily_limit(self): error = api_exceptions.DailyLimitExceeded() response = self._check(error, 403) assert b"dailyLimitExceeded" in response.body def test_invalid_apikey(self): error = api_exceptions.InvalidAPIKey() response = self._check(error, 400) assert b"keyInvalid" in response.body def test_location_not_found(self): error = api_exceptions.LocationNotFound() response = self._check(error, 404) assert b"notFound" in response.body def test_parse_error(self): error = api_exceptions.ParseError() response = self._check(error, 400) assert b"parseError" in response.body def test_parse_error_details(self): error = api_exceptions.ParseError(details=["Details of Error"]) response = self._check(error, 400, json=False) assert b"parseError" in response.body content = json.loads(response.body.decode()) assert content["details"] == ["Details of Error"] def test_upload_success(self): error = api_exceptions.UploadSuccess() response = self._check(error, 200) assert response.body == b"{}" def test_upload_success_v0(self): error = api_exceptions.UploadSuccessV0() response = self._check(error, 204, json=False, content_type=None) assert response.body == b"" class TestLimiter(object): def test_maxrequests(self, redis): rate_key = "apilimit:key_a:v1.geolocate:20150101" maxreq = 5 expire = 1 for i in range(maxreq): assert not rate_limit_exceeded( redis, rate_key, maxreq=maxreq, expire=expire ) assert rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire) def test_expiry(self, redis): rate_key = "apilimit:key_a:v1.geolocate:20150101" maxreq = 100 expire = 1 assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire) time.sleep(1.0) assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire) def test_no_limit(self): rate_key = "apilimit:key_a:v1.geolocate:20150101" broken_redis = None assert not rate_limit_exceeded(broken_redis, rate_key, maxreq=0, expire=1)
apache-2.0
-4,846,981,050,123,046,000
36.162281
86
0.634014
false
openstack/storlets
tests/functional/java/test_thumbnail_storlet.py
1
5625
# Copyright IBM Corp. 2015, 2015 All Rights Reserved # Copyright (c) 2010-2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swiftclient import client as c from tests.functional.java import StorletJavaFunctionalTest import unittest from six.moves.urllib.request import Request, urlopen class TestThumbnailStorlet(StorletJavaFunctionalTest): def setUp(self): self.storlet_log = None self.additional_headers = {} main_class = 'org.openstack.storlet.thumbnail.ThumbnailStorlet' super(TestThumbnailStorlet, self).setUp('ThumbnailStorlet', 'thumbnail-1.0.jar', main_class, 'sample.jpg') def invoke_storlet_on_get(self): headers = {'X-Run-Storlet': self.storlet_name} headers.update(self.additional_headers) resp = dict() resp_headers, gf = c.get_object(self.url, self.token, self.container, self.storlet_file, response_dict=resp, headers=headers) with open('/tmp/sample.jpg', 'wb') as f: f.write(gf) self.assertIn(resp['status'], [200, 202]) def invoke_storlet_on_put(self): headers = {'X-Run-Storlet': self.storlet_name, 'x-object-meta-name': 'thumbnail'} headers.update(self.additional_headers) resp = dict() source_file = '%s/%s' % (self.path_to_bundle, self.storlet_file) with open(source_file, 'rb') as f: c.put_object(self.url, self.token, self.container, 'gen_thumb_on_put.jpg', f, headers=headers, response_dict=resp) status = resp.get('status') self.assertIn(status, [201, 202]) headers = c.head_object(self.url, self.token, self.container, 'gen_thumb_on_put.jpg') self.assertLess(int(headers['content-length']), 1087318) self.assertEqual('thumbnail', headers['x-object-meta-name']) def invoke_storlet_on_copy_from(self): headers = {'X-Run-Storlet': self.storlet_name, 'X-Object-Meta-Name': 'thumbnail', 'X-Copy-From': '%s/%s' % (self.container, self.storlet_file)} headers.update(self.additional_headers) resp = dict() c.put_object(self.url, self.token, self.container, 'gen_thumb_on_copy.jpg', '', headers=headers, response_dict=resp) status = resp.get('status') self.assertIn(status, [201, 202]) rh = resp['headers'] self.assertEqual(rh['x-storlet-generated-from'], '%s/%s' % (self.container, self.storlet_file)) self.assertEqual(rh['x-storlet-generated-from-account'], self.acct) self.assertIn('x-storlet-generated-from-last-modified', rh) headers = c.head_object(self.url, self.token, self.container, 'gen_thumb_on_copy.jpg') self.assertLess(int(headers['content-length']), 1087318) self.assertEqual('thumbnail', headers['x-object-meta-name']) self.assertTrue('x-object-meta-x-timestamp' not in headers) self.assertTrue('x-timestamp' in headers) def invoke_storlet_on_copy_dest(self): # No COPY in swiftclient. Using urllib instead... 
url = '%s/%s/%s' % (self.url, self.container, self.storlet_file) headers = {'X-Auth-Token': self.token, 'X-Run-Storlet': self.storlet_name, 'X-Object-Meta-Name': 'thumbnail', 'Destination': '%s/gen_thumb_on_copy_.jpg' % self.container} headers.update(self.additional_headers) req = Request(url, headers=headers) req.get_method = lambda: 'COPY' conn = urlopen(req, timeout=10) status = conn.getcode() self.assertIn(status, [201, 202]) headers = c.head_object(self.url, self.token, self.container, 'gen_thumb_on_copy_.jpg') self.assertLess(int(headers['content-length']), 1087318) self.assertEqual('thumbnail', headers['x-object-meta-name']) self.assertTrue('x-object-meta-x-timestamp' not in headers) self.assertTrue('x-timestamp' in headers) def test_get(self): self.invoke_storlet_on_get() def test_put(self): self.invoke_storlet_on_put() def test_copy_put(self): self.invoke_storlet_on_copy_from() def test_copy(self): self.invoke_storlet_on_copy_dest() class TestThumbnailStorletOnProxy(TestThumbnailStorlet): def setUp(self): super(TestThumbnailStorletOnProxy, self).setUp() self.additional_headers = {'X-Storlet-Run-On-Proxy': ''} if __name__ == '__main__': unittest.main()
apache-2.0
8,418,980,704,809,856,000
40.058394
79
0.577244
false
kcsaff/CA
src/algorithms/rivers.py
1
5824
# Copyright (C) 2010 by Kevin Saff # This file is part of the CA scanner. # The CA scanner is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # The CA scanner is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with the CA scanner. If not, see <http://www.gnu.org/licenses/>. """ This algorithm handles double-precision states. Rules are defined using 10 weights, 2 modifiers, 2 limits, and 2 setpoints. The first 9 weights describe the amount of each of the neighbors to use to generate the new value: [ 0][ 1][ 2] [ 3][*4][ 5] [ 6][ 7][ 8] The tenth weight is the amount to weight the cell's second-to-last value. One modifier is the dampener; the total will be multiplied by this. The other modifier is the exciter; this will be added to the total. The limits identify what the minimum and maximum allowed values are. Finally, the two setpoints indicate what to do if the total goes out of range. One indicates what to set the value to if it goes below the min, the other if it goes above the max. """ import generate def rivers_evolve(input, output, lookup): """Evolve it.""" return generate.inline(""" #define ABS(X) ((X) < 0 ? -(X) : +(X)) #define POS(X) ((X) > 0 ? (X) : 0) #define NEG(X) ((X) < 0 ? (X) : 0) #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) #define VS 0.001 #define WS 0.2 #define WIN(V0, W0, V1, W1) (POS(MIN((W1), ((V1)+(W1))-((V0)+(W0)) ))) #define WOUT(V0, W0, V1, W1) (WIN((V1), (W1), (V0), (W0))) #define THRESH 0.2 // These define soil/water when we transform #define TRANSERODE 1.0 #define TRANSDEPOSIT 1.0 // These define rate of erosion/deposition per velocity unit #define RATEERODE 0.01 #define RATEDEPOSIT 0.001 PyArrayObject *input; PyArrayObject *output; PyArrayObject *lookup; char h = 0; unsigned xstride, ystride, zstride; unsigned xa, x0, x1, xM; unsigned ya, y0, y1, yM; double *ind, *oud, *look; double v00, va0, v10, v0a, v01; double w00, wa0, w10, w0a, w01; double win, wout; double wvel, Dw, Dv; if (!PyArg_ParseTuple(args, "O!O!O!", &PyArray_Type, &input, &PyArray_Type, &output, &PyArray_Type, &lookup )) return NULL; xstride = input-> strides[0] >> 3; ystride = input-> strides[1] >> 3; zstride = input-> strides[2] >> 3; xM = (input-> dimensions[0] - 1) * xstride; yM = (input-> dimensions[1] - 1) * ystride; //zM = (input-> dimensions[2] - 1) * zstride; ind = (double*)(input-> data); oud = (double*)(output-> data); look = (double*)(lookup-> data); for (x0 = xstride; x0 < xM; x0 += xstride) { xa = x0 - xstride; x1 = x0 + xstride; for (y0 = ystride; y0 < yM; y0 += ystride) { ya = y0 - ystride; y1 = y0 + ystride; // v00 is sediment, w00 is water. v00 = ind[x0 + y0 + 0*zstride]; va0 = ind[xa + y0 + 0*zstride]; v10 = ind[x1 + y0 + 0*zstride]; v0a = ind[x0 + ya + 0*zstride]; v01 = ind[x0 + y1 + 0*zstride]; w00 = ind[x0 + y0 + 1*zstride]; wa0 = ind[xa + y0 + 1*zstride]; w10 = ind[x1 + y0 + 1*zstride]; w0a = ind[x0 + ya + 1*zstride]; w01 = ind[x0 + y1 + 1*zstride]; // Determine water flow in. 
win = 0; win += WIN(v00, w00, va0, wa0); win += WIN(v00, w00, v10, w10); win += WIN(v00, w00, v0a, w0a); win += WIN(v00, w00, v01, w01); win *= WS; // Determine water flow out. wout = 0; wout += WOUT(v00, w00, va0, wa0); wout += WOUT(v00, w00, v10, w10); wout += WOUT(v00, w00, v0a, w0a); wout += WOUT(v00, w00, v01, w01); wout *= WS; w00 = POS(w00 + win - wout); Dw = Dv = 0; // Determine water velocity. if (w00 > 0) { wvel = win / w00; // Perform erosion. if (wvel > THRESH) { // Erosion Dw = (wvel - THRESH) * RATEERODE; Dv = -Dw * TRANSERODE; } else { // Deposition Dw = -MIN(w00, (THRESH - wvel) * RATEDEPOSIT); Dv = -Dw * TRANSDEPOSIT; } } // Determine sediment shift. Dv += VS * (va0 + v10 + v0a + v01 - 4 * v00); // Write results. oud[x0 + y0 + 0*zstride] = v00 + Dv; oud[x0 + y0 + 1*zstride] = w00 + Dw; } } return PyFloat_FromDouble(1.0); """)(input, output, lookup) import numpy from _util import register from _algorithm import algorithm @register('compile_rule', type='rivers', quality=1.0) def _rivers(X): lookup = list() return algorithm('floatscan', planes=3, evolve=rivers_evolve, table=numpy.asarray(lookup, dtype = numpy.float))
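For intuition, the sediment term `Dv += VS * (va0 + v10 + v0a + v01 - 4 * v00)` in the inline C above is a discrete Laplacian. A rough pure-NumPy sketch of just that term (made-up data, periodic boundaries via np.roll for brevity, whereas the kernel only touches interior cells):

import numpy as np

VS = 0.001
v = np.random.rand(64, 64)  # sediment heights (illustrative data)
lap = (np.roll(v, 1, axis=0) + np.roll(v, -1, axis=0)
       + np.roll(v, 1, axis=1) + np.roll(v, -1, axis=1) - 4.0 * v)
v_next = v + VS * lap       # diffuse sediment toward its neighbours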
gpl-3.0
1,988,516,181,111,120,400
31.090909
78
0.519231
false
IsmaelRLG/simpbot
simpbot/commands/requires.py
1
10605
# -*- coding: utf-8 -*- # Simple Bot (SimpBot) # Copyright 2016-2017, Ismael Lugo (kwargs) import logging from simpbot.bottools import irc as irctools from simpbot import localedata requerimentls = {} logging = logging.getLogger('simpbot') failed = True i18n = localedata.get() def get(requeriment): """get('admin:simple')""" args = [] if isinstance(requeriment, tuple) or isinstance(requeriment, list): return requeriment elif ':' in requeriment: requeriment = requeriment.split(':', 1) if requeriment[1] != '': if ',' in requeriment[1]: args.extend(requeriment[1].split(',')) else: args.append(requeriment[1]) requeriment = requeriment[0] if requeriment in requerimentls: return requerimentls[requeriment], args def req_nickserv(vars): irc = vars['self'].irc user = vars['user'] target = vars['target'] args = vars['watchdog'][1] if user.account is None and not 'ninja' in args: irc.error(target, localedata.get(vars['lang'])['not logged']) return failed requerimentls['requires nickserv'] = req_nickserv def only(vars): irc = vars['self'].irc args = vars['watchdog'][1] target = vars['user'].nick if len(args) == 0: logging.error(vars['msg'] % i18n['without params'] % 'only') return failed if args[0] == 'private': if vars['privbot']: return elif len(args) == 1: irc.error(target, localedata.get(vars['lang'])['only private']) return failed elif args[0] == 'channel': if not vars['privbot']: return elif len(args) == 1: irc.error(target, localedata.get(vars['lang'])['only channel']) return failed requerimentls['only'] = only def chan_register(vars): irc = vars['self'].irc msg = vars['msg'] dbstore = irc.dbstore channel = vars['channel'] privbot = vars['privbot'] target = vars['target'] locale = localedata.get(vars['lang']) result = vars['result'] tgroup = {} groupnames = vars['watchdog'][1] non_channel = (len(groupnames) > 0 and groupnames[0] == 'non-channel') for group in groupnames: if not '=' in group: continue try: v, group = group.split('=', 1) except ValueError: continue tgroup[v] = group.split() if privbot and not non_channel: if len(groupnames) == 0: logging.error(msg % i18n['without params'] % 'chan_register') return failed for group in (tgroup['private'] if 'private' in tgroup else groupnames): try: channel = result.group(group) except (IndexError, KeyError): logging.error(msg % i18n['invalid params'] % 'chan_register') return failed else: if channel is None: continue if not irctools.ischannel(channel, irc=irc): irc.error(target, locale['invalid channel'] % channel) return failed if dbstore.get_chan(channel) is None: irc.error(target, locale['unregistered channel'] % channel) return failed vars['channel'] = channel return irc.error(target, locale['channel needed']) return failed elif len(groupnames) > 0: try: if 'channel' in tgroup: if tgroup['channel'][0] == 'non-channel': assert False else: groupname = tgroup['channel'][0] else: groupname = groupnames[0] if not non_channel: channel = vars['result'].group(groupname) except AssertionError: pass except (IndexError, KeyError): irc.verbose('error', msg % i18n['invalid params'] % 'chan_register') return failed vars['channel'] = channel if dbstore.get_chan(channel) is None: if not non_channel: irc.error(target, locale['unregistered channel'] % channel) return failed elif dbstore.get_chan(channel) is None: if not non_channel: irc.error(target, locale['unregistered channel'] % channel) return failed requerimentls['registered chan'] = chan_register def unregistered_chan(vars): irc = vars['self'].irc msg = vars['msg'] dbstore = irc.dbstore channel1 = 
vars['channel'] channel2 = vars['watchdog'][1] privbot = vars['privbot'] target = vars['target'] locale = localedata.get(vars['lang']) if privbot: if len(channel2) == 0: logging.error(msg % i18n['without params'] % 'unregistered_chan') return failed for group in channel2: try: channel = vars['result'].group(group) except (IndexError, KeyError): logging.error(msg % i18n['invalid params'] % 'unregistered_chan') return failed else: if channel is None: continue if not irctools.ischannel(channel, irc=irc): irc.error(target, locale['invalid channel'] % channel) return failed if dbstore.get_chan(channel) is not None: irc.error(target, locale['registered channel'] % channel) return failed vars['channel'] = channel return irc.error(target, ) return failed elif len(channel2) > 0: try: channel = vars['result'].group(channel2[0]) except (IndexError, KeyError): logging.error(msg % i18n['invalid params'] % 'unregistered_chan') return failed vars['channel'] = channel if dbstore.get_chan(channel) is not None: irc.error(vars['target'], locale['registered channel'] % channel) return failed elif dbstore.get_chan(channel1) is not None: irc.error(vars['target'], locale['registered channel'] % channel1) return failed requerimentls['unregistered chan'] = unregistered_chan def user_register(vars): irc = vars['self'].irc msg = vars['msg'] user = vars['user'] args = vars['watchdog'][1] target = user.nick locale = localedata.get(vars['lang']) dbstore = irc.dbstore if len(args) == 0 or args[0] == 'ninja': if user.account is None: if not 'ninja' in args: irc.error(target, locale['not logged']) return failed if dbstore.get_user(user.account) is None: if not 'ninja' in args: irc.error(target, locale['you are not registered']) return failed return try: usr = vars['result'].group(args[0]) except (IndexError, KeyError): logging.error(msg % i18n['invalid params'] % 'user_register') return failed if usr is None: if 'optional' in args: return else: return failed if '*' in usr or '@' in usr or '!' 
in usr: irc.error(target, locale['invalid user'] % usr) return failed if dbstore.get_user(usr) is None: irc.error(target, locale['user no registered'] % usr) requerimentls['registered user'] = user_register def unregistered_user(vars): irc = vars['self'].irc user = vars['user'] target = user.nick dbstore = irc.dbstore if dbstore.get_user(user.account) is not None: irc.error(target, localedata.get(vars['lang'])['already registered']) return failed return requerimentls['unregistered user'] = unregistered_user def flags(vars): irc = vars['self'].irc msg = vars['msg'] user = vars['user'] args = vars['watchdog'][1] channel = vars['channel'] target = user.nick dbstore = irc.dbstore if len(args) == 0: logging.error(msg % i18n['without params'] % 'flags') return failed if not args[0].isalnum(): logging.error(msg % i18n['invalid params'] % 'flags') return failed chan = dbstore.get_chan(channel) flag = chan.get_flags(dbstore.get_user(user.account)) error = False if flag is None: flag = chan.get_flags(user.mask) if flag is None or not args[0] in flag: error = True elif not args[0] in flag: error = True if error: locale = localedata.get(vars['lang']) irc.error(target, locale['permission denied'] % args[0]) return failed requerimentls['flags'] = flags def admin(vars): irc = vars['self'].irc msg = vars['msg'] user = vars['user'] args = vars['watchdog'][1] tar = user.nick dbstore = irc.dbstore locale = localedata.get(vars['lang']) usr = dbstore.get_user(user.account) if not usr.isadmin(): # this line has been commented for security reasons, please # uncomment this line if you are sure of what makes #irc.error(tar, locale['only admins']) return failed if len(args) == 0: return for capab in args: if not usr.admin.has_capab(capab): irc.error(tar, locale['no capabs']) _ = vars['_'] _['usr'] = usr irc.verbose('fail use', msg % _(locale['fail use'])) return failed requerimentls['admin'] = admin def channel_status(vars): chan_name = vars['channel'] irc = vars['irc'] target = vars['target'] mode_req = vars['watchdog'][1] locale = localedata.get(vars['lang']) channel = irc.request.get_chan(chan_name) if channel is None: irc.error(target, locale['not on the channel'] % chan_name) return failed status_bot = channel.get_user(irc.nickname).get_status(chan_name) if status_bot == '': irc.error(target, locale['mode required'] % '|+'.join(mode_req)) return failed if not hasattr(irc.features, 'modeprefix'): irc.features.modeprefix = {} for char, mode in irc.features.prefix.items(): irc.features.modeprefix[mode] = char prefix = irc.features.modeprefix for mode in mode_req: if not mode in prefix: continue char = prefix[mode] if char in status_bot: return irc.error(target, locale['mode required'] % '|+'.join(mode_req)) return failed requerimentls['channel_status'] = channel_status
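As a small illustration of how `get()` above splits a requirement spec into a handler and its argument list (handler names taken from `requerimentls`):

# 'only:private'  ->  (only, ['private'])
# 'flags:f,o'     ->  (flags, ['f', 'o'])
handler, handler_args = get('only:private')
assert handler is only and handler_args == ['private']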
mit
-3,391,833,250,695,264,000
29.831395
81
0.56794
false
shiki0711/cmock
include/generator/generator.py
1
14573
import re TOKEN_TYPE_TEXT = 0 TOKEN_TYPE_VARIABLE = 1 TOKEN_TYPE_BLOCK = 2 TOKEN_TYPE_BLOCK_IF = 3 TOKEN_TYPE_BLOCK_ELSE = 4 TOKEN_TYPE_BLOCK_ENDIF = 5 TOKEN_TYPE_BLOCK_LOOP = 6 TOKEN_TYPE_BLOCK_ENDLOOP = 7 TOKEN_TYPE_BLOCK_INCLUDE = 8 PARSE_STATUS_ROOT = 0 PARSE_STATUS_IF = 1 PARSE_STATUS_ELSE = 2 PARSE_STATUS_LOOP = 3 class ParseError(RuntimeError): def __init__(self, arg): self.args = arg class Token(object): def __init__(self, token_string, token_type): self.token_string = token_string self.token_type = token_type BLOCK_TAG_START = '{% ' BLOCK_TAG_END = ' %}' VARIABLE_TAG_START = '{{ ' VARIABLE_TAG_END = ' }}' class Lexer(object): def __init__(self, template_string): self.template_string = template_string def tokenize(self): result = [] s = 0 status = 'TEXT' for index in range(0, len(self.template_string)): if self.template_string[index:].startswith(VARIABLE_TAG_START): if status == 'TEXT': status = 'VAR_START' result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT)) s = index else: errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...' raise ParseError(errmsg) elif self.template_string[index:].startswith(VARIABLE_TAG_END): if status == 'VAR_START': status = 'TEXT' result.append(Token(self.template_string[s:index+3], TOKEN_TYPE_VARIABLE)) s = index+3 else: errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...' raise ParseError(errmsg) elif self.template_string[index:].startswith(BLOCK_TAG_START): if status == 'TEXT': status = 'BLOCK_START' result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT)) s = index else: errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...' raise ParseError(errmsg) elif self.template_string[index:].startswith(BLOCK_TAG_END): if status == 'BLOCK_START': status = 'TEXT' result.append(Token(self.template_string[s:index+3], TOKEN_TYPE_BLOCK)) s = index+3 else: errmsg = 'tokenize error: index=' + str(index) + ' token: ' + self.template_string[index:index+30] + '...' 
raise ParseError(errmsg) else: pass result.append(Token(self.template_string[s:index], TOKEN_TYPE_TEXT)) #for item in result: # print 'token: '+item.token_string+' type: '+str(item.token_type) return result class NodeList(object): def __init__(self): self.container = [] def append(self, node): self.container.append(node) class Node(object): def __init__(self): self.parent = None def render(self, context): pass def resolve(self, variable): node = self while node: if node.context.has_key(variable): return node.context[variable] else: node = node.parent errmsg = 'resolve not found: '+variable raise ParseError(errmsg) return None class RootNode(Node): def __init__(self): Node.__init__(self) self.child_nodelist = NodeList() self.context = {} def addNode(self, node): self.child_nodelist.append(node) node.parent = self def render(self, context): self.context = context result = '' for node in self.child_nodelist.container: r = node.render(context) result += node.render({}) return result class TextNode(Node): def __init__(self, s): Node.__init__(self) self.s = s def render(self, context): self.context = context return self.s class VariableNode(Node): def __init__(self, var_string): Node.__init__(self) self.var_string = var_string def render(self, context): self.context = context resolved_var = self.resolve(self.var_string) if resolved_var: return resolved_var else: return '' class ConditionNode(Node): def __init__(self, condition_var_string): Node.__init__(self) self.condition_var_string = condition_var_string self.true_nodelist = NodeList() self.false_nodelist = NodeList() def render(self, context): self.context = context resolved_var = self.resolve(self.condition_var_string) if resolved_var: self.context[self.condition_var_string] = resolved_var if int(resolved_var) > 0: condition = True else: condition = False else: condition = False if condition: nodelist = self.true_nodelist.container else: nodelist = self.false_nodelist.container result = '' for node in nodelist: result += node.render({}) return result def addTrueNode(self, node): self.true_nodelist.append(node) node.parent = self def addFalseNode(self, node): self.false_nodelist.append(node) node.parent = self class LoopNode(Node): def __init__(self, loop_vars_list): Node.__init__(self) self.loop_vars_list = loop_vars_list self.loop_nodelist = NodeList() def render(self, context): self.context = context result = '' separator = '' resolved_list = self.resolve(self.loop_vars_list[1]) if resolved_list: for item in resolved_list: result += separator if self.loop_vars_list[0] == '_': for (k,v) in item.items(): self.context[k] = v else: self.context[self.loop_vars_list[0]] = item for node in self.loop_nodelist.container: result += node.render({}) separator = self.loop_vars_list[2] return result def addLoopNode(self, node): self.loop_nodelist.container.append(node) node.parent = self class Parser(object): def __init__(self, tokens): self.tokens = tokens self.parse_stack = [] self.root = RootNode() self.parse_stack_init(self.root) def parse(self): while self.tokens: token = self.next_token() if token.token_type == TOKEN_TYPE_TEXT: self.append(token.token_string, TOKEN_TYPE_TEXT) elif token.token_type == TOKEN_TYPE_VARIABLE: if token.token_string: regex_req = ur"\{\{\s([a-zA-Z_][a-zA-Z_0-9]*)\s\}\}" m = re.match(regex_req, token.token_string) if m: var_string = m.group(1) self.append(var_string, TOKEN_TYPE_VARIABLE) else: errmsg = 'parse error: ' + token.token_string raise ParseError(errmsg) elif token.token_type == TOKEN_TYPE_BLOCK: if 
token.token_string: regex_req_if = ur"\{%\sif\s([a-zA-Z_][a-zA-Z_0-9]*)\s%\}" regex_req_else = ur"\{%\selse\s%\}" regex_req_endif = ur"\{%\sendif\s%\}" regex_req_loop = ur"\{%\sfor\s([a-zA-Z_][a-zA-Z_0-9]*)\sin\s([a-zA-Z_][a-zA-Z_0-9]*)\s%\}" regex_req_loop_with = ur"\{%\sfor\s([a-zA-Z_][a-zA-Z_0-9]*)\sin\s([a-zA-Z_][a-zA-Z_0-9]*)\swith\s([^\s]+)\s%\}" regex_req_endloop = ur"\{%\sendfor\s%\}" regex_req_include = ur"\{%\sinclude\s([^\s]+)\s%\}" m = re.match(regex_req_if, token.token_string) if m: if_var_string = m.group(1) self.append(if_var_string, TOKEN_TYPE_BLOCK_IF) continue m = re.match(regex_req_else, token.token_string) if m: self.append('', TOKEN_TYPE_BLOCK_ELSE) continue m = re.match(regex_req_endif, token.token_string) if m: self.append('', TOKEN_TYPE_BLOCK_ENDIF) continue m = re.match(regex_req_loop, token.token_string) if m: loop_item_string = m.group(1) loop_list_string = m.group(2) self.append([loop_item_string, loop_list_string, ''], TOKEN_TYPE_BLOCK_LOOP) continue m = re.match(regex_req_loop_with, token.token_string) if m: loop_item_string = m.group(1) loop_list_string = m.group(2) loop_with_string = m.group(3) self.append([loop_item_string, loop_list_string, loop_with_string], TOKEN_TYPE_BLOCK_LOOP) continue m = re.match(regex_req_endloop, token.token_string) if m: self.append('', TOKEN_TYPE_BLOCK_ENDLOOP) continue m = re.match(regex_req_include, token.token_string) if m: sub_template_file = m.group(1) self.append(sub_template_file, TOKEN_TYPE_BLOCK_INCLUDE) continue errmsg = 'parse error: ' + token.token_string raise ParseError(errmsg) else: errmsg = 'parse error: ' + token.token_string raise ParseError(errmsg) return self.root def append(self, parsed_content, type): current = self.parse_stack[-1] current_node = current['node'] current_status = current['status'] if type == TOKEN_TYPE_TEXT or type == TOKEN_TYPE_VARIABLE: if type == TOKEN_TYPE_TEXT: node = TextNode(parsed_content) else: node = VariableNode(parsed_content) self.insert_node(current_node, current_status, node) elif type == TOKEN_TYPE_BLOCK_IF: node = ConditionNode(parsed_content) self.insert_node(current_node, current_status, node) self.parse_stack.append({'node':node, 'status':PARSE_STATUS_IF}) elif type == TOKEN_TYPE_BLOCK_ELSE: if current_status != PARSE_STATUS_IF: raise ParseError(parsed_content) else: current['status'] = PARSE_STATUS_ELSE elif type == TOKEN_TYPE_BLOCK_ENDIF: if current_status != PARSE_STATUS_IF and current_status != PARSE_STATUS_ELSE: self.print_parse_stack() raise ParseError(parsed_content) else: self.parse_stack.pop() elif type == TOKEN_TYPE_BLOCK_LOOP: node = LoopNode(parsed_content) self.insert_node(current_node, current_status, node) self.parse_stack.append({'node':node, 'status':PARSE_STATUS_LOOP}) elif type == TOKEN_TYPE_BLOCK_ENDLOOP: if current_status != PARSE_STATUS_LOOP: raise ParseError(parsed_content) else: self.parse_stack.pop() elif type == TOKEN_TYPE_BLOCK_INCLUDE: t = Templete(TEMPLATE_FROM_FILE, parsed_content) node = t.compile() self.insert_node(current_node, current_status, node) else: raise ParseError(parsed_content) def parse_stack_init(self, root): self.parse_stack = [] self.parse_stack.append({'node':root, 'status':PARSE_STATUS_ROOT}) def insert_node(self, parent_node, parent_status, node): if parent_status == PARSE_STATUS_IF: parent_node.addTrueNode(node) elif parent_status == PARSE_STATUS_ELSE: parent_node.addFalseNode(node) elif parent_status == PARSE_STATUS_LOOP: parent_node.addLoopNode(node) else: parent_node.addNode(node) def next_token(self): return 
self.tokens.pop(0) def print_parse_stack(self): for item in self.parse_stack: print 'status: '+str(item['status']) TEMPLATE_FROM_FILE = 0 TEMPLATE_FROM_STRING = 1 class Templete(object): def __init__(self, source, content): self.parse_node = None self.generated_string = None if source == TEMPLATE_FROM_FILE: try: fp = open(content, 'r') except Exception: errmsg = 'Open template file '+str(content) + ' error!' raise ParseError(errmsg) self.template_string = fp.read() fp.close() else: self.template_string = content def compile(self): lex = Lexer(self.template_string) tokens = lex.tokenize() parser = Parser(tokens) self.parse_node = parser.parse() return self.parse_node def render(self, context): self.generated_string = self.parse_node.render(context) return self.generated_string def to_file(self, filepath): try: fp = open(filepath, 'w') except Exception: errmsg = 'Open output file '+str(filepath) + ' error!' raise ParseError(errmsg) fp.write(self.generated_string) fp.close() def main(): t = Templete(TEMPLATE_FROM_FILE, 'cmock_mock.template') t.compile() function_settings = [] for i in range(0, 20): argrange = range(1, i+1) argrange_str = map(lambda x: str(x), argrange) function_settings.append({'noret':1, 'args':str(i), 'argrange':argrange_str}) function_settings.append({'noret':0, 'args':str(i), 'argrange':argrange_str}) t.render({'function_settings':function_settings}) t.to_file('../cmock_mock.h') if __name__ == '__main__': main() print 'done.'
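A minimal usage sketch of the `Templete` class above; the template text and context are invented for illustration:

t = Templete(TEMPLATE_FROM_STRING, 'Files:{% for f in files %} {{ f }}{% endfor %}')
t.compile()
rendered = t.render({'files': ['a.h', 'b.h']})
# rendered == 'Files: a.h b.h'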
mit
1,245,056,475,781,457,700
34.457421
131
0.52762
false