# ===== fragment (repo/path lost in extraction) | license: gpl-3.0 =====

        self.label_3.setText(_translate("StartForm", "Your Memory"))
        self.label_5.setText(_translate("StartForm", "Уровень сложности:"))  # "Difficulty level:"
        self.edDiff.setText(_translate("StartForm", "1"))
        self.edCount.setText(_translate("StartForm", "1"))
        self.label_6.setText(_translate("StartForm", "Количество ходов:"))  # "Number of moves:"
        self.pbStart.setText(_translate("StartForm", "Поехали!"))  # "Let's go!"
        self.pbExit.setText(_translate("StartForm", "Выход"))  # "Exit"
        self.label_7.setText(_translate("StartForm", "Denisov Foundation (c) 2014"))
\"))\n\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-7055602198253922000,"string":"-7,055,602,198,253,922,000"},"line_mean":{"kind":"number","value":46.4827586207,"string":"46.482759"},"line_max":{"kind":"number","value":192,"string":"192"},"alpha_frac":{"kind":"number","value":0.6406439119,"string":"0.640644"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":108790,"cells":{"repo_name":{"kind":"string","value":"alexheretic/apart-gtk"},"path":{"kind":"string","value":"src/gtktools.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1183"},"content":{"kind":"string","value":"from gi.repository import Gtk\n\n\ndef rows(grid: Gtk.Grid) -> int:\n return max(map(lambda child: grid.child_get_property(child, 'top-attach'), grid.get_children()), default=-1) + 1\n\n\nclass GridRowTenant:\n \"\"\"Tool for managing one-time adding and later removing of exclusive owners of rows of a shared grid\"\"\"\n def __init__(self, grid: Gtk.Grid):\n self.grid = grid\n self.base_row = rows(grid)\n self.attached = []\n\n def attach(self, widget, left=0, top=0, height=1, width=1):\n self.grid.attach(widget, left=left, top=self.base_row + top, height=height, width=width)\n self.attached.append(widget)\n if hasattr(self.grid, 'on_row_change'):\n self.grid.on_row_change()\n\n def all_row_numbers(self):\n return map(lambda c: self.grid.child_get_property(c, 'top-attach'), self.attached)\n\n def evict(self):\n for row in reversed(sorted(set(self.all_row_numbers()))):\n self.grid.remove_row(row)\n if hasattr(self.grid, 'on_row_change'):\n self.grid.on_row_change()\n\n top = self.grid.get_child_at(top=0, left=0)\n if top and type(top) is Gtk.Separator:\n top.hide()\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":-3770943297761088500,"string":"-3,770,943,297,761,088,500"},"line_mean":{"kind":"number","value":35.96875,"string":"35.96875"},"line_max":{"kind":"number","value":116,"string":"116"},"alpha_frac":{"kind":"number","value":0.6238377008,"string":"0.623838"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":108791,"cells":{"repo_name":{"kind":"string","value":"3dfxsoftware/cbss-addons"},"path":{"kind":"string","value":"lct_hr/report/payslip_report_pdf.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"6517"},"content":{"kind":"string","value":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-TODAY OpenERP S.A. \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
# ===== repo: 3dfxsoftware/cbss-addons | path: lct_hr/report/payslip_report_pdf.py | license: gpl-2.0 =====

# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from report import report_sxw
from datetime import datetime


class payslip_report_pdf(report_sxw.rml_parse):
    _name = 'payslip_report_pdf'
    _description = "Employee Payslips"

    def __init__(self, cr, uid, name, context):
        super(payslip_report_pdf, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'payslips': self.get_payslip_data(cr, uid, context=context),
        })

    # Not sure how well this will perform on big data sets. The yearly stuff is
    # duplicating a ton of lookups. If it turns out this performs badly, rewrite
    # to use queries instead of ORM.
    def get_payslip_data(self, cr, uid, context=None):
        retval = {}
        payslip_obj = self.pool.get('hr.payslip')
        payslip_ids = context.get('active_ids')
        payslips = payslip_obj.browse(cr, uid, payslip_ids, context=context)
        for payslip in payslips:
            sen_yr, sen_mon, sen_day = self.pool.get('hr.employee')\
                .get_seniority_ymd(cr, uid, payslip.employee_id.id, context=context)
            seniority = '%dA, %dM, %dJ' % (sen_yr, sen_mon, sen_day)

            # Leaves
            leave_obj = self.pool.get('hr.holidays')
            leave_ids = leave_obj.search(cr, uid,
                [('employee_id', '=', payslip.employee_id.id)], context=context)
            leaves = leave_obj.browse(cr, uid, leave_ids, context=context)
            leaves_acquired = sum([x.number_of_days for x in leaves
                                   if x.state == 'validate'
                                   and x.type == 'add'
                                   and x.holiday_status_id.limit == False]) or 0.0
            holidays = [x for x in leaves
                        if x.state == 'validate'
                        and x.type == 'remove'
                        and x.date_from.split()[0] >= payslip.date_from.split()[0]
                        and x.date_to.split()[0] <= payslip.date_to.split()[0]]
            # leaves_taken = sum([x.number_of_days for x in leaves
            #                     if x.state == 'validate'
            #                     and x.type == 'remove'
            #                     and x.holiday_status_id.limit == False])
            leaves_remaining = sum([x.number_of_days for x in leaves
                                    if x.state == 'validate'
                                    and x.holiday_status_id.limit == False]) or 0.0

            retval[payslip] = {
                # 'lines': lines,
                'seniority': seniority,
                'leaves_acquired': leaves_acquired,
                # 'leaves_taken': leaves_taken,
                'leaves_remaining': leaves_remaining,
                'holidays': holidays,
            }
            retval[payslip].update(self.get_salarial_data(cr, uid, payslip,
                                                          yearly=False,
                                                          context=context))
            # Yearly stuff
            jan_1 = payslip.date_from.split('-')[0] + '-01-01'
            slip_end = payslip.date_to.split()[0]
            yr_slip_ids = payslip_obj.search(cr, uid,
                [('employee_id', '=', payslip.employee_id.id),
                 ('date_from', '>=', jan_1),
                 ('date_to', '<=', slip_end)], context=context)
            yearly_data = dict.fromkeys(['gross_year',
                                         'salarial_costs_year',
                                         'patronal_costs_year',
                                         'net_salary_year',
                                         'benefits_in_kind_year',
                                         'worked_hours_year',
                                         'worked_days_year'], 0)
            for yr_slip in payslip_obj.browse(cr, uid, yr_slip_ids, context=context):
                data = self.get_salarial_data(cr, uid, yr_slip, yearly=True,
                                              context=context)
                for key in data.keys():
                    yearly_data[key] += data.get(key, 0)
            retval[payslip].update(yearly_data)

        return retval

    def get_salarial_data(self, cr, uid, payslip, yearly=False, context=None):
        retval = {}
        keys = ['gross', 'salarial_costs', 'patronal_costs',
                'net_salary', 'benefits_in_kind', 'worked_hours', 'worked_days']
        lines = payslip.get_visible_lines(context=context)
        gross = sum(x.total for x in lines if x.sequence in [1999])
        salarial_costs = sum(x.total for x in lines if x.sequence in [2040])
        patronal_costs = sum(x.total for x in lines if x.sequence in [2041])
        net_salary = sum(x.total for x in lines if x.sequence in [5000])
        benefits_in_kind = sum(x.total for x in lines if x.sequence in [1009])
        # For now, it's 160, except the 1st month, when it's prorata.
        days_in_service = (datetime.strptime(payslip.date_to, '%Y-%m-%d')
                           - datetime.strptime(payslip.employee_id.start_date,
                                               '%Y-%m-%d')).days
        days_in_month = (datetime.strptime(payslip.date_to, '%Y-%m-%d')
                         - datetime.strptime(payslip.date_from, '%Y-%m-%d')).days
        worked_hours = int(160 * min(1, float(days_in_service) / days_in_month))
        # worked_hours = sum([x.number_of_hours for x in payslip.worked_days_line_ids])
        worked_days = sum([x.number_of_days for x in payslip.worked_days_line_ids])
        if not yearly:
            retval['lines'] = lines
            for key in keys:
                retval[key] = locals().get(key)
        else:
            for key in keys:
                retval[key + '_year'] = locals().get(key)
        return retval


report_sxw.report_sxw('report.webkit.payslip_report_pdf',
                      'hr.payslip',
                      'lct_hr/report/payslip_report.html.mako',
                      parser=payslip_report_pdf)
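The worked_hours proration above is the only non-obvious arithmetic in
get_salarial_data: a fixed 160-hour month is scaled down when the employee
started mid-month. A standalone replay with illustrative dates (values are
examples, not from the module):

# Hired on the 16th of a month whose payslip runs from the 1st to the 31st.
from datetime import datetime

date_from, date_to, start_date = '2014-03-01', '2014-03-31', '2014-03-16'
days_in_service = (datetime.strptime(date_to, '%Y-%m-%d')
                   - datetime.strptime(start_date, '%Y-%m-%d')).days  # 15
days_in_month = (datetime.strptime(date_to, '%Y-%m-%d')
                 - datetime.strptime(date_from, '%Y-%m-%d')).days     # 30
worked_hours = int(160 * min(1, float(days_in_service) / days_in_month))
assert worked_hours == 80  # half the month in service -> half of 160 hours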
# ===== repo: CodeYellowBV/django-binder | path: binder/management/commands/define_groups.py | license: mit =====

from django.db import transaction
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils.translation import gettext as _
from django.contrib.auth.models import Group, Permission, ContentType


class Command(BaseCommand):
    help = _('Define user groups/roles to their required specifications')

    @transaction.atomic
    def handle(self, *args, **options):
        # Delete any stale groups
        Group.objects.exclude(name__in=settings.GROUP_PERMISSIONS).delete()

        for group_name in settings.GROUP_PERMISSIONS:
            group, _ = Group.objects.get_or_create(name=group_name)

            # Get all groups that are contained by this group
            groups_to_expand = [group_name]
            groups = set()
            while groups_to_expand:
                group_name = groups_to_expand.pop()
                if group_name not in groups:
                    groups.add(group_name)
                    groups_to_expand.extend(settings.GROUP_CONTAINS.get(group_name, []))

            # Collect all permissions for these groups
            perms = set()
            for group_name in groups:
                for perm_name in settings.GROUP_PERMISSIONS[group_name]:
                    try:
                        app, other = perm_name.split('.')
                        if ':' in other:
                            action_and_model, scope = other.split(':')
                        else:
                            action_and_model = other
                        action, model = action_and_model.split('_')

                        content_type = ContentType.objects.get(
                            app_label=app,
                            model=model,
                        )

                        perm = Permission.objects.get(
                            content_type=content_type,
                            codename=other,
                        )
                        perms.add(perm)
                    except ContentType.DoesNotExist:
                        raise RuntimeError(
                            'Model for ' + perm_name + ' does not exist'
                        )
                    except Permission.DoesNotExist:
                        raise RuntimeError(
                            'Permission ' + perm_name + ' does not exist'
                        )

            group.permissions.set(perms)
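The command reads its specification from Django settings; GROUP_PERMISSIONS
and GROUP_CONTAINS are project-level settings, so here is a hypothetical shape
for them (the group and permission names are examples, not taken from the repo):

# Hypothetical settings.py fragment.
GROUP_PERMISSIONS = {
    'viewer': ['auth.view_user'],
    'editor': ['auth.change_user'],
    'admin': ['auth.delete_user'],
}
# A group transitively receives the permissions of every group it contains:
# with this config, 'admin' ends up with delete, change and view on auth.user.
GROUP_CONTAINS = {
    'admin': ['editor'],
    'editor': ['viewer'],
}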
# ===== repo: chantera/biaffineparser | path: src/utils/training/callbacks.py | license: apache-2.0 =====

# fmt: off
__all__ = ["ProgressCallback", "PrintCallback", "MonitorCallback", "EarlyStopCallback", "SaveCallback"]  # noqa
# fmt: on

import operator
import os

import torch
from tqdm import tqdm

from utils.training.trainer import Callback  # isort: skip


class ProgressCallback(Callback):
    def __init__(self):
        self.training_pbar = None
        self.evaluation_pbar = None

    def on_train_begin(self, context):
        self._ensure_close(train=True)
        self.training_pbar = tqdm()

    def on_train_end(self, context, metrics):
        self._ensure_close(train=True)

    def on_evaluate_begin(self, context):
        self._ensure_close(eval=True)
        self.evaluation_pbar = tqdm(leave=self.training_pbar is None)

    def on_evaluate_end(self, context, metrics):
        self._ensure_close(eval=True)

    def on_loop_begin(self, context):
        pbar = self.training_pbar if context.train else self.evaluation_pbar
        pbar.reset(context.num_batches)
        if context.train:
            pbar.set_postfix({"epoch": context.epoch})

    def on_step_end(self, context, output):
        pbar = self.training_pbar if context.train else self.evaluation_pbar
        pbar.update(1)

    def _ensure_close(self, train=False, eval=False):
        if train:
            if self.training_pbar is not None:
                self.training_pbar.close()
                self.training_pbar = None
        if eval:
            if self.evaluation_pbar is not None:
                self.evaluation_pbar.close()
                self.evaluation_pbar = None

    def __del__(self):
        self._ensure_close(train=True, eval=True)


class PrintCallback(Callback):
    def __init__(self, printer=None):
        self.printer = printer or tqdm.write

    def on_loop_end(self, context, metrics):
        label = "train" if context.train else "eval"
        loss = metrics[f"{label}/loss"]
        message = f"[{label}] epoch {context.epoch} - loss: {loss:.4f}"

        prefix = label + "/"
        for key, val in metrics.items():
            if not isinstance(val, float) or not key.startswith(prefix):
                continue
            key = key.split("/", 1)[1]
            if key == "loss":
                continue
            message += f", {key}: {val:.4f}"

        self.printer(message)


class MonitorCallback(Callback):
    def __init__(self, monitor="eval/loss", mode="min"):
        self.monitor = monitor
        self.count = 0
        self.mode = mode

        if self.mode == "min":
            self.monitor_op = operator.lt
            self.best = float("inf")
        elif self.mode == "max":
            self.monitor_op = operator.gt
            self.best = float("-inf")
        else:
            raise ValueError(f"invalid mode: {self.mode}")

    def on_evaluate_end(self, context, metrics):
        current_val = metrics[self.monitor]
        if self.monitor_op(current_val, self.best):
            self.best = current_val
            self.count = 0
        else:
            self.count += 1


class EarlyStopCallback(MonitorCallback):
    def __init__(self, monitor="eval/loss", patience=3, mode="min"):
        super().__init__(monitor, mode)
        self.patience = patience

    def on_evaluate_end(self, context, metrics):
        super().on_evaluate_end(context, metrics)
        if self.count >= self.patience:
            context.trainer.terminate()


class SaveCallback(Callback):
    def __init__(self, output_dir, prefix="", mode="latest", monitor=None):
        if mode not in {"latest", "min", "max"}:
            raise ValueError(f"invalid mode: {mode}")
        self.output_dir = output_dir
        self.prefix = prefix
        self.monitor = MonitorCallback(monitor, mode) if monitor else None
        self._checkpoints = []

    def on_evaluate_end(self, context, metrics):
        if self.monitor:
            self.monitor.on_evaluate_end(context, metrics)
            if self.monitor.count > 0:
                return

        trainer = context.trainer
        # TODO: add other configuration
        checkpoint = {
            "model": trainer.model.state_dict(),
            "optimizer": trainer.optimizer.state_dict(),
            "scheduler": trainer.scheduler.state_dict() if trainer.scheduler else None,
            "trainer_config": trainer.config,
            "trainer_state": trainer._state,
        }
        file = os.path.join(self.output_dir, f"{self.prefix}step-{context.global_step}.ckpt")
        torch.save(checkpoint, file)

        checkpoints = []
        for ckpt_path in self._checkpoints:
            if os.path.exists(ckpt_path):
                os.remove(ckpt_path)
        checkpoints.append(file)
        self._checkpoints = checkpoints
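How the callbacks compose is easiest to see at construction time. The exact
Trainer interface lives in utils.training.trainer (not shown in this dump), so
the `callbacks=` keyword below is an assumption for illustration only:

# Sketch only; Trainer(...) and its signature are assumed, the callback
# arguments are the real ones defined above.
callbacks = [
    ProgressCallback(),                                  # tqdm progress bars
    PrintCallback(),                                     # one metrics line per epoch
    EarlyStopCallback(monitor="eval/loss", patience=3),  # stop after 3 bad evals
    SaveCallback("checkpoints", prefix="parser-",
                 monitor="eval/loss", mode="min"),       # keep only the best ckpt
]
# trainer = Trainer(model, optimizer, callbacks=callbacks)  # assumed wiring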
# ===== repo: uclapi/uclapi | path: backend/uclapi/oauth/scoping.py | license: mit =====

# Storage of the scope map
# The purpose of this setup is that the OAuth scope of any app can be stored
# in a single field. This way, we can easily add more scopes later.
# We have a BigIntegerField to work with, which means 64 bits of storage.
# This translates into 64 types of scope, each of which can be checked with a
# bit mask.
# We do not have any OAuth scopes needed yet, but the current plan is:
# "roombookings": (0, "Private room bookings data"),
# "timetable": (1, "Private timetable data"),
# "uclu": (2, "Private UCLU data"),
# "moodle": (3, "Private Moodle data")
# E.g. roombookings has scope 0, which is
# 0000000000000000000000000000000000000000000000000000000000000001b.
# This is because the 0th bit (LSB) is set to 1.
# roombookings + uclu = 101b, or a scope number of 2^2 + 2^0 = 4 + 1 = 5


class Scopes:
    SCOPE_MAP = {
        "timetable": (1, "Personal Timetable"),
        "student_number": (2, "Student Number"),
    }

    def __init__(self, scope_map=None):
        if scope_map:
            self.SCOPE_MAP = scope_map

    # Add a scope to the scope number given and return the new number
    def add_scope(self, current, scope_name):
        try:
            scope_shift = self.SCOPE_MAP[scope_name][0]
        except KeyError:
            return current

        return (current | (1 << scope_shift))

    # Check whether a scope is present in the current scope number given
    def check_scope(self, current, scope_name):
        try:
            scope_shift = self.SCOPE_MAP[scope_name][0]
        except KeyError:
            return False

        return ((1 << scope_shift) & current) > 0

    # Remove a scope from the current scope number
    def remove_scope(self, current, scope_name):
        try:
            scope_shift = self.SCOPE_MAP[scope_name][0]
        except KeyError:
            return current

        if current & 1 << scope_shift > 0:
            return ~(~current + (1 << scope_shift))
        else:
            return current

    # Produce a dictionary with the scope information. Example:
    # {
    #     "roombookings": True,
    #     "timetable": False,
    #     ...
    # }
    def scope_dict(self, current, pretty_print=True):
        scopes = []
        for x in self.SCOPE_MAP.keys():
            if self.check_scope(current, x):
                if pretty_print:
                    scope = {
                        "name": x,
                        "description": self.SCOPE_MAP[x][1]
                    }
                else:
                    scope = {
                        "id": self.SCOPE_MAP[x][0],
                        "name": x
                    }

                scopes.append(scope)
        return scopes

    # Same as above, but list all possible scopes along with whether they are
    # included in the current state given.
    # This is used by the dashboard.
    def scope_dict_all(self, current, pretty_print=True):
        scopes = []
        for x in self.SCOPE_MAP.keys():
            if pretty_print:
                scope = {
                    "name": x,
                    "description": self.SCOPE_MAP[x][1],
                    "enabled": self.check_scope(current, x)
                }
            else:
                scope = {
                    "id": self.SCOPE_MAP[x][0],
                    "name": x,
                    "enabled": self.check_scope(current, x)
                }

            scopes.append(scope)
        return scopes

    # Get available scopes for showing to the user
    def get_all_scopes(self, pretty_print=True):
        scopes = []
        for x in self.SCOPE_MAP.keys():
            if pretty_print:
                scope = {
                    "name": x,
                    "description": self.SCOPE_MAP[x][1]
                }
            else:
                scope = {
                    "id": self.SCOPE_MAP[x][0],
                    "name": x
                }

            scopes.append(scope)
        return scopes

    # Dump the scope map so that developers can track scopes with it
    def get_scope_map(self):
        scopes = []
        for x in self.SCOPE_MAP.keys():
            scope = {
                "name": x,
                "id": self.SCOPE_MAP[x][0],
                "description": self.SCOPE_MAP[x][1]
            }
            scopes.append(scope)
        scopes = sorted(scopes, key=lambda k: k["id"])
        return scopes
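The bit arithmetic described in the header comment, replayed through the class
(the values follow directly from the SCOPE_MAP defined above):

s = Scopes()
current = 0
current = s.add_scope(current, "timetable")        # sets bit 1 -> 0b010 == 2
current = s.add_scope(current, "student_number")   # sets bit 2 -> 0b110 == 6
assert s.check_scope(current, "timetable") is True
current = s.remove_scope(current, "timetable")     # clears bit 1 -> 0b100 == 4
assert current == 4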
# ===== repo: dan2082/KSPData | path: main.py | license: gpl-3.0 =====

import logging
import os
import sys
from tkinter import Frame, PanedWindow, Toplevel, Menu
from tkinter.filedialog import askopenfilename, asksaveasfilename

from ksp_data import KSPData
from models.ksp_object import KSPObject
from views.frame_kspobject import FrameKSPObject
from views.treeview_kspelements import TreeviewKSPElements
from views.treeview_kspobjects import TreeviewKSPObjects


class Main(Frame):

    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack(expand=1, fill='both')
        self.master.geometry('1440x900')
        paned_window = PanedWindow(self)
        self.treeview_kspelements = TreeviewKSPElements(self)
        self.treeview_kspobjects = TreeviewKSPObjects(self)
        paned_window.pack(expand=1, fill='both')
        paned_window.add(self.treeview_kspelements)
        paned_window.add(self.treeview_kspobjects)

        menubar = Menu(self)

        filemenu = Menu(self)
        filemenu.add_command(label='Open', command=self._open)
        filemenu.add_command(label='Save', command=self._save)
        filemenu.add_command(label='Save As', command=self._save_as)
        filemenu.add_separator()
        filemenu.add_command(label='Exit', command=self.master.destroy)
        menubar.add_cascade(menu=filemenu, label='File')

        insertmenu = Menu(self)
        insertmenu.add_command(label='KSP Element', command=self._insert_element, state='disabled')
        insertmenu.add_command(label='KSP Object', command=self._insert_object)
        menubar.add_cascade(menu=insertmenu, label='Insert')
        self.master.config(menu=menubar)

    def populate(self, kspelement):
        self._game = kspelement
        self._populate()

    def _populate(self):
        self.treeview_kspelements.populate(self._game)
        self.treeview_kspobjects.populate(self._game)

    def kspelement_selected(self, id_, kspelement=None):
        if kspelement is None: kspelement = self._game
        kspelement_by_id = KSPData.get_kspelement_by_id(kspelement, id_)
        self.treeview_kspobjects.populate(kspelement_by_id)

    def kspobject_double_1(self, id_, kspelement=None):
        if kspelement is None: kspelement = self._game
        kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_)
        self.create_frame_kspobject(kspobject_by_id)

    def create_frame_kspobject(self, kspobject):
        logging.info(kspobject.id_)
        toplevel = Toplevel(self)
        FrameKSPObject(toplevel).populate(kspobject)

    def update_kspobject(self, name, value, id_, kspelement=None):
        if kspelement is None: kspelement = self._game
        kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_)
        kspobject_by_id.name = name
        kspobject_by_id.value = value
        self.treeview_kspobjects.populate(kspelement)

    def delete_kspobject(self, id_, kspelement=None):
        if kspelement is None: kspelement = self._game
        kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_)
        kspelement.kspobjects.remove(kspobject_by_id)
        self.treeview_kspobjects.populate(kspelement)

    def _open(self):
        self._filename = askopenfilename()
        if os.path.isfile(self._filename):
            self._game = KSPData.parse(self._filename)
            self._populate()

    def _save(self):
        KSPData.save(self._game, self._filename)

    def _save_as(self):
        filename = asksaveasfilename()
        KSPData.save(self._game, filename)

    def _insert_element(self):
        pass

    def _insert_object(self):
        id_ = int(self.treeview_kspelements.selection()[0])
        kspelement_by_id = KSPData.get_kspelement_by_id(self._game, id_)
        kspobject = KSPObject('name', 'value')
        kspelement_by_id.kspobjects.append(kspobject)
        self.create_frame_kspobject(kspobject)


def test():
    Main().mainloop()


if __name__ == '__main__':
    level = logging.NOTSET
    stream = sys.stdout
    logging.basicConfig(level=level, stream=stream)

    test()
# ===== repo: jbalogh/zamboni | path: apps/addons/tests/test_models.py | license: bsd-3-clause =====

# -*- coding: utf-8 -*-
from contextlib import nested
import itertools
import json
import os
from datetime import datetime, timedelta
import tempfile
from urlparse import urlparse

from django import forms
from django.contrib.auth.models import AnonymousUser
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.db import IntegrityError
from django.utils import translation

from mock import patch, Mock
from nose.tools import eq_, assert_not_equal

import amo
import amo.tests
import addons.search
from amo import set_user
from amo.helpers import absolutify
from amo.signals import _connect, _disconnect
from addons.models import (Addon, AddonCategory, AddonDependency,
                           AddonRecommendation, AddonType, AddonUpsell,
                           BlacklistedGuid, Category, Charity, CompatOverride,
                           CompatOverrideRange, FrozenAddon, Persona, Preview)
from applications.models import Application, AppVersion
from devhub.models import ActivityLog
from files.models import File, Platform
from files.tests.test_models import TestLanguagePack, UploadTest
from market.models import Price, AddonPremium
from reviews.models import Review
from translations.models import TranslationSequence, Translation
from users.models import UserProfile
from versions.models import ApplicationsVersions, Version
from versions.compare import version_int
from webapps.models import Webapp

class TestAddonManager(amo.tests.TestCase):
    fixtures = ['addons/featured', 'addons/test_manager', 'base/collections',
                'base/featured', 'bandwagon/featured_collections',
                'base/addon_5299_gcal']

    def setUp(self):
        set_user(None)

    @patch.object(settings, 'NEW_FEATURES', False)
    def test_featured(self):
        eq_(Addon.objects.featured(amo.FIREFOX).count(),
            Addon.objects.listed(amo.FIREFOX)
            .filter(feature__application=amo.FIREFOX.id).count())

    @patch.object(settings, 'NEW_FEATURES', True)
    def test_new_featured(self):
        # TODO: remove this when NEW_FEATURES goes away.
It's here because\n # build() was already called in setUp().\n from addons.cron import reset_featured_addons\n reset_featured_addons()\n eq_(Addon.objects.featured(amo.FIREFOX).count(), 3)\n\n def test_listed(self):\n Addon.objects.filter(id=5299).update(disabled_by_user=True)\n q = Addon.objects.listed(amo.FIREFOX, amo.STATUS_PUBLIC)\n eq_(len(q.all()), 4)\n\n addon = q[0]\n eq_(addon.id, 2464)\n\n # Disabling hides it.\n addon.disabled_by_user = True\n addon.save()\n\n # Should be 3 now, since the one is now disabled.\n eq_(q.count(), 3)\n\n # If we search for public or unreviewed we find it.\n addon.disabled_by_user = False\n addon.status = amo.STATUS_UNREVIEWED\n addon.save()\n eq_(q.count(), 3)\n eq_(Addon.objects.listed(amo.FIREFOX, amo.STATUS_PUBLIC,\n amo.STATUS_UNREVIEWED).count(), 4)\n\n # Can't find it without a file.\n addon.versions.get().files.get().delete()\n eq_(q.count(), 3)\n\n def test_public(self):\n public = Addon.objects.public()\n for a in public:\n assert_not_equal(\n a.id, 3, 'public() must not return unreviewed add-ons')\n\n def test_reviewed(self):\n addons = Addon.objects.reviewed()\n for a in addons:\n assert a.status in amo.REVIEWED_STATUSES, (a.id, a.status)\n\n def test_unreviewed(self):\n \"\"\"\n Tests for unreviewed addons.\n \"\"\"\n exp = Addon.objects.unreviewed()\n\n for addon in exp:\n assert addon.status in amo.UNREVIEWED_STATUSES, (\n \"unreviewed() must return unreviewed addons.\")\n\n def test_valid(self):\n addon = Addon.objects.get(pk=5299)\n addon.update(disabled_by_user=True)\n objs = Addon.objects.valid()\n\n for addon in objs:\n assert addon.status in amo.LISTED_STATUSES\n assert not addon.disabled_by_user\n\n def test_valid_disabled_by_user(self):\n before = Addon.objects.valid_and_disabled().count()\n addon = Addon.objects.get(pk=5299)\n addon.update(disabled_by_user=True)\n eq_(Addon.objects.valid_and_disabled().count(), before)\n\n def test_valid_disabled_by_admin(self):\n before = Addon.objects.valid_and_disabled().count()\n addon = Addon.objects.get(pk=5299)\n addon.update(status=amo.STATUS_DISABLED)\n eq_(Addon.objects.valid_and_disabled().count(), before)\n\n\nclass TestAddonManagerFeatured(amo.tests.TestCase):\n # TODO(cvan): Merge with above once new featured add-ons are enabled.\n fixtures = ['addons/featured', 'bandwagon/featured_collections',\n 'base/collections', 'base/featured']\n\n @patch.object(settings, 'NEW_FEATURES', True)\n def test_new_featured(self):\n f = Addon.objects.featured(amo.FIREFOX)\n eq_(f.count(), 3)\n eq_(sorted(x.id for x in f),\n [2464, 7661, 15679])\n f = Addon.objects.featured(amo.SUNBIRD)\n assert not f.exists()\n\n\nclass TestNewAddonVsWebapp(amo.tests.TestCase):\n\n def test_addon_from_kwargs(self):\n a = Addon(type=amo.ADDON_EXTENSION)\n assert isinstance(a, Addon)\n\n def test_webapp_from_kwargs(self):\n w = Addon(type=amo.ADDON_WEBAPP)\n assert isinstance(w, Webapp)\n\n def test_addon_from_db(self):\n a = Addon.objects.create(type=amo.ADDON_EXTENSION)\n assert isinstance(a, Addon)\n assert isinstance(Addon.objects.get(id=a.id), Addon)\n\n def test_webapp_from_db(self):\n a = Addon.objects.create(type=amo.ADDON_WEBAPP)\n assert isinstance(a, Webapp)\n assert isinstance(Addon.objects.get(id=a.id), Webapp)\n\n\nclass TestAddonModels(amo.tests.TestCase):\n fixtures = ['base/apps',\n 'base/collections',\n 'base/featured',\n 'base/users',\n 'base/addon_5299_gcal',\n 'base/addon_3615',\n 'base/addon_3723_listed',\n 'base/addon_6704_grapple.json',\n 'base/addon_4594_a9',\n 'base/addon_4664_twitterbar',\n 
'base/thunderbird',\n 'addons/featured',\n 'addons/invalid_latest_version',\n 'addons/blacklisted',\n 'bandwagon/featured_collections']\n\n def setUp(self):\n TranslationSequence.objects.create(id=99243)\n # TODO(andym): use Mock appropriately here.\n self.old_version = amo.FIREFOX.latest_version\n amo.FIREFOX.latest_version = '3.6.15'\n\n def tearDown(self):\n amo.FIREFOX.latest_version = self.old_version\n\n def test_current_version(self):\n \"\"\"\n Tests that we get the current (latest public) version of an addon.\n \"\"\"\n a = Addon.objects.get(pk=3615)\n eq_(a.current_version.id, 81551)\n\n def test_current_version_listed(self):\n a = Addon.objects.get(pk=3723)\n eq_(a.current_version.id, 89774)\n\n def test_current_version_listed_no_version(self):\n Addon.objects.filter(pk=3723).update(_current_version=None)\n Version.objects.filter(addon=3723).delete()\n a = Addon.objects.get(pk=3723)\n eq_(a.current_version, None)\n\n def test_latest_version(self):\n \"\"\"\n Tests that we get the latest version of an addon.\n \"\"\"\n a = Addon.objects.get(pk=3615)\n eq_(a.latest_version.id, Version.objects.filter(addon=a).latest().id)\n\n def test_latest_version_no_version(self):\n Addon.objects.filter(pk=3723).update(_current_version=None)\n Version.objects.filter(addon=3723).delete()\n a = Addon.objects.get(pk=3723)\n eq_(a.latest_version, None)\n\n def test_latest_version_ignore_beta(self):\n a = Addon.objects.get(pk=3615)\n\n v1 = Version.objects.create(addon=a, version='1.0')\n File.objects.create(version=v1)\n eq_(a.latest_version.id, v1.id)\n\n v2 = Version.objects.create(addon=a, version='2.0beta')\n File.objects.create(version=v2, status=amo.STATUS_BETA)\n eq_(a.latest_version.id, v1.id) # Still should be f1\n\n def test_current_beta_version(self):\n a = Addon.objects.get(pk=5299)\n eq_(a.current_beta_version.id, 50000)\n\n @patch.object(settings, 'NEW_FEATURES', False)\n def test_current_version_mixed_statuses(self):\n \"\"\"Mixed file statuses are evil (bug 558237).\"\"\"\n a = Addon.objects.get(pk=3895)\n # Last version has pending files, so second to last version is\n # considered \"current\".\n eq_(a.current_version.id, 78829)\n\n # Fix file statuses on last version.\n v = Version.objects.get(pk=98217)\n v.files.update(status=amo.STATUS_PUBLIC)\n\n # Wipe caches.\n cache.clear()\n a.update_version()\n\n # Make sure the updated version is now considered current.\n eq_(a.current_version.id, v.id)\n\n def test_delete(self):\n \"\"\"Test deleting add-ons.\"\"\"\n a = Addon.objects.get(pk=3615)\n a.name = u'é'\n a.delete('bye')\n eq_(len(mail.outbox), 1)\n assert BlacklistedGuid.objects.filter(guid=a.guid)\n\n def test_delete_url(self):\n \"\"\"Test deleting addon has URL in the email.\"\"\"\n a = Addon.objects.get(pk=4594)\n a.delete('bye')\n assert absolutify(a.get_url_path()) in mail.outbox[0].body\n\n def test_delete_searchengine(self):\n \"\"\"\n Test deleting searchengines (which have no guids) should not barf up\n the deletion machine.\n \"\"\"\n a = Addon.objects.get(pk=4594)\n a.delete('bye')\n eq_(len(mail.outbox), 1)\n\n def test_delete_status_gone_wild(self):\n \"\"\"\n Test deleting add-ons where the higheststatus is zero, but there's a\n non-zero status.\n \"\"\"\n a = Addon.objects.get(pk=3615)\n a.status = amo.STATUS_UNREVIEWED\n a.highest_status = 0\n a.delete('bye')\n eq_(len(mail.outbox), 1)\n assert BlacklistedGuid.objects.filter(guid=a.guid)\n\n def test_delete_incomplete(self):\n \"\"\"Test deleting incomplete add-ons.\"\"\"\n a = Addon.objects.get(pk=3615)\n 
a.status = 0\n a.highest_status = 0\n a.save()\n a.delete(None)\n eq_(len(mail.outbox), 0)\n assert not BlacklistedGuid.objects.filter(guid=a.guid)\n\n def test_incompatible_latest_apps(self):\n a = Addon.objects.get(pk=3615)\n eq_(a.incompatible_latest_apps(), [])\n\n av = ApplicationsVersions.objects.get(pk=47881)\n av.max = AppVersion.objects.get(pk=97) # Firefox 2.0\n av.save()\n\n a = Addon.objects.get(pk=3615)\n eq_(a.incompatible_latest_apps(), [amo.FIREFOX])\n\n # Check a search engine addon.\n a = Addon.objects.get(pk=4594)\n eq_(a.incompatible_latest_apps(), [])\n\n def test_incompatible_asterix(self):\n av = ApplicationsVersions.objects.get(pk=47881)\n av.max = AppVersion.objects.create(application_id=amo.FIREFOX.id,\n version_int=version_int('5.*'),\n version='5.*')\n av.save()\n a = Addon.objects.get(pk=3615)\n eq_(a.incompatible_latest_apps(), [])\n\n def test_icon_url(self):\n \"\"\"\n Tests for various icons.\n 1. Test for an icon that exists.\n 2. Test for default THEME icon.\n 3. Test for default non-THEME icon.\n \"\"\"\n a = Addon.objects.get(pk=3615)\n expected = (settings.ADDON_ICON_URL % (3615, 32, 0)).rstrip('/0')\n assert a.icon_url.startswith(expected)\n a = Addon.objects.get(pk=6704)\n a.icon_type = None\n assert a.icon_url.endswith('/icons/default-theme.png'), (\n \"No match for %s\" % a.icon_url)\n a = Addon.objects.get(pk=3615)\n a.icon_type = None\n\n assert a.icon_url.endswith('icons/default-32.png')\n\n def test_icon_url_default(self):\n a = Addon.objects.get(pk=3615)\n a.update(icon_type='')\n default = 'icons/default-32.png'\n eq_(a.icon_url.endswith(default), True)\n eq_(a.get_icon_url(32).endswith(default), True)\n eq_(a.get_icon_url(32, use_default=True).endswith(default), True)\n eq_(a.get_icon_url(32, use_default=False), None)\n\n def test_thumbnail_url(self):\n \"\"\"\n Test for the actual thumbnail URL if it should exist, or the no-preview\n url.\n \"\"\"\n a = Addon.objects.get(pk=4664)\n a.thumbnail_url.index('/previews/thumbs/20/20397.png?modified=')\n a = Addon.objects.get(pk=5299)\n assert a.thumbnail_url.endswith('/icons/no-preview.png'), (\n \"No match for %s\" % a.thumbnail_url)\n\n def test_is_unreviewed(self):\n \"\"\"Test if add-on is unreviewed or not\"\"\"\n # public add-on\n a = Addon.objects.get(pk=3615)\n assert not a.is_unreviewed(), 'public add-on: is_unreviewed=False'\n\n # unreviewed add-on\n a = Addon(status=amo.STATUS_UNREVIEWED)\n assert a.is_unreviewed(), 'sandboxed add-on: is_unreviewed=True'\n\n a.status = amo.STATUS_PENDING\n assert a.is_unreviewed(), 'pending add-on: is_unreviewed=True'\n\n def test_is_selfhosted(self):\n \"\"\"Test if an add-on is listed or hosted\"\"\"\n # hosted\n a = Addon.objects.get(pk=3615)\n assert not a.is_selfhosted(), 'hosted add-on => !is_selfhosted()'\n\n # listed\n a.status = amo.STATUS_LISTED\n assert a.is_selfhosted(), 'listed add-on => is_selfhosted()'\n\n def test_is_no_restart(self):\n a = Addon.objects.get(pk=3615)\n f = a.current_version.all_files[0]\n eq_(f.no_restart, False)\n eq_(a.is_no_restart(), False)\n\n f.update(no_restart=True)\n eq_(Addon.objects.get(pk=3615).is_no_restart(), True)\n\n a.versions.all().delete()\n a._current_version = None\n eq_(a.is_no_restart(), False)\n\n def test_is_featured(self):\n \"\"\"Test if an add-on is globally featured\"\"\"\n a = Addon.objects.get(pk=1003)\n assert a.is_featured(amo.FIREFOX, 'en-US'), (\n 'globally featured add-on not recognized')\n\n def test_has_full_profile(self):\n \"\"\"Test if an add-on's developer profile is complete 
(public).\"\"\"\n addon = lambda: Addon.objects.get(pk=3615)\n assert not addon().has_full_profile()\n\n a = addon()\n a.the_reason = 'some reason'\n a.save()\n assert not addon().has_full_profile()\n\n a.the_future = 'some future'\n a.save()\n assert addon().has_full_profile()\n\n a.the_reason = ''\n a.the_future = ''\n a.save()\n assert not addon().has_full_profile()\n\n def test_has_profile(self):\n \"\"\"Test if an add-on's developer profile is (partially or entirely)\n completed.\n\n \"\"\"\n addon = lambda: Addon.objects.get(pk=3615)\n assert not addon().has_profile()\n\n a = addon()\n a.the_reason = 'some reason'\n a.save()\n assert addon().has_profile()\n\n a.the_future = 'some future'\n a.save()\n assert addon().has_profile()\n\n a.the_reason = ''\n a.the_future = ''\n a.save()\n assert not addon().has_profile()\n\n def test_has_eula(self):\n addon = lambda: Addon.objects.get(pk=3615)\n assert addon().has_eula\n\n a = addon()\n a.eula = ''\n a.save()\n assert not addon().has_eula\n\n a.eula = 'eula'\n a.save()\n assert addon().has_eula\n\n def newlines_helper(self, string_before):\n addon = Addon.objects.get(pk=3615)\n addon.privacy_policy = string_before\n addon.save()\n return addon.privacy_policy.localized_string_clean\n\n def test_newlines_normal(self):\n before = (\"Paragraph one.\\n\"\n \"This should be on the very next line.\\n\\n\"\n \"Should be two nl's before this line.\\n\\n\\n\"\n \"Should be three nl's before this line.\\n\\n\\n\\n\"\n \"Should be four nl's before this line.\")\n\n after = before # Nothing special; this shouldn't change.\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_ul(self):\n before = (\"
    \\n\\n\"\n \"
  • No nl's between the ul and the li.
  • \\n\\n\"\n \"
  • No nl's between li's.\\n\\n\"\n \"But there should be two before this line.
  • \\n\\n\"\n \"
\")\n\n after = (\"
    \"\n \"
  • No nl's between the ul and the li.
  • \"\n \"
  • No nl's between li's.\\n\\n\"\n \"But there should be two before this line.
  • \"\n \"
\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_ul_tight(self):\n before = (\"There should be one nl between this and the ul.\\n\"\n \"
  • test
  • test
\\n\"\n \"There should be no nl's above this line.\")\n\n after = (\"There should be one nl between this and the ul.\\n\"\n \"
  • test
  • test
\"\n \"There should be no nl's above this line.\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_ul_loose(self):\n before = (\"There should be two nl's between this and the ul.\\n\\n\"\n \"
  • test
  • test
\\n\\n\"\n \"There should be one nl above this line.\")\n\n after = (\"There should be two nl's between this and the ul.\\n\\n\"\n \"
  • test
  • test
\\n\"\n \"There should be one nl above this line.\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_blockquote_tight(self):\n before = (\"There should be one nl below this.\\n\"\n \"
Hi
\\n\"\n \"There should be no nl's above this.\")\n\n after = (\"There should be one nl below this.\\n\"\n \"
Hi
\"\n \"There should be no nl's above this.\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_blockquote_loose(self):\n before = (\"There should be two nls below this.\\n\\n\"\n \"
Hi
\\n\\n\"\n \"There should be one nl above this.\")\n\n after = (\"There should be two nls below this.\\n\\n\"\n \"
Hi
\\n\"\n \"There should be one nl above this.\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_inline(self):\n before = (\"If we end a paragraph w/ a non-block-level tag\\n\\n\"\n \"The newlines should be kept\")\n\n after = before # Should stay the same\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_code_inline(self):\n before = (\"Code tags aren't blocks.\\n\\n\"\n \"alert(test);\\n\\n\"\n \"See?\")\n\n after = before # Should stay the same\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_li_newlines(self):\n before = (\"
  • \\nxx
\")\n after = (\"
  • xx
\")\n eq_(self.newlines_helper(before), after)\n\n before = (\"
  • xx\\n
\")\n after = (\"
  • xx
\")\n eq_(self.newlines_helper(before), after)\n\n before = (\"
  • xx\\nxx
\")\n after = (\"
  • xx\\nxx
\")\n eq_(self.newlines_helper(before), after)\n\n before = (\"
\")\n after = (\"
\")\n eq_(self.newlines_helper(before), after)\n\n # All together now\n before = (\"
  • \\nxx
  • xx\\n
  • xx\\nxx
  • \"\n \"
  • \\n
\")\n\n after = (\"
  • xx
  • xx
  • xx\\nxx
  • \"\n \"
\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_empty_tag(self):\n before = (\"This is a test!\")\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_empty_tag_nested(self):\n before = (\"This is a test!\")\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_empty_tag_block_nested(self):\n b = (\"Test.\\n\\n
\\ntest.\")\n a = (\"Test.\\n\\n
test.\")\n\n eq_(self.newlines_helper(b), a)\n\n def test_newlines_empty_tag_block_nested_spaced(self):\n before = (\"Test.\\n\\n
\\n\\n
    \\n\\n
  • \"\n \"
  • \\n\\n
\\n\\n
\\ntest.\")\n after = (\"Test.\\n\\n
test.\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_li_newlines_inline(self):\n before = (\"
  • \\ntest\\ntest\\n\\ntest\\n
  • \"\n \"
  • Test test test.
\")\n\n after = (\"
  • test\\ntest\\n\\ntest
  • \"\n \"
  • Test test test.
\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_li_all_inline(self):\n before = (\"Test with no newlines and block level \"\n \"stuff to see what happens.\")\n\n after = before # Should stay the same\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_spaced_blocks(self):\n before = (\"
\\n\\n
    \\n\\n
  • \\n\\ntest\\n\\n
  • \\n\\n\"\n \"
\\n\\n
\")\n\n after = \"
  • test
\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_spaced_inline(self):\n before = \"Line.\\n\\n\\nThis line is bold.\\n\\n\\nThis isn't.\"\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_nested_inline(self):\n before = \"\\nThis line is bold.\\n\\nThis is also italic\"\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_xss_script(self):\n before = \"\"\n after = \"&lt;script&gt;\\n\\nalert('test');\\n&lt;/script&gt;\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_xss_inline(self):\n before = \"test\"\n after = \"test\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_attribute_link_doublequote(self):\n before = 'test'\n\n parsed = self.newlines_helper(before)\n\n assert parsed.endswith('google.com\" rel=\"nofollow\">test')\n\n def test_newlines_attribute_singlequote(self):\n before = \"lol\"\n after = 'lol'\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_attribute_doublequote(self):\n before = 'lol'\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_attribute_nestedquotes_doublesingle(self):\n before = 'lol'\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_attribute_nestedquotes_singledouble(self):\n before = 'lol'\n after = before\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_unclosed_b(self):\n before = (\"test\")\n after = (\"test\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_unclosed_b_wrapped(self):\n before = (\"This is a test\")\n after = (\"This is a test\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_unclosed_li(self):\n before = (\"
  • test
\")\n after = (\"
  • test
\")\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_malformed_faketag(self):\n before = \"\"\n after = \"&lt;madonna&gt;\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_malformed_tag(self):\n before = \"\"\n\n # Bleach interprets 'of' and 'bleach' as attributes, and strips them.\n # Good? No. Any way around it? Not really.\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_less_than(self):\n before = \"3 < 5\"\n after = \"3 &lt; 5\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_newlines_less_than_tight(self):\n before = \"abc 3<5 def\"\n after = \"abc 3&lt;5 def\"\n\n eq_(self.newlines_helper(before), after)\n\n def test_app_categories(self):\n addon = lambda: Addon.objects.get(pk=3615)\n\n c22 = Category.objects.get(id=22)\n c22.name = 'CCC'\n c22.save()\n c23 = Category.objects.get(id=23)\n c23.name = 'BBB'\n c23.save()\n c24 = Category.objects.get(id=24)\n c24.name = 'AAA'\n c24.save()\n\n cats = addon().all_categories\n eq_(cats, [c22, c23, c24])\n for cat in cats:\n eq_(cat.application.id, amo.FIREFOX.id)\n\n cats = [c24, c23, c22]\n app_cats = [(amo.FIREFOX, cats)]\n eq_(addon().app_categories, app_cats)\n\n tb = Application.objects.get(id=amo.THUNDERBIRD.id)\n c = Category(application=tb, name='XXX', type=addon().type, count=1,\n weight=1)\n c.save()\n AddonCategory.objects.create(addon=addon(), category=c)\n c24.save() # Clear the app_categories cache.\n app_cats += [(amo.THUNDERBIRD, [c])]\n eq_(addon().app_categories, app_cats)\n\n def test_review_replies(self):\n \"\"\"\n Make sure that developer replies are not returned as if they were\n original reviews.\n \"\"\"\n addon = Addon.objects.get(id=3615)\n u = UserProfile.objects.get(pk=999)\n version = addon.current_version\n new_review = Review(version=version, user=u, rating=2, body='hello',\n addon=addon)\n new_review.save()\n new_reply = Review(version=version, user=addon.authors.all()[0],\n addon=addon, reply_to=new_review,\n rating=2, body='my reply')\n new_reply.save()\n\n review_list = [r.pk for r in addon.reviews]\n\n assert new_review.pk in review_list, (\n 'Original review must show up in review list.')\n assert new_reply.pk not in review_list, (\n 'Developer reply must not show up in review list.')\n\n def test_takes_contributions(self):\n a = Addon(status=amo.STATUS_PUBLIC, wants_contributions=True,\n paypal_id='$$')\n assert a.takes_contributions\n\n a.status = amo.STATUS_UNREVIEWED\n assert not a.takes_contributions\n a.status = amo.STATUS_PUBLIC\n\n a.wants_contributions = False\n assert not a.takes_contributions\n a.wants_contributions = True\n\n a.paypal_id = None\n assert not a.takes_contributions\n\n a.charity_id = 12\n assert a.takes_contributions\n\n def test_show_beta(self):\n # Addon.current_beta_version will be empty, so show_beta is False.\n a = Addon(status=amo.STATUS_PUBLIC)\n assert not a.show_beta\n\n @patch('addons.models.Addon.current_beta_version')\n def test_show_beta_with_beta_version(self, beta_mock):\n beta_mock.return_value = object()\n # Fake current_beta_version to return something truthy.\n a = Addon(status=amo.STATUS_PUBLIC)\n assert a.show_beta\n\n # We have a beta version but status has to be public.\n a.status = amo.STATUS_UNREVIEWED\n assert not a.show_beta\n\n def test_update_logs(self):\n addon = Addon.objects.get(id=3615)\n set_user(UserProfile.objects.all()[0])\n addon.versions.all().delete()\n\n entries = ActivityLog.objects.all()\n eq_(entries[0].action, amo.LOG.CHANGE_STATUS.id)\n\n def 
test_can_request_review_waiting_period(self):\n now = datetime.now()\n a = Addon.objects.create(type=1)\n v = Version.objects.create(addon=a)\n # The first LITE version is only 5 days old, no dice.\n first_f = File.objects.create(status=amo.STATUS_LITE, version=v)\n first_f.update(datestatuschanged=now - timedelta(days=5),\n created=now - timedelta(days=20))\n # TODO(andym): can this go in Addon.objects.create? bug 618444\n a.update(status=amo.STATUS_LITE)\n eq_(a.can_request_review(), ())\n\n # Now the first LITE is > 10 days old, change can happen.\n first_f.update(datestatuschanged=now - timedelta(days=11))\n # Add a second file, to be sure that we test the date\n # of the first created file.\n second_f = File.objects.create(status=amo.STATUS_LITE, version=v)\n second_f.update(datestatuschanged=now - timedelta(days=5))\n eq_(a.status, amo.STATUS_LITE)\n eq_(a.can_request_review(), (amo.STATUS_PUBLIC,))\n\n def test_days_until_full_nomination(self):\n # Normalize to 12am for reliable day subtraction:\n now = datetime.now().date()\n a = Addon.objects.create(type=1)\n v = Version.objects.create(addon=a)\n f = File.objects.create(status=amo.STATUS_LITE, version=v)\n a.update(status=amo.STATUS_LITE)\n f.update(datestatuschanged=now - timedelta(days=4))\n eq_(a.days_until_full_nomination(), 6)\n f.update(datestatuschanged=now - timedelta(days=1))\n eq_(a.days_until_full_nomination(), 9)\n f.update(datestatuschanged=now - timedelta(days=10))\n eq_(a.days_until_full_nomination(), 0)\n f.update(datestatuschanged=now)\n eq_(a.days_until_full_nomination(), 10)\n # Only calculate days from first submitted version:\n f.update(datestatuschanged=now - timedelta(days=2),\n created=now - timedelta(days=2))\n # Ignore this one:\n f2 = File.objects.create(status=amo.STATUS_LITE, version=v)\n f2.update(datestatuschanged=now - timedelta(days=1),\n created=now - timedelta(days=1))\n eq_(a.days_until_full_nomination(), 8)\n # Wrong status:\n a.update(status=amo.STATUS_PUBLIC)\n f.update(datestatuschanged=now - timedelta(days=4))\n eq_(a.days_until_full_nomination(), 0)\n\n def setup_files(self, status):\n addon = Addon.objects.create(type=1)\n version = Version.objects.create(addon=addon)\n File.objects.create(status=status, version=version)\n return addon, version\n\n def test_no_change_disabled_user(self):\n addon, version = self.setup_files(amo.STATUS_UNREVIEWED)\n addon.update(status=amo.STATUS_PUBLIC)\n addon.update(disabled_by_user=True)\n version.save()\n eq_(addon.status, amo.STATUS_PUBLIC)\n assert addon.is_disabled\n\n def test_no_change_disabled(self):\n addon = Addon.objects.create(type=1)\n version = Version.objects.create(addon=addon)\n addon.update(status=amo.STATUS_DISABLED)\n version.save()\n eq_(addon.status, amo.STATUS_DISABLED)\n assert addon.is_disabled\n\n def test_can_alter_in_prelim(self):\n addon, version = self.setup_files(amo.STATUS_LITE)\n addon.update(status=amo.STATUS_LITE)\n version.save()\n eq_(addon.status, amo.STATUS_LITE)\n\n def test_removing_public(self):\n addon, version = self.setup_files(amo.STATUS_UNREVIEWED)\n addon.update(status=amo.STATUS_PUBLIC)\n version.save()\n eq_(addon.status, amo.STATUS_UNREVIEWED)\n\n def test_removing_public_with_prelim(self):\n addon, version = self.setup_files(amo.STATUS_LITE)\n addon.update(status=amo.STATUS_PUBLIC)\n version.save()\n eq_(addon.status, amo.STATUS_LITE)\n\n def test_can_request_review_no_files(self):\n addon = Addon.objects.get(pk=3615)\n addon.versions.all()[0].files.all().delete()\n eq_(addon.can_request_review(), 
())\n\n def check(self, status, exp, kw={}):\n addon = Addon.objects.get(pk=3615)\n changes = {'status': status, 'disabled_by_user': False}\n changes.update(**kw)\n addon.update(**changes)\n eq_(addon.can_request_review(), exp)\n\n def test_can_request_review_null(self):\n self.check(amo.STATUS_NULL, (amo.STATUS_LITE, amo.STATUS_PUBLIC))\n\n def test_can_request_review_null_disabled(self):\n self.check(amo.STATUS_NULL, (), {'disabled_by_user': True})\n\n def test_can_request_review_unreviewed(self):\n self.check(amo.STATUS_UNREVIEWED, (amo.STATUS_PUBLIC,))\n\n def test_can_request_review_nominated(self):\n self.check(amo.STATUS_NOMINATED, (amo.STATUS_LITE,))\n\n def test_can_request_review_public(self):\n self.check(amo.STATUS_PUBLIC, ())\n\n def test_can_request_review_disabled(self):\n self.check(amo.STATUS_DISABLED, ())\n\n def test_can_request_review_lite(self):\n self.check(amo.STATUS_LITE, (amo.STATUS_PUBLIC,))\n\n def test_can_request_review_lite_and_nominated(self):\n self.check(amo.STATUS_LITE_AND_NOMINATED, ())\n\n def test_can_request_review_purgatory(self):\n self.check(amo.STATUS_PURGATORY, (amo.STATUS_LITE, amo.STATUS_PUBLIC,))\n\n def test_none_homepage(self):\n # There was an odd error when a translation was set to None.\n Addon.objects.create(homepage=None, type=amo.ADDON_EXTENSION)\n\n def test_slug_isdigit(self):\n a = Addon.objects.create(type=1, name='xx', slug='123')\n eq_(a.slug, '123~')\n\n a.slug = '44'\n a.save()\n eq_(a.slug, '44~')\n\n def test_slug_isblacklisted(self):\n # When an addon is uploaded, it doesn't use the form validation,\n # so we'll just mangle the slug if its blacklisted.\n a = Addon.objects.create(type=1, name='xx', slug='validate')\n eq_(a.slug, 'validate~')\n\n a.slug = 'validate'\n a.save()\n eq_(a.slug, 'validate~')\n\n def delete(self):\n addon = Addon.objects.get(id=3615)\n eq_(len(mail.outbox), 0)\n addon.delete('so long and thanks for all the fish')\n eq_(len(mail.outbox), 1)\n\n def test_delete_to(self):\n self.delete()\n eq_(mail.outbox[0].to, [settings.FLIGTAR])\n\n def test_delete_by(self):\n try:\n user = Addon.objects.get(id=3615).authors.all()[0]\n set_user(user)\n self.delete()\n assert 'DELETED BY: 55021' in mail.outbox[0].body\n finally:\n set_user(None)\n\n def test_delete_by_unknown(self):\n self.delete()\n assert 'DELETED BY: Unknown' in mail.outbox[0].body\n\n def test_view_source(self):\n # view_source should default to True.\n a = Addon.objects.create(type=1)\n assert a.view_source\n\n @patch('files.models.File.hide_disabled_file')\n def test_admin_disabled_file_hidden(self, hide_mock):\n a = Addon.objects.get(id=3615)\n a.status = amo.STATUS_PUBLIC\n a.save()\n assert not hide_mock.called\n\n a.status = amo.STATUS_DISABLED\n a.save()\n assert hide_mock.called\n\n @patch('files.models.File.hide_disabled_file')\n def test_user_disabled_file_hidden(self, hide_mock):\n a = Addon.objects.get(id=3615)\n a.disabled_by_user = False\n a.save()\n assert not hide_mock.called\n\n a.disabled_by_user = True\n a.save()\n assert hide_mock.called\n\n def test_set_nomination(self):\n a = Addon.objects.get(id=3615)\n a.update(status=amo.STATUS_NULL)\n for s in (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED):\n a.versions.latest().update(nomination=None)\n a.update(status=s)\n assert a.versions.latest().nomination\n\n def test_new_version_inherits_nomination(self):\n a = Addon.objects.get(id=3615)\n ver = 10\n for st in (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED):\n a.update(status=st)\n old_ver = a.versions.latest()\n v = 
Version.objects.create(addon=a, version=str(ver))\n eq_(v.nomination, old_ver.nomination)\n ver += 1\n\n def test_beta_version_does_not_inherit_nomination(self):\n a = Addon.objects.get(id=3615)\n a.update(status=amo.STATUS_LISTED)\n v = Version.objects.create(addon=a, version='1.0')\n v.nomination = None\n v.save()\n a.update(status=amo.STATUS_NOMINATED)\n File.objects.create(version=v, status=amo.STATUS_BETA,\n filename='foobar.xpi')\n v.version = '1.1'\n v.save()\n eq_(v.nomination, None)\n\n def test_lone_version_does_not_inherit_nomination(self):\n a = Addon.objects.get(id=3615)\n Version.objects.all().delete()\n v = Version.objects.create(addon=a, version='1.0')\n eq_(v.nomination, None)\n\n def test_reviwed_addon_does_not_inherit_nomination(self):\n a = Addon.objects.get(id=3615)\n ver = 10\n for st in (amo.STATUS_PUBLIC, amo.STATUS_BETA, amo.STATUS_LISTED):\n a.update(status=st)\n v = Version.objects.create(addon=a, version=str(ver))\n eq_(v.nomination, None)\n ver += 1\n\n def test_nomination_no_version(self):\n # Check that the on_change method still works if there are no versions.\n a = Addon.objects.get(id=3615)\n a.versions.all().delete()\n a.update(status=amo.STATUS_NOMINATED)\n\n def test_nomination_already_set(self):\n addon = Addon.objects.get(id=3615)\n earlier = datetime.today() - timedelta(days=2)\n addon.versions.latest().update(nomination=earlier)\n addon.update(status=amo.STATUS_NOMINATED)\n eq_(addon.versions.latest().nomination.date(), earlier.date())\n\n def test_category_transform(self):\n addon = Addon.objects.get(id=3615)\n cats = addon.categories.filter(application=amo.FIREFOX.id)\n names = [c.name for c in cats]\n assert addon.get_category(amo.FIREFOX.id).name in names\n\n\nclass TestAddonGetURLPath(amo.tests.TestCase):\n\n def test_get_url_path(self):\n addon = Addon(slug='woo')\n eq_(addon.get_url_path(), '/en-US/firefox/addon/woo/')\n\n def test_get_url_path_more(self):\n addon = Addon(slug='woo')\n eq_(addon.get_url_path(more=True), '/en-US/firefox/addon/woo/more')\n\n\nclass TestAddonModelsFeatured(amo.tests.TestCase):\n fixtures = ['addons/featured', 'bandwagon/featured_collections',\n 'base/addon_3615', 'base/collections', 'base/featured']\n\n def setUp(self):\n # Addon._featured keeps an in-process cache we need to clear.\n if hasattr(Addon, '_featured'):\n del Addon._featured\n\n def _test_featured_random(self):\n f = Addon.featured_random(amo.FIREFOX, 'en-US')\n eq_(sorted(f), [1001, 1003, 2464, 3481, 7661, 15679])\n f = Addon.featured_random(amo.FIREFOX, 'fr')\n eq_(sorted(f), [1001, 1003, 2464, 7661, 15679])\n f = Addon.featured_random(amo.SUNBIRD, 'en-US')\n eq_(f, [])\n\n @patch.object(settings, 'NEW_FEATURES', False)\n def test_featured_random(self):\n self._test_featured_random()\n\n @patch.object(settings, 'NEW_FEATURES', True)\n def test_new_featured_random(self):\n self._test_featured_random()\n\n\nclass TestBackupVersion(amo.tests.TestCase):\n fixtures = ['addons/update']\n\n def setUp(self):\n self.version_1_2_0 = 105387\n self.addon = Addon.objects.get(pk=1865)\n set_user(None)\n\n def setup_new_version(self):\n for version in Version.objects.filter(pk__gte=self.version_1_2_0):\n appversion = version.apps.all()[0]\n appversion.min = AppVersion.objects.get(version='4.0b1')\n appversion.save()\n\n def test_no_backup_version(self):\n self.addon.update_version()\n eq_(self.addon.backup_version, None)\n eq_(self.addon.current_version.version, '1.2.2')\n\n def test_no_current_version(self):\n Version.objects.all().delete()\n 
self.addon.update(_current_version=None)\n eq_(self.addon.backup_version, None)\n eq_(self.addon.current_version, None)\n\n def test_has_backup_version(self):\n self.setup_new_version()\n assert self.addon.update_version()\n eq_(self.addon.backup_version.version, '1.1.3')\n eq_(self.addon.current_version.version, '1.2.2')\n\n def test_backup_version(self):\n self.setup_new_version()\n assert self.addon.update_version()\n eq_(self.addon.backup_version.version, '1.1.3')\n\n def test_firefox_versions(self):\n self.setup_new_version()\n assert self.addon.update_version()\n backup = self.addon.backup_version.compatible_apps[amo.FIREFOX]\n eq_(backup.max.version, '3.7a5pre')\n eq_(backup.min.version, '3.0.12')\n current = self.addon.current_version.compatible_apps[amo.FIREFOX]\n eq_(current.max.version, '4.0b8pre')\n eq_(current.min.version, '3.0.12')\n\n def test_version_signals(self):\n self.setup_new_version()\n version = self.addon.versions.all()[0]\n assert not self.addon.backup_version\n version.save()\n assert Addon.objects.get(pk=1865).backup_version\n\n\nclass TestCategoryModel(amo.tests.TestCase):\n\n def test_category_url(self):\n \"\"\"Every type must have a url path for its categories.\"\"\"\n for t in amo.ADDON_TYPE.keys():\n if t == amo.ADDON_DICT:\n continue # Language packs don't have categories.\n cat = Category(type=AddonType(id=t), slug='omg')\n assert cat.get_url_path()\n\n\nclass TestPersonaModel(amo.tests.TestCase):\n\n def test_image_urls(self):\n mypersona = Persona(id=1234, persona_id=9876)\n assert mypersona.thumb_url.endswith('/7/6/9876/preview.jpg')\n assert mypersona.preview_url.endswith('/7/6/9876/preview_large.jpg')\n\n def test_update_url(self):\n p = Persona(id=1234, persona_id=9876)\n assert p.update_url.endswith('9876')\n\n\nclass TestPreviewModel(amo.tests.TestCase):\n\n fixtures = ['base/previews']\n\n def test_as_dict(self):\n expect = ['caption', 'full', 'thumbnail']\n reality = sorted(Preview.objects.all()[0].as_dict().keys())\n eq_(expect, reality)\n\n\nclass TestAddonRecommendations(amo.tests.TestCase):\n fixtures = ['base/addon-recs']\n\n def test_scores(self):\n ids = [5299, 1843, 2464, 7661, 5369]\n scores = AddonRecommendation.scores(ids)\n q = AddonRecommendation.objects.filter(addon__in=ids)\n for addon, recs in itertools.groupby(q, lambda x: x.addon_id):\n for rec in recs:\n eq_(scores[addon][rec.other_addon_id], rec.score)\n\n\nclass TestAddonDependencies(amo.tests.TestCase):\n fixtures = ['base/addon_5299_gcal',\n 'base/addon_3615',\n 'base/addon_3723_listed',\n 'base/addon_6704_grapple',\n 'base/addon_4664_twitterbar']\n\n def test_dependencies(self):\n ids = [3615, 3723, 4664, 6704]\n a = Addon.objects.get(id=5299)\n\n for dependent_id in ids:\n AddonDependency(addon=a,\n dependent_addon=Addon.objects.get(id=dependent_id)).save()\n\n eq_(sorted([a.id for a in a.dependencies.all()]), sorted(ids))\n eq_(list(a.dependencies.all()), a.all_dependencies)\n\n def test_unique_dependencies(self):\n a = Addon.objects.get(id=5299)\n b = Addon.objects.get(id=3615)\n AddonDependency.objects.create(addon=a, dependent_addon=b)\n try:\n AddonDependency.objects.create(addon=a, dependent_addon=b)\n except IntegrityError:\n pass\n eq_(list(a.dependencies.values_list('id', flat=True)), [3615])\n\n\nclass TestListedAddonTwoVersions(amo.tests.TestCase):\n fixtures = ['addons/listed-two-versions']\n\n def test_listed_two_versions(self):\n Addon.objects.get(id=2795) # bug 563967\n\n\nclass TestFlushURLs(amo.tests.TestCase):\n fixtures = ['base/addon_5579',\n 
'base/previews',\n 'base/addon_4664_twitterbar',\n 'addons/persona']\n\n def setUp(self):\n settings.ADDON_ICON_URL = (\n '%s/%s/%s/images/addon_icon/%%d-%%d.png?modified=%%s' % (\n settings.STATIC_URL, settings.LANGUAGE_CODE, settings.DEFAULT_APP))\n settings.PREVIEW_THUMBNAIL_URL = (settings.STATIC_URL +\n '/img/uploads/previews/thumbs/%s/%d.png?modified=%d')\n settings.PREVIEW_FULL_URL = (settings.STATIC_URL +\n '/img/uploads/previews/full/%s/%d.png?modified=%d')\n _connect()\n\n def tearDown(self):\n _disconnect()\n\n def is_url_hashed(self, url):\n return urlparse(url).query.find('modified') > -1\n\n @patch('amo.tasks.flush_front_end_cache_urls.apply_async')\n def test_addon_flush(self, flush):\n addon = Addon.objects.get(pk=159)\n addon.icon_type = \"image/png\"\n addon.save()\n\n for url in (addon.thumbnail_url, addon.icon_url):\n assert url in flush.call_args[1]['args'][0]\n assert self.is_url_hashed(url), url\n\n @patch('amo.tasks.flush_front_end_cache_urls.apply_async')\n def test_preview_flush(self, flush):\n addon = Addon.objects.get(pk=4664)\n preview = addon.previews.all()[0]\n preview.save()\n for url in (preview.thumbnail_url, preview.image_url):\n assert url in flush.call_args[1]['args'][0]\n assert self.is_url_hashed(url), url\n\n\nclass TestAddonFromUpload(UploadTest):\n fixtures = ('base/apps', 'base/users')\n\n def setUp(self):\n super(TestAddonFromUpload, self).setUp()\n u = UserProfile.objects.get(pk=999)\n set_user(u)\n self.platform = Platform.objects.create(id=amo.PLATFORM_MAC.id)\n for version in ('3.0', '3.6.*'):\n AppVersion.objects.create(application_id=1, version=version)\n self.addCleanup(translation.deactivate)\n\n def webapp(self):\n return os.path.join(settings.ROOT,\n 'apps/devhub/tests/addons/mozball.webapp')\n\n def test_blacklisted_guid(self):\n BlacklistedGuid.objects.create(guid='guid@xpi')\n with self.assertRaises(forms.ValidationError) as e:\n Addon.from_upload(self.get_upload('extension.xpi'),\n [self.platform])\n eq_(e.exception.messages, ['Duplicate UUID found.'])\n\n def test_xpi_attributes(self):\n addon = Addon.from_upload(self.get_upload('extension.xpi'),\n [self.platform])\n eq_(addon.name, 'xpi name')\n eq_(addon.guid, 'guid@xpi')\n eq_(addon.type, amo.ADDON_EXTENSION)\n eq_(addon.status, amo.STATUS_NULL)\n eq_(addon.homepage, 'http://homepage.com')\n eq_(addon.summary, 'xpi description')\n eq_(addon.description, None)\n eq_(addon.slug, 'xpi-name')\n\n def test_manifest_url(self):\n upload = self.get_upload(abspath=self.webapp())\n addon = Addon.from_upload(upload, [self.platform])\n assert addon.is_webapp()\n eq_(addon.manifest_url, upload.name)\n\n def test_xpi_version(self):\n addon = Addon.from_upload(self.get_upload('extension.xpi'),\n [self.platform])\n v = addon.versions.get()\n eq_(v.version, '0.1')\n eq_(v.files.get().platform_id, self.platform.id)\n eq_(v.files.get().status, amo.STATUS_UNREVIEWED)\n\n def test_xpi_for_multiple_platforms(self):\n platforms = [Platform.objects.get(pk=amo.PLATFORM_LINUX.id),\n Platform.objects.get(pk=amo.PLATFORM_MAC.id)]\n addon = Addon.from_upload(self.get_upload('extension.xpi'),\n platforms)\n v = addon.versions.get()\n eq_(sorted([f.platform.id for f in v.all_files]),\n sorted([p.id for p in platforms]))\n\n def test_search_attributes(self):\n addon = Addon.from_upload(self.get_upload('search.xml'),\n [self.platform])\n eq_(addon.name, 'search tool')\n eq_(addon.guid, None)\n eq_(addon.type, amo.ADDON_SEARCH)\n eq_(addon.status, amo.STATUS_NULL)\n eq_(addon.homepage, None)\n 
eq_(addon.description, None)\n eq_(addon.slug, 'search-tool')\n eq_(addon.summary, 'Search Engine for Firefox')\n\n def test_search_version(self):\n addon = Addon.from_upload(self.get_upload('search.xml'),\n [self.platform])\n v = addon.versions.get()\n eq_(v.version, datetime.now().strftime('%Y%m%d'))\n eq_(v.files.get().platform_id, amo.PLATFORM_ALL.id)\n eq_(v.files.get().status, amo.STATUS_UNREVIEWED)\n\n def test_no_homepage(self):\n addon = Addon.from_upload(self.get_upload('extension-no-homepage.xpi'),\n [self.platform])\n eq_(addon.homepage, None)\n\n def test_default_locale(self):\n # Make sure default_locale follows the active translation.\n addon = Addon.from_upload(self.get_upload('search.xml'),\n [self.platform])\n eq_(addon.default_locale, 'en-US')\n\n translation.activate('es-ES')\n addon = Addon.from_upload(self.get_upload('search.xml'),\n [self.platform])\n eq_(addon.default_locale, 'es-ES')\n\n def test_webapp_default_locale_override(self):\n with nested(tempfile.NamedTemporaryFile('w', suffix='.webapp'),\n open(self.webapp())) as (tmp, mf):\n mf = json.load(mf)\n mf['default_locale'] = 'gb'\n tmp.write(json.dumps(mf))\n tmp.flush()\n upload = self.get_upload(abspath=tmp.name)\n addon = Addon.from_upload(upload, [self.platform])\n eq_(addon.default_locale, 'gb')\n\n def test_browsing_locale_does_not_override(self):\n translation.activate('gb')\n upload = self.get_upload(abspath=self.webapp()) # en-US default\n addon = Addon.from_upload(upload, [self.platform])\n eq_(addon.default_locale, 'en-US') # not gb\n\n\nREDIRECT_URL = 'http://outgoing.mozilla.org/v1/'\n\n\nclass TestCharity(amo.tests.TestCase):\n fixtures = ['base/charity.json']\n\n @patch.object(settings, 'REDIRECT_URL', REDIRECT_URL)\n def test_url(self):\n charity = Charity(name=\"a\", paypal=\"b\", url=\"http://foo.com\")\n charity.save()\n assert charity.outgoing_url.startswith(REDIRECT_URL)\n\n @patch.object(settings, 'REDIRECT_URL', REDIRECT_URL)\n def test_url_foundation(self):\n foundation = Charity.objects.get(pk=amo.FOUNDATION_ORG)\n assert not foundation.outgoing_url.startswith(REDIRECT_URL)\n\n\nclass TestFrozenAddons(amo.tests.TestCase):\n\n def test_immediate_freeze(self):\n # Adding a FrozenAddon should immediately drop the addon's hotness.\n a = Addon.objects.create(type=1, hotness=22)\n FrozenAddon.objects.create(addon=a)\n eq_(Addon.objects.get(id=a.id).hotness, 0)\n\n\nclass TestRemoveLocale(amo.tests.TestCase):\n\n def test_remove(self):\n a = Addon.objects.create(type=1)\n a.name = {'en-US': 'woo', 'el': 'yeah'}\n a.description = {'en-US': 'woo', 'el': 'yeah', 'he': 'ola'}\n a.save()\n a.remove_locale('el')\n qs = (Translation.objects.filter(localized_string__isnull=False)\n .values_list('locale', flat=True))\n eq_(sorted(qs.filter(id=a.name_id)), ['en-US'])\n eq_(sorted(qs.filter(id=a.description_id)), ['en-US', 'he'])\n\n def test_remove_version_locale(self):\n addon = Addon.objects.create(type=amo.ADDON_THEME)\n version = Version.objects.create(addon=addon)\n version.releasenotes = {'fr': 'oui'}\n version.save()\n addon.remove_locale('fr')\n assert not (Translation.objects.filter(localized_string__isnull=False)\n .values_list('locale', flat=True))\n\n\nclass TestAddonWatchDisabled(amo.tests.TestCase):\n\n def setUp(self):\n self.addon = Addon(type=amo.ADDON_THEME, disabled_by_user=False,\n status=amo.STATUS_PUBLIC)\n self.addon.save()\n\n @patch('addons.models.File.objects.filter')\n def test_no_disabled_change(self, file_mock):\n mock = Mock()\n file_mock.return_value = [mock]\n 
self.addon.save()\n assert not mock.unhide_disabled_file.called\n assert not mock.hide_disabled_file.called\n\n @patch('addons.models.File.objects.filter')\n def test_disable_addon(self, file_mock):\n mock = Mock()\n file_mock.return_value = [mock]\n self.addon.update(disabled_by_user=True)\n assert not mock.unhide_disabled_file.called\n assert mock.hide_disabled_file.called\n\n @patch('addons.models.File.objects.filter')\n def test_admin_disable_addon(self, file_mock):\n mock = Mock()\n file_mock.return_value = [mock]\n self.addon.update(status=amo.STATUS_DISABLED)\n assert not mock.unhide_disabled_file.called\n assert mock.hide_disabled_file.called\n\n @patch('addons.models.File.objects.filter')\n def test_enable_addon(self, file_mock):\n mock = Mock()\n file_mock.return_value = [mock]\n self.addon.update(status=amo.STATUS_DISABLED)\n mock.reset_mock()\n self.addon.update(status=amo.STATUS_PUBLIC)\n assert mock.unhide_disabled_file.called\n assert not mock.hide_disabled_file.called\n\n\nclass TestSearchSignals(amo.tests.ESTestCase):\n es = True\n\n def setUp(self):\n super(TestSearchSignals, self).setUp()\n addons.search.setup_mapping()\n self.addCleanup(self.cleanup)\n\n def cleanup(self):\n for index in settings.ES_INDEXES.values():\n self.es.delete_index_if_exists(index)\n\n def test_no_addons(self):\n eq_(Addon.search().count(), 0)\n\n def test_create(self):\n addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo')\n self.refresh()\n eq_(Addon.search().count(), 1)\n eq_(Addon.search().query(name='woo')[0].id, addon.id)\n\n def test_update(self):\n addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo')\n self.refresh()\n eq_(Addon.search().count(), 1)\n\n addon.name = 'yeah'\n addon.save()\n self.refresh()\n\n eq_(Addon.search().count(), 1)\n eq_(Addon.search().query(name='woo').count(), 0)\n eq_(Addon.search().query(name='yeah')[0].id, addon.id)\n\n def test_delete(self):\n addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo')\n self.refresh()\n eq_(Addon.search().count(), 1)\n\n addon.delete('woo')\n self.refresh()\n eq_(Addon.search().count(), 0)\n\n\nclass TestLanguagePack(TestLanguagePack):\n\n def setUp(self):\n super(TestLanguagePack, self).setUp()\n self.platform = Platform.objects.create(id=amo.PLATFORM_ANDROID.id)\n\n def test_extract(self):\n File.objects.create(platform=self.platform, version=self.version,\n filename=self.xpi_path('langpack-localepicker'))\n assert 'title=Select a language' in self.addon.get_localepicker()\n\n def test_extract_no_file(self):\n File.objects.create(platform=self.platform, version=self.version,\n filename=self.xpi_path('langpack'))\n eq_(self.addon.get_localepicker(), '')\n\n def test_extract_no_files(self):\n eq_(self.addon.get_localepicker(), '')\n\n def test_extract_not_language_pack(self):\n self.addon.update(type=amo.ADDON_LPAPP)\n eq_(self.addon.get_localepicker(), '')\n\n def test_extract_not_platform_all(self):\n self.mac = Platform.objects.create(id=amo.PLATFORM_MAC.id)\n File.objects.create(platform=self.mac, version=self.version,\n filename=self.xpi_path('langpack'))\n eq_(self.addon.get_localepicker(), '')\n\n\nclass TestMarketplace(amo.tests.TestCase):\n\n def setUp(self):\n self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)\n\n def test_is_premium(self):\n assert not self.addon.is_premium()\n self.addon.update(premium_type=amo.ADDON_PREMIUM)\n assert self.addon.is_premium()\n\n def test_can_be_premium_status(self):\n for status in amo.STATUS_CHOICES.keys():\n 
self.addon.update(status=status)\n if status in amo.PREMIUM_STATUSES:\n assert self.addon.can_become_premium()\n else:\n assert not self.addon.can_become_premium()\n\n def test_webapp_can_become_premium(self):\n self.addon.update(type=amo.ADDON_WEBAPP)\n for status in amo.STATUS_CHOICES.keys():\n self.addon.update(status=status)\n assert self.addon.can_become_premium(), status\n\n def test_can_be_premium_type(self):\n for type in amo.ADDON_TYPES.keys():\n self.addon.update(type=type)\n if type in [amo.ADDON_EXTENSION, amo.ADDON_WEBAPP,\n amo.ADDON_LPAPP, amo.ADDON_DICT, amo.ADDON_THEME]:\n assert self.addon.can_become_premium()\n else:\n assert not self.addon.can_become_premium()\n\n def test_can_not_be_purchased(self):\n assert not self.addon.can_be_purchased()\n\n def test_can_still_not_be_purchased(self):\n self.addon.update(premium_type=amo.ADDON_PREMIUM)\n assert not self.addon.can_be_purchased()\n\n def test_can_be_purchased(self):\n for status in amo.REVIEWED_STATUSES:\n self.addon.update(premium_type=amo.ADDON_PREMIUM,\n status=status)\n assert self.addon.can_be_purchased()\n\n def test_transformer(self):\n other = Addon.objects.create(type=amo.ADDON_EXTENSION)\n price = Price.objects.create(price='1.00')\n\n self.addon.update(type=amo.ADDON_PREMIUM)\n AddonPremium.objects.create(addon=self.addon, price=price)\n\n assert getattr(Addon.objects.get(pk=self.addon.pk), 'premium')\n assert not getattr(Addon.objects.get(pk=other.pk), 'premium')\n\n\nclass TestAddonUpsell(amo.tests.TestCase):\n\n def setUp(self):\n self.one = Addon.objects.create(type=amo.ADDON_EXTENSION, name='free')\n self.two = Addon.objects.create(type=amo.ADDON_EXTENSION,\n name='premium')\n self.upsell = AddonUpsell.objects.create(free=self.one,\n premium=self.two, text='yup')\n\n def test_create_upsell(self):\n eq_(self.one.upsell.premium, self.two)\n eq_(self.one.upsell.text, 'yup')\n eq_(self.two.upsell, None)\n\n\nclass TestAddonPurchase(amo.tests.TestCase):\n fixtures = ['base/users']\n\n def setUp(self):\n self.user = UserProfile.objects.get(pk=999)\n self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION,\n premium_type=amo.ADDON_PREMIUM,\n name='premium')\n\n def test_no_premium(self):\n self.addon.addonpurchase_set.create(user=self.user)\n self.addon.update(premium_type=amo.ADDON_FREE)\n assert not self.addon.has_purchased(self.user)\n\n def test_has_purchased(self):\n self.addon.addonpurchase_set.create(user=self.user)\n assert self.addon.has_purchased(self.user)\n\n def test_not_purchased(self):\n assert not self.addon.has_purchased(self.user)\n\n def test_anonymous(self):\n assert not self.addon.has_purchased(None)\n assert not self.addon.has_purchased(AnonymousUser)\n\n\nclass TestWatermarkHash(amo.tests.TestCase):\n fixtures = ['base/addon_3615', 'base/users']\n\n def setUp(self):\n self.addon = Addon.objects.get(pk=3615)\n self.user = UserProfile.objects.get(email='regular@mozilla.com')\n\n def test_watermark_change_email(self):\n hsh = self.addon.get_watermark_hash(self.user)\n self.user.update(email='foo@bar.com')\n eq_(hsh, self.addon.get_watermark_hash(self.user))\n\n def test_check_hash(self):\n hsh = self.addon.get_watermark_hash(self.user)\n eq_(self.user, self.addon.get_user_from_hash(self.user.email, hsh))\n\n def test_check_hash_messed(self):\n hsh = self.addon.get_watermark_hash(self.user)\n hsh = hsh + 'asd'\n eq_(None, self.addon.get_user_from_hash(self.user.email, hsh))\n\n def test_check_user_change(self):\n self.user.update(email='foo@bar.com')\n hsh = 
self.addon.get_watermark_hash(self.user)\n eq_(self.user,\n self.addon.get_user_from_hash('regular@mozilla.com', hsh))\n\n def test_check_user_multiple(self):\n hsh = self.addon.get_watermark_hash(self.user)\n self.user.update(email='foo@bar.com')\n UserProfile.objects.create(email='regular@mozilla.com')\n eq_(self.user,\n self.addon.get_user_from_hash('regular@mozilla.com', hsh))\n\n def test_cant_takeover(self):\n hsh = self.addon.get_watermark_hash(self.user)\n self.user.delete()\n UserProfile.objects.create(email='regular@mozilla.com')\n eq_(None, self.addon.get_user_from_hash('regular@mozilla.com', hsh))\n\n\nclass TestCompatOverride(amo.tests.TestCase):\n\n def setUp(self):\n app = Application.objects.create(id=1)\n\n one = CompatOverride.objects.create(guid='one')\n CompatOverrideRange.objects.create(compat=one, app=app)\n\n two = CompatOverride.objects.create(guid='two')\n CompatOverrideRange.objects.create(compat=two, app=app,\n min_version='1', max_version='2')\n CompatOverrideRange.objects.create(compat=two, app=app,\n min_version='1', max_version='2',\n min_app_version='3',\n max_app_version='4')\n\n def check(self, obj, **kw):\n \"\"\"Check that key/value pairs in kw match attributes of obj.\"\"\"\n for key, expected in kw.items():\n actual = getattr(obj, key)\n eq_(actual, expected, '[%s] %r != %r' % (key, actual, expected))\n\n def test_is_hosted(self):\n c = CompatOverride.objects.create(guid='a')\n assert not c.is_hosted()\n\n a = Addon.objects.create(type=1, guid='b')\n c = CompatOverride.objects.create(guid='b')\n assert c.is_hosted()\n\n def test_override_type(self):\n one = CompatOverride.objects.get(guid='one')\n\n # The default is incompatible.\n c = CompatOverrideRange.objects.create(compat=one, app_id=1)\n eq_(c.override_type(), 'incompatible')\n\n c = CompatOverrideRange.objects.create(compat=one, app_id=1, type=0)\n eq_(c.override_type(), 'compatible')\n\n def test_guid_match(self):\n # We hook up the add-on automatically if we see a matching guid.\n addon = Addon.objects.create(id=1, guid='oh yeah', type=1)\n c = CompatOverride.objects.create(guid=addon.guid)\n eq_(c.addon_id, addon.id)\n\n c = CompatOverride.objects.create(guid='something else')\n assert c.addon is None\n\n def test_transformer(self):\n compats = list(CompatOverride.objects\n .transform(CompatOverride.transformer))\n ranges = list(CompatOverrideRange.objects.all())\n # If the transformer works then we won't have any more queries.\n with self.assertNumQueries(0):\n for c in compats:\n eq_(c.compat_ranges,\n [r for r in ranges if r.compat_id == c.id])\n\n def test_collapsed_ranges(self):\n # Test that we get back the right structures from collapsed_ranges().\n c = CompatOverride.objects.get(guid='one')\n r = c.collapsed_ranges()\n\n eq_(len(r), 1)\n compat_range = r[0]\n self.check(compat_range, type='incompatible', min='*', max='*')\n\n eq_(len(compat_range.apps), 1)\n self.check(compat_range.apps[0], app=amo.FIREFOX, min='*', max='*')\n\n def test_collapsed_ranges_multiple_versions(self):\n c = CompatOverride.objects.get(guid='one')\n CompatOverrideRange.objects.create(compat=c, app_id=1,\n min_version='1', max_version='2',\n min_app_version='3',\n max_app_version='3.*')\n r = c.collapsed_ranges()\n\n eq_(len(r), 2)\n\n self.check(r[0], type='incompatible', min='*', max='*')\n eq_(len(r[0].apps), 1)\n self.check(r[0].apps[0], app=amo.FIREFOX, min='*', max='*')\n\n self.check(r[1], type='incompatible', min='1', max='2')\n eq_(len(r[1].apps), 1)\n self.check(r[1].apps[0], app=amo.FIREFOX, 
min='3', max='3.*')\n\n def test_collapsed_ranges_different_types(self):\n # If the override ranges have different types they should be separate\n # entries.\n c = CompatOverride.objects.get(guid='one')\n CompatOverrideRange.objects.create(compat=c, app_id=1, type=0,\n min_app_version='3',\n max_app_version='3.*')\n r = c.collapsed_ranges()\n\n eq_(len(r), 2)\n\n self.check(r[0], type='compatible', min='*', max='*')\n eq_(len(r[0].apps), 1)\n self.check(r[0].apps[0], app=amo.FIREFOX, min='3', max='3.*')\n\n self.check(r[1], type='incompatible', min='*', max='*')\n eq_(len(r[1].apps), 1)\n self.check(r[1].apps[0], app=amo.FIREFOX, min='*', max='*')\n\n def test_collapsed_ranges_multiple_apps(self):\n c = CompatOverride.objects.get(guid='two')\n r = c.collapsed_ranges()\n\n eq_(len(r), 1)\n compat_range = r[0]\n self.check(compat_range, type='incompatible', min='1', max='2')\n\n eq_(len(compat_range.apps), 2)\n self.check(compat_range.apps[0], app=amo.FIREFOX, min='*', max='*')\n self.check(compat_range.apps[1], app=amo.FIREFOX, min='3', max='4')\n\n def test_collapsed_ranges_multiple_apps(self):\n c = CompatOverride.objects.get(guid='two')\n r = c.collapsed_ranges()\n\n eq_(len(r), 1)\n compat_range = r[0]\n self.check(compat_range, type='incompatible', min='1', max='2')\n\n eq_(len(compat_range.apps), 2)\n self.check(compat_range.apps[0], app=amo.FIREFOX, min='*', max='*')\n self.check(compat_range.apps[1], app=amo.FIREFOX, min='3', max='4')\n\n def test_collapsed_ranges_multiple_versions_and_apps(self):\n c = CompatOverride.objects.get(guid='two')\n CompatOverrideRange.objects.create(min_version='5', max_version='6',\n compat=c, app_id=1)\n r = c.collapsed_ranges()\n\n eq_(len(r), 2)\n self.check(r[0], type='incompatible', min='1', max='2')\n\n eq_(len(r[0].apps), 2)\n self.check(r[0].apps[0], app=amo.FIREFOX, min='*', max='*')\n self.check(r[0].apps[1], app=amo.FIREFOX, min='3', max='4')\n\n self.check(r[1], type='incompatible', min='5', max='6')\n eq_(len(r[1].apps), 1)\n self.check(r[1].apps[0], app=amo.FIREFOX, min='*', max='*')\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":4149593830496497700,"string":"4,149,593,830,496,497,700"},"line_mean":{"kind":"number","value":35.0031662269,"string":"35.003166"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.5953595404,"string":"0.59536"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":108797,"cells":{"repo_name":{"kind":"string","value":"I-sektionen/i-portalen"},"path":{"kind":"string","value":"wsgi/iportalen_django/exchange_portal/urls.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1753"},"content":{"kind":"string","value":"from django.conf.urls import url, include\nfrom . 
row 108797 | repo_name: I-sektionen/i-portalen | path: wsgi/iportalen_django/exchange_portal/urls.py | copies: 1 | size: 1753

from django.conf.urls import url, include
from . import views

app_name = 'exchange_portal'


exchange_portal_patterns = [
    url(r'^$', view=views.Exchange_Portal, name="exchange_portal"),
    url(r'^admin/$', view=views.Admin, name="admin"),
    url(r'^feedback/$', view=views.Add_Feedback, name="feedback"),
    url(r'^important_dates/$', view=views.Important_Dates, name='important_dates'),
    url(r'^contact/$', view=views.Contact, name='contact'),
    url(r'^school/(?P[0-9]+)/$', view=views.Exchange_School, name='school'),
    url(r'^search-autocomplete/$', view=views.Search_Autocomplete.as_view(), name='search_autocomplete'),
    url(r'^travel_stories/$', view=views.Travel_Stories, name="travel_stories"),
    url(r'^travel_story/(?P[0-9]+)/$', view=views.single_travel_story, name='travel_story'),
    url(r'^(?P\w{0,50})/$', view=views.continent, name="continent"),
    url(r'asien/(?P\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'nordamerika/(?P\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'europa/(?P\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'afrika/(?P\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'oceanien/(?P\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'sydamerika/(?P\w{0,50})$', view=views.continent_filtered, name="country")
]


urlpatterns = [url(r'^', include(exchange_portal_patterns, namespace=app_name))]

license: mit | hash: -6,569,624,389,402,226,000 | line_mean: 61.607143 | line_max: 106 | alpha_frac: 0.584712 | autogenerated: false
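One caveat about the row above: the scraper stripped the names out of the named capture groups, so patterns like (?P[0-9]+) will not compile until a name (e.g. a hypothetical school_id) is restored. Once restored, the namespaced route names resolve as usual; a minimal sketch, assuming the project's settings are loaded:

from django.urls import reverse  # django.core.urlresolvers on older Django

reverse('exchange_portal:exchange_portal')  # the portal index
reverse('exchange_portal:travel_stories')
reverse('exchange_portal:contact')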
row 108798 | repo_name: chapmanb/svtyper | path: scripts/vcf_paste.py | copies: 1 | size: 4673

#!/usr/bin/env python

import argparse, sys
from argparse import RawTextHelpFormatter

__author__ = "Colby Chiang (cc2qe@virginia.edu)"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2015-04-13 14:31 $"

# --------------------------------------
# define functions

def get_args():
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
vcf_paste.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: Paste VCFs from multiple samples")
    # parser.add_argument('-a', '--argA', metavar='argA', type=str, required=True, help='description of argument')
    # parser.add_argument('-b', '--argB', metavar='argB', required=False, help='description of argument B')
    # parser.add_argument('-c', '--flagC', required=False, action='store_true', help='sets flagC to true')
    parser.add_argument('-m', '--master', type=argparse.FileType('r'), default=None, help='VCF file to set first 8 columns of variant info [first file in vcf_list]')
    parser.add_argument('-q', '--sum_quals', required=False, action='store_true', help='Sum QUAL scores of input VCFs as output QUAL score')
    parser.add_argument('vcf_list', metavar='vcf', nargs='*', type=argparse.FileType('r'), default=None, help='VCF file(s) to join')

    # parse the arguments
    args = parser.parse_args()

    if len(args.vcf_list) < 1:
        parser.print_help()
        exit(1)

    # send back the user input
    return args

# primary function
def svt_join(master, sum_quals, vcf_list):
    # if master not provided, set as first VCF
    if master is None:
        master = open(vcf_list[0].name)
    sample_list = []

    # print header
    while 1:
        master_line = master.readline()
        if not master_line:
            break
        if master_line[:2] != "##":
            break
        print (master_line.rstrip())

    # get sample names
    for vcf in vcf_list:
        while 1:
            line = vcf.readline()
            if not line:
                break
            if line[:2] == "##":
                continue
            if line[0] == "#":
                line_v = line.rstrip().split('\t')
                for sample in line_v[9:]:
                    sample_list.append(sample)
                break
    print '\t'.join(master_line.rstrip().split('\t')[:8] + ['FORMAT'] + sample_list)

    # iterate through VCF body
    while 1:
        master_line = master.readline()
        if not master_line:
            break
        master_v = master_line.rstrip().split('\t')
        master_chrom = master_v[0]
        master_pos = master_v[1]

        out_v = master_v[:8]  # output array of fields
        qual = float(out_v[5])
        format = None  # column 9, VCF format field.

        for vcf in vcf_list:
            line = vcf.readline()
            if not line:
                sys.stderr.write('\nError: VCF files differ in length\n')
                exit(1)
            line_v = line.rstrip().split('\t')
            line_chrom = line_v[0]
            line_pos = line_v[1]

            # set FORMAT field as format in first VCF.
            # cannot extract this from master, since it may have
            # been altered in the processing of the VCFs.
            if format is None:
                format = line_v[8]
                out_v.append(format)

            # ensure that each VCF position agrees with the master
            if (master_chrom != line_chrom or
                    master_pos != line_pos):
                sys.stderr.write('\nError: variant in %s (%s:%s) conflicts with master (%s:%s)\n' %
                                 (vcf.name, line_chrom, line_pos, master_chrom, master_pos))
                exit(1)

            # ensure that the format for all VCFs agree with the first
            if (format != line_v[8]):
                sys.stderr.write('\nError: format in %s (%s) conflicts with first VCF (%s)\n' %
                                 (vcf.name, line_v[8], format))
                exit(1)

            qual += float(line_v[5])
            out_v = out_v + line_v[9:]
        if sum_quals:
            out_v[5] = qual
        sys.stdout.write('\t'.join(map(str, out_v)) + '\n')

    # close files
    master.close()
    for vcf in vcf_list:
        vcf.close()

    return

# --------------------------------------
# main function

def main():
    # parse the command line args
    args = get_args()

    # call primary function
    svt_join(args.master, args.sum_quals, args.vcf_list)

# initialize the script
if __name__ == '__main__':
    try:
        sys.exit(main())
    except IOError, e:
        if e.errno != 32:  # ignore SIGPIPE
            raise

license: mit | hash: 4,319,263,748,814,037,000 | line_mean: 32.862319 | line_max: 165 | alpha_frac: 0.53777 | autogenerated: false
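For reference, a hypothetical invocation of the script above (file names invented; the script uses Python 2 print statements, so a Python 2 interpreter is assumed):

#   python vcf_paste.py -q -m master.vcf sampleA.vcf sampleB.vcf > merged.vcf
#
# or, calling the primary function directly:
import vcf_paste

with open('sampleA.vcf') as a, open('sampleB.vcf') as b:
    vcf_paste.svt_join(master=None, sum_quals=True, vcf_list=[a, b])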
row 108799 | repo_name: jinzekid/codehub | path: python/练习/练习-三级菜单.py | copies: 1 | size: 2092

# Author: Jason Lu

menu = {
    '北京': {
        '海淀': {
            '五道口': {
                'soho': {},
                '网易': {},
                'google': {}
            },
            '中关村': {
                '爱奇艺': {},
                '汽车之家': {},
                'youku': {},
            },
            '上地': {
                '百度': {},
            },
        },
        '昌平': {
            '沙河': {
                '老男孩': {},
                '北航': {},
            },
            '天通苑': {},
            '回龙观': {},
        },
        '朝阳': {},
        '东城': {},
    },
    '上海': {
        '闵行': {
            "人民广场": {
                '炸鸡店': {}
            }
        },
        '闸北': {
            '火车战': {
                '携程': {}
            }
        },
        '浦东': {},
    },
    '山东': {},
}

exit_flag = False

# 第一版本 (first version)
while not exit_flag:
    for i in menu:
        print(i)
    choice = input("选择进入>>:")
    if choice in menu:

        while not exit_flag:
            for i2 in menu[choice]:
                print(i2)
            choice2 = input("选择进入>>:")
            if choice2 in menu[choice]:

                while not exit_flag:
                    for i3 in menu[choice][choice2]:
                        print(i3)
                    choice3 = input("选择进入>>:")
                    if choice3 in menu[choice][choice2]:

                        for i4 in menu[choice][choice2][choice3]:
                            print("\t\t", i4)
                        choice4 = input("最后一层, 按b返回>>:")

                        if choice4 == "b":
                            pass
                        elif choice4 == "q":
                            exit_flag = True

                    if choice3 == "b":
                        break
                    elif choice3 == "q":
                        exit_flag = True

            if choice2 == "b":
                break
            elif choice2 == "q":
                exit_flag = True

license: gpl-3.0 | hash: -9,118,270,210,886,829,000 | line_mean: 21.564706 | line_max: 65 | alpha_frac: 0.270073 | autogenerated: false
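The "first version" comment in the exercise above implies a cleaner rewrite was planned. A possible follow-up sketch (not from the repo): the three nested loops collapse into one loop over a stack of nested dicts, keeping the same b/back and q/quit convention.

def walk(menu):
    stack = [menu]  # the current path through the nested dicts
    while stack:
        current = stack[-1]
        for key in current:
            print(key)
        choice = input(">>:")
        if choice in current:
            stack.append(current[choice])   # descend one level
        elif choice == "b" and len(stack) > 1:
            stack.pop()                     # go back up one level
        elif choice == "q":
            break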
Dataset preview: 110,960 rows total; this page covers rows 108,700-108,799 (page index 1087, 100 rows per page).

Column schema for the split:

    repo_name      string   (lengths 5 to 92)
    path           string   (lengths 4 to 232)
    copies         string   (22 distinct values)
    size           string   (lengths 4 to 7)
    content        string   (lengths 626 to 1.05M)
    license        string   (15 distinct values)
    hash           int64    (-9,223,277,421,539,062,000 to 9,223,102,107B)
    line_mean      float64  (5.21 to 99.9)
    line_max       int64    (12 to 999)
    alpha_frac     float64  (0.25 to 0.96)
    autogenerated  bool     (1 class)
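A sketch of consuming this split with the datasets library (the dataset id comes from this page's metadata; the split name "train" and streaming access are assumptions):

from datasets import load_dataset

ds = load_dataset("codeparrot/codeparrot-valid-near-deduplication",
                  split="train", streaming=True)
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["license"])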
repo_name: stackforge/monasca-statsd | path: monascastatsd/client.py | copies: 2 | size: 5091
# (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2012, Datadog <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Datadog nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL DATADOG BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Monasca-Statsd is a Python client for Statsd that adds dimensions.
"""

from monascastatsd import common
from monascastatsd.connection import Connection
from monascastatsd.counter import Counter
from monascastatsd.gauge import Gauge
from monascastatsd.timer import Timer


class Client(object):

    def __init__(self, name=None, host='localhost', port=8125,
                 connection=None, max_buffer_size=50, dimensions=None):
        """Initialize a Client object.

        >>> monascastatsd = MonascaStatsd()

        :name: the name for this client.  Everything sent by this client
            will be prefixed by name
        :param host: the host of the MonascaStatsd server.
        :param port: the port of the MonascaStatsd server.
        :param max_buffer_size: Maximum number of metric to buffer before
            sending to the server if sending metrics in batch
        """
        self._max_buffer_size = max_buffer_size
        self._set_connection(connection, host, port)
        self._dimensions = dimensions
        self._client_name = name

    def _set_connection(self, connection, host, port):
        if connection is None:
            self.connection = Connection(host=host,
                                         port=port,
                                         max_buffer_size=self._max_buffer_size)
        else:
            self.connection = connection

    def get_counter(self, name, connection=None, dimensions=None):
        """Gets a Counter object.
        """
        return self._get_statsd_object_by_type(Counter, name,
                                               connection, dimensions)

    def get_gauge(self, name=None, connection=None, dimensions=None):
        """Gets a Gauge object.
        """
        return self._get_statsd_object_by_type(Gauge, name,
                                               connection, dimensions)

    def get_timer(self, name=None, connection=None, dimensions=None):
        """Gets a Timer object.
        """
        return self._get_statsd_object_by_type(Timer, name,
                                               connection, dimensions)

    def _get_statsd_object_by_type(self, object_type, name,
                                   connection, dimensions):
        return object_type(name=self._update_metric_name(name),
                           connection=connection or self.connection,
                           dimensions=self._update_dimensions(dimensions))

    def _update_metric_name(self, metric_name):
        """Update the metric name with the client name that was
        passed in on instantiation.
        """
        return common.update_name(self._client_name, metric_name)

    def _update_dimensions(self, dimensions):
        """Update the dimensions list with the default dimensions that
        were passed in on instantiation.
        """
        return common.update_dimensions(self._dimensions, dimensions)
license: bsd-3-clause | hash: 8,999,998,228,434,976,000 | line_mean: 42.512821 | line_max: 81 | alpha_frac: 0.670988 | autogenerated: false
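A minimal usage sketch for the Client above (not from the repo), assuming the package exports Client at the top level as the released monasca-statsd does, that a statsd listener is on localhost:8125, and that Counter exposes increment():

import monascastatsd

client = monascastatsd.Client(name='myapp',
                              dimensions={'service': 'demo'})
requests = client.get_counter('requests')
requests.increment()  # emits 'myapp.requests' with the default dimensions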
repo_name: it-projects-llc/website-addons | path: portal_event_tickets/models/event_mail.py | copies: 1 | size: 3646
# pylint: disable=api-one-deprecated
from datetime import datetime

from dateutil.relativedelta import relativedelta

from odoo import api, fields, models, tools

_INTERVALS = {
    "hours": lambda interval: relativedelta(hours=interval),
    "days": lambda interval: relativedelta(days=interval),
    "weeks": lambda interval: relativedelta(days=7 * interval),
    "months": lambda interval: relativedelta(months=interval),
    "now": lambda interval: relativedelta(hours=0),
}


class EventMailScheduler(models.Model):
    _inherit = "event.mail"

    interval_type = fields.Selection(
        selection_add=[
            ("transferring_started", "Transferring started"),
            ("transferring_finished", "Transferring finished"),
        ]
    )

    @api.depends(
        "event_id.state",
        "event_id.date_begin",
        "interval_type",
        "interval_unit",
        "interval_nbr",
    )
    def _compute_scheduled_date(self):
        for rself in self:
            if rself.interval_type not in [
                "transferring_started",
                "transferring_finished",
            ]:
                return super(EventMailScheduler, rself)._compute_scheduled_date()

            if rself.event_id.state not in ["confirm", "done"]:
                rself.scheduled_date = False
            else:
                date, sign = rself.event_id.create_date, 1
                rself.scheduled_date = datetime.strptime(
                    date, tools.DEFAULT_SERVER_DATETIME_FORMAT
                ) + _INTERVALS[rself.interval_unit](sign * rself.interval_nbr)

    def execute(self, registration=None):
        for rself in self:
            if rself.interval_type not in [
                "transferring_started",
                "transferring_finished",
            ]:
                return super(EventMailScheduler, rself).execute()

            if registration:
                rself.write(
                    {
                        "mail_registration_ids": [
                            (0, 0, {"registration_id": registration.id})
                        ]
                    }
                )

            # execute scheduler on registrations
            rself.mail_registration_ids.filtered(
                lambda reg: reg.scheduled_date
                and reg.scheduled_date
                <= datetime.strftime(
                    fields.datetime.now(), tools.DEFAULT_SERVER_DATETIME_FORMAT
                )
            ).execute()
        return True


class EventMailRegistration(models.Model):
    _inherit = "event.mail.registration"

    @api.one
    @api.depends(
        "registration_id", "scheduler_id.interval_unit", "scheduler_id.interval_type"
    )
    def _compute_scheduled_date(self):
        # Keep the for-block even though it's api.one now (it was api.multi,
        # but that didn't work -- scheduled_date was empty).
        # When the base module "event" is updated, we can simply change
        # api.one to api.multi without changing the method body.
        for rself in self:
            if rself.scheduler_id.interval_type not in [
                "transferring_started",
                "transferring_finished",
            ]:
                return super(EventMailRegistration, rself)._compute_scheduled_date()

            if rself.registration_id:
                # date_open does not correspond to its meaning here,
                # but keep it because it's copy-pasted code
                date_open_datetime = fields.datetime.now()
                rself.scheduled_date = date_open_datetime + _INTERVALS[
                    rself.scheduler_id.interval_unit
                ](rself.scheduler_id.interval_nbr)
license: mit | hash: 1,332,811,090,665,423,600 | line_mean: 35.46 | line_max: 120 | alpha_frac: 0.570214 | autogenerated: false
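The _INTERVALS table above maps an interval unit to a relativedelta factory, so a scheduled date is just the event's create_date plus the chosen offset. A standalone sketch of that computation (values are made up):

from datetime import datetime
from dateutil.relativedelta import relativedelta

_INTERVALS = {"weeks": lambda n: relativedelta(days=7 * n)}

create_date = datetime(2020, 1, 1, 12, 0)
scheduled = create_date + _INTERVALS["weeks"](2)
print(scheduled)  # 2020-01-15 12:00:00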
repo_name: CitrineInformatics/mifkit | path: mifkit/objects/phase_diagram.py | copies: 1 | size: 3612
from mif_object import MifObject
from line import Line
from phase import Phase
from point import Point
from person import Person
from reference import Reference


class PhaseDiagram(MifObject):
    """
    Class to store information about a phase diagram.

    The following fields must be defined:
        At least one of boundary, label, or phase
    """

    def __init__(self, vertex=None, boundary=None, label=None, phase=None,
                 reference=None, contact=None, license=None, data_type=None,
                 **kwargs):
        """
        Constructor.

        :param vertex: Names of the vertices of the phase diagram (the order
            of these names should be consistent with the order of
            coordinates, e.g. in 2 dimensions, ["x", "y"] would place "x" at
            (1, 0) and "y" at (0, 2)).
        :type vertex: List of strings.
        :param boundary: List of lines that define boundaries within the
            phase diagram.
        :type boundary: Single Line object or list of Line objects.
        :param label: List of labels to add to the phase diagram (not
            including labels at vertices).
        :type label: Single Point object or list of Point objects.
        :param phase: List of phases that appear within the phase diagram.
            This is used when additional information about a phase is known
            besides a name or label.
        :type phase: Single Sample object or list of Sample objects.
        :param reference: References for the phase diagram.
        :type reference: Single Reference object or list of Reference objects.
        :param contact: List of people that worked on the phase diagram.
        :type contact: Single Person object or list of Person objects.
        :param license: One or more licenses to apply to the phase diagram.
        :type license: Single string or list of strings.
        :param data_type: Type of the data to add.
        :type data_type: String (either "Experimental" or "Computational")
        """
        super(PhaseDiagram, self).__init__(**kwargs)
        self._boundary = None
        self._label = None
        self._phase = None
        self._reference = None
        self._contact = None
        self.vertex = vertex
        self.boundary = boundary
        self.label = label
        self.phase = phase
        self.reference = reference
        self.contact = contact
        self.license = license
        self.data_type = data_type

    @property
    def boundary(self):
        return self._boundary

    @boundary.setter
    def boundary(self, value):
        self._boundary = self._get_object(Line, value)

    @boundary.deleter
    def boundary(self):
        del self._boundary

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        self._label = self._get_object(Point, value)

    @label.deleter
    def label(self):
        del self._label

    @property
    def phase(self):
        return self._phase

    @phase.setter
    def phase(self, value):
        self._phase = self._get_object(Phase, value)

    @phase.deleter
    def phase(self):
        del self._phase

    @property
    def reference(self):
        return self._reference

    @reference.setter
    def reference(self, value):
        self._reference = self._get_object(Reference, value)

    @reference.deleter
    def reference(self):
        del self._reference

    @property
    def contact(self):
        return self._contact

    @contact.setter
    def contact(self, value):
        self._contact = self._get_object(Person, value)

    @contact.deleter
    def contact(self):
        del self._contact
license: apache-2.0 | hash: -4,642,239,946,330,157,000 | line_mean: 28.85124 | line_max: 117 | alpha_frac: 0.638981 | autogenerated: false
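A hypothetical construction sketch for the class above (values invented; the sibling mif_object/line/point/phase modules must be importable, and the inherited _get_object helper lives in MifObject, which is not shown here):

diagram = PhaseDiagram(vertex=["Cu", "Zn"],
                       license="CC-BY-4.0",
                       data_type="Experimental")
print(diagram.data_type)  # 'Experimental'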
repo_name: DigitalCampus/django-oppia | path: tests/reports/views/test_search_terms.py | copies: 1 | size: 2777
import datetime

from django.urls import reverse
from django.utils import timezone

from oppia.test import OppiaTestCase
from tests.reports import utils


class SearchTermsViewTest(OppiaTestCase):

    fixtures = ['tests/test_user.json',
                'tests/test_oppia.json',
                'tests/test_quiz.json',
                'tests/test_permissions.json',
                'tests/test_cohort.json',
                'tests/test_course_permissions.json',
                'tests/test_usercoursesummary.json',
                'default_gamification_events.json',
                'tests/test_search_tracker.json']

    template = 'reports/search_terms.html'
    url = reverse('reports:search_terms')

    def setUp(self):
        super(SearchTermsViewTest, self).setUp()
        self.allowed_users = [self.admin_user, self.staff_user]
        self.disallowed_users = [self.teacher_user, self.normal_user]

    def test_search_terms_get(self):
        # fix dates to be in the last month
        utils.update_tracker_dates()

        for allowed_user in self.allowed_users:
            self.client.force_login(user=allowed_user)
            response = self.client.get(self.url)
            self.assertTemplateUsed(response, self.template)
            self.assertEqual(response.status_code, 200)

        for disallowed_user in self.disallowed_users:
            self.client.force_login(user=disallowed_user)
            response = self.client.get(self.url)
            self.assertRedirects(response,
                                 '/admin/login/?next=' + self.url,
                                 302,
                                 200)

    def test_search_terms_previous_date(self):
        self.client.force_login(self.admin_user)
        start_date = timezone.now() - datetime.timedelta(days=31)
        response = self.client.post(self.url,
                                    data={'start_date': start_date})
        self.assertTemplateUsed(response, self.template)
        self.assertEqual(response.status_code, 200)

    def test_search_terms_future_date(self):
        self.client.force_login(self.admin_user)
        start_date = timezone.now() + datetime.timedelta(days=31)
        response = self.client.post(self.url,
                                    data={'start_date': start_date})
        self.assertTemplateUsed(response, self.template)
        self.assertEqual(response.status_code, 200)

    def test_search_terms_invalid_date(self):
        self.client.force_login(self.admin_user)
        start_date = "not a valid date"
        response = self.client.post(self.url,
                                    data={'start_date': start_date})
        self.assertTemplateUsed(response, self.template)
        self.assertEqual(200, response.status_code)
license: gpl-3.0 | hash: 7,353,058,303,033,937,000 | line_mean: 38.671429 | line_max: 69 | alpha_frac: 0.603169 | autogenerated: false
repo_name: itcropper/tanks | path: bzagents/potentialFields.py | copies: 1 | size: 11035
#!/usr/bin/env python
'''This is a demo on how to use Gnuplot for potential fields. We've
intentionally avoided "giving it all away."
'''

from __future__ import division

import math
import sys
from itertools import cycle

from bzrc import BZRC
from wanderingagent import *
from Gnuplot import GnuplotProcess

__bzrc__ = None


# This is stolen from numpy. If numpy is installed, you don't need this:
def linspace(start, stop, num=200, endpoint=True, retstep=False):
    """Return evenly spaced numbers.

    Return num evenly spaced samples from start to stop. If endpoint is
    True, the last sample is stop. If retstep is True then return the step
    value used.
    """
    num = int(num)
    if num <= 0:
        return []
    if endpoint:
        if num == 1:
            return [float(start)]
        step = (stop - start) / float(num - 1)
        y = [x * step + start for x in xrange(0, num - 1)]
        y.append(stop)
    else:
        step = (stop - start) / float(num)
        y = [x * step + start for x in xrange(0, num)]
    if retstep:
        return y, step
    else:
        return y


########################################################################
# Constants

# Output file:
FILENAME = 'fields.gpi'
# Size of the world (one of the "constants" in bzflag):
WORLDSIZE = 1000
# How many samples to take along each dimension:
SAMPLES = 50
# Change spacing by changing the relative length of the vectors. It looks
# like scaling by 0.75 is pretty good, but this is adjustable:
VEC_LEN = 0.75 * WORLDSIZE / SAMPLES
# Animation parameters:
ANIMATION_MIN = 0
ANIMATION_MAX = 500
ANIMATION_FRAMES = 50

AGENT = None


class Plot():

    def __init__(self):
        pass

    ####################################################################
    # Field and Obstacle Definitions

    def generate_field_function(self, scale):
        def function(x, y):
            '''User-defined field function.'''
            sqnorm = (x**2 + y**2)
            if sqnorm == 0.0:
                return 0, 0
            else:
                return x*scale/sqnorm, y*scale/sqnorm
        return function

    ####################################################################
    # Helper Functions

    def gpi_point(self, x, y, vec_x, vec_y):
        '''Create the centered gpi data point (4-tuple) for a position and
        vector. The vectors are expected to be less than 1 in magnitude,
        and larger values will be scaled down.'''
        r = (vec_x ** 2 + vec_y ** 2) ** 0.5
        if r > 1:
            vec_x /= r
            vec_y /= r
        return (x - vec_x * VEC_LEN / 2, y - vec_y * VEC_LEN / 2,
                vec_x * VEC_LEN, vec_y * VEC_LEN)

    def gnuplot_header(self, minimum, maximum):
        '''Return a string that has all of the gnuplot sets and unsets.'''
        s = ''
        s += 'set xrange [%s: %s]\n' % (minimum, maximum)
        s += 'set yrange [%s: %s]\n' % (minimum, maximum)
        # The key is just clutter. Get rid of it:
        s += 'unset key\n'
        # Make sure the figure is square since the world is square:
        s += 'set size square\n'
        # Add a pretty title (optional):
        #s += "set title 'Potential Fields'\n"
        return s

    def draw_line(self, p1, p2):
        '''Return a string to tell Gnuplot to draw a line from point p1 to
        point p2 in the form of a set command.'''
        x1, y1 = p1
        x2, y2 = p2
        return 'set arrow from %s, %s to %s, %s nohead lt 3\n' % (x1, y1, x2, y2)

    def draw_obstacles(self, obstacles):
        '''Return a string which tells Gnuplot to draw all of the obstacles.'''
        s = 'unset arrow\n'
        for obs in obstacles:
            last_point = obs[0]
            for cur_point in obs[1:]:
                s += self.draw_line(last_point, cur_point)
                last_point = cur_point
            s += self.draw_line(last_point, obs[0])
        return s

    def drawSquares(self, x, y):
        arrow = "set arrow from "
        end = " nohead lt 2"
        left = arrow + str(x - 10) + ", " + str(y - 10) + " to " + str(x - 10) + ", " + str(y + 10) + end + "\n"
        top = arrow + str(x - 10) + ", " + str(y + 10) + " to " + str(x + 10) + ", " + str(y + 10) + end + "\n"
        right = arrow + str(x + 10) + ", " + str(y + 10) + " to " + str(x + 10) + ", " + str(y - 10) + end + "\n"
        bottom = arrow + str(x + 10) + ", " + str(y - 10) + " to " + str(x - 10) + ", " + str(y - 10) + end + "\n"
        return left + top + right + bottom + "\n"

    def drawExes(self, x, y):
        arrow = "set arrow from "
        end = " nohead lt 4"
        left = arrow + str(x - 10) + ", " + str(y - 10) + " to " + str(x + 10) + ", " + str(y + 10) + end + "\n"
        top = arrow + str(x - 10) + ", " + str(y + 10) + " to " + str(x + 10) + ", " + str(y - 10) + end + "\n"
        return left + top + "\n"

    def draw_points(self, points, element):
        '''
        set arrow from 0.0, 0.0 to 0.0, 20.0 nohead lt 4
        set arrow from 0.0, 20.0 to 20.0, 20.0 nohead lt 4
        set arrow from 20.0, 20.0 to 20.0, 0.0 nohead lt 4
        set arrow from 20.0, 0.0 to 0.0, 0.0 nohead lt 4
        '''
        s = ''
        for p in points:
            if element == "tanks":
                # tanks are squares
                s += self.drawSquares(p.x, p.y)
            elif element == "flags":
                # flags are exes
                s += self.drawExes(p.x, p.y)
        return s

    def attractive_planes(self, attractants):
        pass

    def plot_field(self, function):
        '''Return a Gnuplot command to plot a field.'''
        s = "plot '-' with vectors head\n"
        separation = WORLDSIZE / SAMPLES
        end = WORLDSIZE / 2 - separation / 2
        start = -end
        # ------------------- Here is where you need to change stuff up
        points = ((x, y) for x in linspace(start, end, SAMPLES)
                  for y in linspace(start, end, SAMPLES))
        for x, y in points:
            f_x, f_y = function(x + 50, y + 50)
            plotvalues = self.gpi_point(x, y, f_x, f_y)
            if plotvalues is not None:
                x1, y1, x2, y2 = plotvalues
                s += '%s %s %s %s\n' % (x1, y1, x2, y2)
        s += 'e\n'
        return s

    def plotToFile(self, obstacles):
        ################################################################
        # Plot the potential fields to a file
        outfile = open(FILENAME, 'w')
        print >>outfile, self.gnuplot_header(-WORLDSIZE / 2, WORLDSIZE / 2)
        print >>outfile, self.draw_obstacles(obstacles)

    def appendToFile(self, flags, tanks):
        #print "appending to file"
        outfile = open(FILENAME, 'a')
        print >>outfile, self.draw_points(flags, "flags")
        print >>outfile, self.draw_points(tanks, "tanks")
        # The attribute name is cut off in the source here; assumed: the
        # wandering agent exposes a field function get_vector(x, y).
        field_function = AGENT.get_vector
        print >>outfile, self.plot_field(field_function)

    ####################################################################
    # Animate a changing field, if the Python Gnuplot library is present

    def animate(self, obstacles):
        forward_list = list(linspace(ANIMATION_MIN, ANIMATION_MAX, ANIMATION_FRAMES/2))
        backward_list = list(linspace(ANIMATION_MAX, ANIMATION_MIN, ANIMATION_FRAMES/2))
        #print forward_list
        anim_points = forward_list + backward_list
        gp = GnuplotProcess(persist=False)
        gp.write(self.gnuplot_header(-WORLDSIZE / 4, WORLDSIZE / 4))
        gp.write(self.draw_obstacles(obstacles))
        #gp.write(self.draw_points(tanks, "tanks"))
        #for scale in cycle(anim_points):
        #    field_function = self.generate_field_function(scale)
        #    gp.write(self.plot_field(field_function))

    def get_vector(self, tank):
        # Here, create a vector by iterating through flags, obstacles and
        # other tanks. (The body operates on a tank object, so the
        # signature takes one.)
        vectors = []
        for obstacle in self.obstacles:
            avgx = 0
            avgy = 0
            for corner in obstacle:
                avgx += corner[0]
                avgy += corner[1]
            avgx /= 4
            avgy /= 4
            vectors.append(self.repel(tank.x, tank.y, avgx, avgy))
        for othertank in self.mytanks + self.othertanks:
            if othertank.x != tank.x and othertank.y != tank.y:
                vectors.append(self.repel(tank.x, tank.y, othertank.x, othertank.y, 10, 20))
        if tank.flag == '-':
            bestflag = None
            distbest = 2000
            for flag in self.flags:
                if flag.color in self.enemycolors and math.sqrt((flag.x - tank.x)**2 + (flag.y - tank.y)**2) < distbest:
                    distbest = math.sqrt((flag.x - tank.x)**2 + (flag.y - tank.y)**2)
                    bestflag = flag
            if bestflag != None:
                vectors.append(self.attract(tank.x, tank.y, bestflag.x, bestflag.y))
        else:
            for base in self.bases:
                if base.color not in self.enemycolors:
                    center = ((base.corner1_x + base.corner2_x + base.corner3_x + base.corner4_x)/4,
                              (base.corner1_y + base.corner2_y + base.corner3_y + base.corner4_y)/4)
                    vectors.append(self.attract(tank.x, tank.y, center[0], center[1], 0, 2000))
        overallvector = [0, 0]
        for vector in vectors:
            overallvector[0] += vector[0]
            overallvector[1] += vector[1]
        mag = math.sqrt(overallvector[0]**2 + overallvector[1]**2)
        overallvector[0] /= mag
        overallvector[1] /= mag
        # Angle, Velocity
        return math.atan2(overallvector[1], overallvector[0]), 1
        #math.sqrt(overallvector[0]**2 + overallvector[1]**2)

    def attract(self, targetx, targety, originx, originy, radius=0, spread=800):
        theta = math.atan2((originy - targety), (originx - targetx))
        dist = math.sqrt((originy - targety)**2 + (originx - targetx)**2)
        if dist < radius:
            return 0, 0
        elif dist > radius + spread:
            mag = spread
            return mag * math.cos(theta), mag * math.sin(theta)
        else:
            mag = (dist - radius) * 5
            return mag * math.cos(theta), mag * math.sin(theta)

    def repel(self, targetx, targety, originx, originy, radius=40, spread=150):
        theta = math.atan2(-(originy - targety), -(originx - targetx))
        dist = math.sqrt((originy - targety)**2 + (originx - targetx)**2)
        mag = (spread + radius - dist) * 4
        if dist > radius + spread:
            return 0, 0
        elif dist < radius:
            mag = 1000
        return mag * math.cos(theta), mag * math.sin(theta)

    # def tangent(self, targetx, targety, originx, originy, radius=0, spread=100):
    #     theta = self.normalize_angle(math.atan2((originy - targety), (originx - targetx)) + math.pi / 2)
    #     dist = math.sqrt((originy - targety)**2 + (originx - targetx)**2)
    #     if dist > radius + spread:
    #         return 0, 0
    #     else:
    #         return (radius + spread - dist) * math.cos(theta), (radius + spread - dist) * math.sin(theta)


class main(Plot):
    # Inherits Plot so the potential-field helpers used via self below
    # actually resolve.

    def __init__(self):
        # Process CLI arguments.
        try:
            execname, host, port = sys.argv
        except ValueError:
            execname = sys.argv[0]
            print >>sys.stderr, '%s: incorrect number of arguments' % execname
            print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
            sys.exit(-1)

        # Connect.
        #bzrc = BZRC(host, int(port), debug=True)
        global __bzrc__, AGENT
        __bzrc__ = BZRC(host, int(port))
        realobs = __bzrc__.get_obstacles()
        enemies = __bzrc__.get_othertanks()
        bases = __bzrc__.get_bases()
        flags = __bzrc__.get_flags()
        AGENT = Agent(__bzrc__)

        plotter = Plot()
        plotter.plotToFile(realobs)
        plotter.appendToFile(flags, enemies)

        self.obstacles = __bzrc__.get_obstacles()
        self.mytanks = __bzrc__.get_mytanks()
        self.othertanks = __bzrc__.get_othertanks()
        self.flags = __bzrc__.get_flags()
        self.bases = __bzrc__.get_bases()
        self.enemycolors = []
        for tank in self.othertanks:
            if tank.color not in self.enemycolors:
                self.enemycolors.append(tank.color)

        # get_vector operates on a tank object (see above).
        vector = self.get_vector(self.mytanks[0])
        #s = raw_input(tanks)
        #plotter.plotToFile(plotter.draw_points(flags, "flags"))
        plotter.animate(realobs)


if __name__ == '__main__':
    main()
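# --- Added usage sketch (not part of the original file) ---------------------
# Illustrates the three distance regimes of Plot.attract() and Plot.repel()
# above with invented coordinates: inside `radius` the pull is zero, inside
# the spread it grows linearly with distance, and beyond radius+spread it is
# capped at `spread`.
_p = Plot()
print(_p.attract(0, 0, 100, 0))   # within the spread: (500.0, 0.0)
print(_p.attract(0, 0, 2000, 0))  # beyond radius+spread: capped at (800.0, 0.0)
print(_p.repel(0, 0, 30, 0))      # inside the repel radius (40): magnitude 1000, pointing away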
gpl-3.0
8,187,148,771,526,595,000
28.58445
129
0.597734
false
shuiruge/nn4post
nn4post/utils/euclideanization.py
1
4254
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper functions for euclideanization."""

import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions import Distribution


def get_size(shape):
    """Get tensor size from tensor shape.

    Args:
        shape: List of `int`s, as the shape of tensor.

    Returns:
        `int` as the size of the tensor.
    """
    return int(np.prod(shape))


def get_param_shape(param_dict):
    """Parse shapes from instances of `np.array`, `tf.Tensor`, or
    `Distribution`.

    Args:
        param_dict: `dict`, like `{'w': ..., 'b': ...}`, with values being
            instances of `np.array`, `tf.Tensor`, or `Distribution`.

    Returns:
        `dict` with keys the keys in `param_dict` and values the associated
        shapes (as lists).
    """
    param_shape = {
        name:
            val.batch_shape.as_list() if isinstance(val, Distribution)
            else val.shape.as_list()
        for name, val in param_dict.items()
    }
    return param_shape


def get_parse_param(param_shape):
    r"""Returns a parser of parameters that parses a tensor of the shape
    `[n_d]`, which is an element in the Euclidean parameter-space
    :math:`\mathbb{R}^{n_d}`, to the tensors of the shapes provided by
    `param_shape`.

    Args:
        param_shape: `dict` with keys the keys in `param_dict` and values
            the associated shapes (as lists).

    Returns:
        Callable, with
        Args:
            theta: Tensor with shape `[param_space_dim]`, as one element in
                the parameter-space, obtained by flattening the `param` in
                the arguments of the `model`, and then concatenating by the
                order of the `param_names_in_order`.
        Returns:
            `dict` with keys the same as `param_shape`, and values Tensors
            with shape the same as the values of `param_shape`.
    """
    # Get list of shapes and sizes of parameters ordered by name
    param_names_in_order = sorted(param_shape.keys())
    param_shapes = [param_shape[z] for z in param_names_in_order]
    param_sizes = [get_size(shape) for shape in param_shapes]

    def parse_param(theta):
        """
        Args:
            theta: Tensor with shape `[param_space_dim]`, as one element in
                the parameter-space, obtained by flattening the `param` in
                the arguments of the `model`, and then concatenating by the
                order of the `param_names_in_order`.

        Returns:
            `dict` with keys the same as `param_shape`, and values Tensors
            with shape the same as the values of `param_shape`.
        """
        with tf.name_scope('parse_parameter'):
            splited = tf.split(theta, param_sizes)
            reshaped = [tf.reshape(flat, shape) for flat, shape
                        in list(zip(splited, param_shapes))]
            return {z: reshaped[i]
                    for i, z in enumerate(param_names_in_order)}

    return parse_param


def get_param_space_dim(param_shape):
    """
    Args:
        param_shape: `dict` with keys the keys in `param_dict` and values
            the associated shapes (as lists).

    Returns:
        `int` as the dimension of parameter-space.
    """
    param_sizes = [get_size(shape) for shape in param_shape.values()]
    param_space_dim = sum(param_sizes)
    return param_space_dim


def euclideanize(param_shape):
    """Returns a decorator that euclideanizes a function, implemented by
    TensorFlow, on general parameter-space to that on the associated
    Euclidean parameter-space.

    Example:
    ```python:
    param_shape = {'param_1': [2, 5], 'param_2': [3], ...}

    @euclideanize(param_shape)
    def fn(param_1, param_2, ...):
        '''
        Args:
            param_1: An instance of `tf.Tensor` with shape `[2, 5]`.
            param_2: An instance of `tf.Tensor` with shape `[3]`.
            ...

        Returns:
            Any.
        '''
        # Your implementation.
    ```

    Args:
        param_shape: `dict` with keys the keys in `param_dict` and values
            the associated shapes (as lists).

    Returns:
        A decorator.
    """
    parse_param = get_parse_param(param_shape)

    def decorator(fn):
        def euclideanized_fn(euclidean_param):
            param_dict = parse_param(euclidean_param)
            return fn(**param_dict)
        return euclideanized_fn

    return decorator
gpl-3.0
862,482,280,620,375,300
25.09816
80
0.623413
false
imapp-pl/golem
golem/testutils.py
1
3497
import logging
import os
import shutil
import tempfile
import unittest
from os import path

from golem.core.common import is_windows, is_osx
from golem.core.simpleenv import get_local_datadir
from golem.model import Database
from golem.ethereum import Client


class TempDirFixture(unittest.TestCase):
    __root_dir = None

    @classmethod
    def setUpClass(cls):
        logging.basicConfig(level=logging.DEBUG)
        if is_windows():
            import win32api
            tmppath = win32api.GetLongPathName(tempfile.gettempdir())
        elif is_osx():
            tmppath = get_local_datadir('.tests')
        else:
            tmppath = tempfile.gettempdir()
        cls.__root_dir = path.join(tmppath, 'golem')
        if not os.path.exists(cls.__root_dir):
            os.makedirs(cls.__root_dir, mode=0770)

    # Concurrent tests will fail
    # @classmethod
    # def tearDownClass(cls):
    #     if os.path.exists(cls.__root_dir):
    #         shutil.rmtree(cls.__root_dir)

    def setUp(self):
        dir_name = self._temp_dir_name()
        self.tempdir = tempfile.mkdtemp(prefix=dir_name, dir=self.__root_dir)
        self.path = self.tempdir  # Alias for legacy tests
        if not is_windows():
            os.chmod(self.tempdir, 0770)

    def tearDown(self):
        # Firstly kill Ethereum node to clean up after it later on.
        # FIXME: This is temporary solution. Ethereum node should always be
        #        the explicit dependency and users should close it correctly.
        Client._kill_node()
        if path.isdir(self.tempdir):
            shutil.rmtree(self.tempdir)

    def temp_file_name(self, name):
        return path.join(self.tempdir, name)

    def additional_dir_content(self, file_num_list, dir_=None, results=None,
                               sub_dir=None):
        """
        Create recursively additional temporary files in directories in given
        directory. For example file_num_list in format [5, [2], [4, []]]
        will create 5 files in self.tempdir directory, and 2 subdirectories
        - first one will contain 2 tempfiles, second will contain 4 tempfiles
        and an empty subdirectory.
        :param file_num_list: list containing number of new files that should
            be created in this directory or list describing file_num_list for
            new inner directories
        :param dir_: directory in which files should be created
        :param results: list of created temporary files
        :return: list of created temporary files
        """
        if dir_ is None:
            dir_ = self.tempdir
        if sub_dir:
            dir_ = os.path.join(dir_, sub_dir)
            if not os.path.exists(dir_):
                os.makedirs(dir_)
        if results is None:
            results = []
        for el in file_num_list:
            if isinstance(el, int):
                for i in range(el):
                    t = tempfile.NamedTemporaryFile(dir=dir_, delete=False)
                    results.append(t.name)
            else:
                new_dir = tempfile.mkdtemp(dir=dir_)
                self.additional_dir_content(el, new_dir, results)
        return results

    def _temp_dir_name(self):
        return self.id().rsplit('.', 1)[1]  # Use test method name


class DatabaseFixture(TempDirFixture):
    """Setups temporary database for tests."""

    def setUp(self):
        super(DatabaseFixture, self).setUp()
        self.database = Database(self.tempdir)

    def tearDown(self):
        self.database.db.close()
        super(DatabaseFixture, self).tearDown()
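# --- Added usage sketch (hypothetical test, not part of golem) --------------
# Inside a TempDirFixture subclass, the nested-list spec documented above
# creates 5 files at the top level, one subdirectory with 2 files, and
# another with 4 files plus an empty subdirectory.
class ExampleLayoutTest(TempDirFixture):
    def test_layout(self):
        files = self.additional_dir_content([5, [2], [4, []]])
        self.assertEqual(len(files), 11)  # 5 + 2 + 4 temporary files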
gpl-3.0
441,202,488,280,668,900
33.97
109
0.619102
false
scrapinghub/exporters
exporters/filters/key_value_filters.py
1
3308
import re
import operator

import six

from exporters.filters.base_filter import BaseFilter
from exporters.utils import nested_dict_value
from exporters.utils import dict_list


class InvalidOperator(ValueError):
    """Exception to be thrown when an invalid operator is set in filter keys."""


OPERATORS = {
    'in': lambda a, b: operator.contains(b, a),
    'contains': lambda a, b: b in a,
    '==': lambda a, b: a == b,
    're_match': lambda a, b: bool(re.match(b, u'%s' % a)),
}

DEFAULT_OPERATOR = '=='


class KeyValueBaseFilter(BaseFilter):
    """Base class for key-value filters."""

    supported_options = {
        'keys': {'type': dict_list},
        'nested_field_separator': {'type': six.string_types, 'default': '.'}
    }

    def __init__(self, *args, **kwargs):
        super(KeyValueBaseFilter, self).__init__(*args, **kwargs)
        self.keys = self.read_option('keys')
        self.nested_field_separator = self.read_option('nested_field_separator')
        self._validate_keys_operator()
        self.logger.info('{} has been initiated. Keys: {}'.format(
            self.__class__.__name__, self.keys))

    def _validate_keys_operator(self):
        for key in self.keys:
            op = key.get('operator')
            if op and op not in OPERATORS:
                raise InvalidOperator('{} operator not valid in key {}'.format(op, key))

    def filter(self, item):
        for key in self.keys:
            if self.nested_field_separator:
                nested_fields = key['name'].split(self.nested_field_separator)
                try:
                    value = nested_dict_value(item, nested_fields)
                except KeyError:
                    self.logger.debug('Missing path {} from item. Item dismissed'.format(
                        nested_fields))
                    return False
            else:
                value = item[key['name']]
            if not self._match_value(
                    value, key['value'],
                    OPERATORS[key.get('operator', DEFAULT_OPERATOR)]):
                return False
        return True

    def _match_value(self, value_found, value_expected, op=None):
        """Return True if value found matches the expected.

        Should be overridden by derived classes implementing custom match.
        """
        raise NotImplementedError


class KeyValueFilter(KeyValueBaseFilter):
    """Filter items depending on keys and values.

    - keys (list)
        It is a list of dicts with the following structure:
        {"key": "value"}. The filter will delete those items that do not
        contain a key "key" or, if they do, that key is not the same as
        "value".
    """

    def _match_value(self, found, expected, op):
        return op(found, expected)


class KeyValueRegexFilter(KeyValueBaseFilter):
    """Filter items depending on keys and values using regular expressions.

    - keys (list)
        It is a list of dicts with the following structure:
        {"key": "regex"}. The filter will delete those items that do not
        contain a key "key" or, if they do, that key value does not match
        "regex".
    """

    def _match_value(self, found, expected, op):
        if found is None:
            return False
        return OPERATORS['re_match'](found, expected)
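# --- Added illustration (sample values invented) -----------------------------
# Quick demonstration of the OPERATORS table above: 'in' checks membership of
# the found value in the expected container, 'contains' the reverse, and
# 're_match' anchors a regex at the start of the stringified value.
print(OPERATORS['in']('es', ['es', 'en']))              # True
print(OPERATORS['contains']('madrid,spain', 'spain'))   # True
print(OPERATORS['re_match']('item-123', r'item-\d+'))   # True
print(OPERATORS['=='](10, 10))                          # True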
bsd-3-clause
-4,708,341,413,093,398,000
33.103093
91
0.595224
false
basbloemsaat/dartsense
tests/test_event_Event.py
1
5777
#!/usr/bin/env python3
import pytest
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))

import dartsense.competition
import dartsense.event


def test_event_Event_init(setup_db):
    event = dartsense.event.Event()
    assert isinstance(event, dartsense.event.Event)
    assert hasattr(event, 'id')
    assert event.id == None
    assert hasattr(event, 'type')
    assert event.type == None
    assert hasattr(event, 'name')
    assert event.name == None
    assert hasattr(event, 'competition')
    assert event.competition == None


def test_event_LeagueRound_init(setup_db):
    event = dartsense.event.LeagueRound()
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.LeagueRound)
    assert hasattr(event, 'type')
    assert event.type == 'league_round'
    assert event.name == None

    event = dartsense.event.Event(
        id=pytest.setup_vars['testcompetition1_round1_id'])
    assert event.id != None
    assert event.id == pytest.setup_vars['testcompetition1_round1_id']
    assert event.name == 'test competition 1 round 1'
    assert event.type == 'league_round'
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.LeagueRound)
    assert isinstance(event.competition, dartsense.competition.Competition)
    assert event.competition.id == pytest.setup_vars['testleague1_id']

    # event matches has a list of the event's matches as found in the db
    # it's a MatchList, without structure of particular order
    assert hasattr(event, 'matches')
    assert hasattr(event.matches, '__iter__')
    assert len(event.matches) == 3


def test_event_LeagueRound_update(setup_db):
    event = dartsense.event.Event(
        id=pytest.setup_vars['testcompetition1_round1_id'])
    assert isinstance(event.competition, dartsense.competition.Competition)
    assert event.competition.id == pytest.setup_vars['testleague1_id']

    event.competition = pytest.setup_vars['testleague2_id']
    assert event.competition.id == pytest.setup_vars['testleague2_id']
    assert hasattr(event, 'save')
    event.save()

    event = None
    event = dartsense.event.Event(
        id=pytest.setup_vars['testcompetition1_round1_id'])
    assert event.competition.id == pytest.setup_vars['testleague2_id']

    event.competition = dartsense.competition.Competition(
        id=pytest.setup_vars['testleague1_id'])
    assert event.competition.id == pytest.setup_vars['testleague1_id']
    event.save()

    event = None
    event = dartsense.event.Event(
        id=pytest.setup_vars['testcompetition1_round1_id'])
    assert event.competition.id == pytest.setup_vars['testleague1_id']


def test_event_LeagueRound_save_new(setup_db):
    event = dartsense.event.LeagueRound(
        competition=pytest.setup_vars['testleague2_id'])
    assert event.id == None
    assert event.save() == None

    event.name = 'testround'
    assert event.name == 'testround'

    new_id = event.save()
    assert isinstance(new_id, int)
    assert new_id > 0
    assert event.id == new_id

    event = None
    event = dartsense.event.Event(id=new_id)
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.LeagueRound)
    assert event.id == new_id
    assert event.name == 'testround'


def test_event_LeagueAdjust_init(setup_db):
    event = dartsense.event.LeagueAdjust()
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.LeagueAdjust)
    assert hasattr(event, 'type')
    assert event.type == 'league_adjust'
    assert event.name == None

    event = dartsense.event.Event(
        id=pytest.setup_vars['testcompetition2_adjustment_id'])
    assert event.id != None
    assert event.id == pytest.setup_vars['testcompetition2_adjustment_id']
    assert event.name == 'test competition 2 adjustment'
    assert event.type == 'league_adjust'
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.LeagueAdjust)
    assert isinstance(event.competition, dartsense.competition.Competition)
    assert event.competition.id == pytest.setup_vars['testleague2_id']


def test_event_Poule_init(setup_db):
    event = dartsense.event.Poule()
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.Poule)
    assert hasattr(event, 'type')
    assert event.type == 'poule'
    assert event.name == None

    event = dartsense.event.Event(id=pytest.setup_vars['testpoule1_id'])
    assert event.id != None
    assert event.id == pytest.setup_vars['testpoule1_id']
    assert event.name == 'test poule 1'
    assert event.type == 'poule'
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.Poule)
    assert isinstance(event.competition, dartsense.competition.Competition)
    assert event.competition.id == pytest.setup_vars['testtournament1_id']


def test_event_Knockout_init(setup_db):
    event = dartsense.event.Knockout()
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.Knockout)
    assert hasattr(event, 'type')
    assert event.type == 'knockout'
    assert event.name == None

    event = dartsense.event.Event(id=pytest.setup_vars['testknockout1_id'])
    assert event.id != None
    assert event.id == pytest.setup_vars['testknockout1_id']
    assert event.name == 'test knockout 1'
    assert event.type == 'knockout'
    assert isinstance(event, dartsense.event.Event)
    assert isinstance(event, dartsense.event.Knockout)
    assert isinstance(event.competition, dartsense.competition.Competition)
    assert event.competition.id == pytest.setup_vars['testtournament1_id']

#
mit
1,687,279,007,639,855,000
32.011429
75
0.709019
false
YunoHost/yunohost
src/yunohost/data_migrations/0008_ssh_conf_managed_by_yunohost_step2.py
1
3419
import os
import re

from moulinette import m18n
from moulinette.utils.log import getActionLogger
from moulinette.utils.filesystem import chown

from yunohost.tools import Migration
from yunohost.regenconf import _get_conf_hashes, _calculate_hash
from yunohost.regenconf import regen_conf
from yunohost.settings import settings_set, settings_get
from yunohost.utils.error import YunohostError
from yunohost.backup import ARCHIVES_PATH

logger = getActionLogger('yunohost.migration')

SSHD_CONF = '/etc/ssh/sshd_config'


class MyMigration(Migration):
    """
    In this second step, the admin is asked if it's okay to use the
    recommended SSH configuration - which also implies disabling the
    deprecated DSA key. This has important implications in the way the user
    may connect to their server (key change, and a spooky warning might be
    given by SSH later). A disclaimer explaining the various things to be
    aware of is shown - and the user may also choose to skip this migration.
    """

    dependencies = ["ssh_conf_managed_by_yunohost_step1"]

    def run(self):
        settings_set("service.ssh.allow_deprecated_dsa_hostkey", False)
        regen_conf(names=['ssh'], force=True)

        # Update local archives folder permissions, so that
        # admin can scp archives out of the server
        if os.path.isdir(ARCHIVES_PATH):
            chown(ARCHIVES_PATH, uid="admin", gid="root")

    @property
    def mode(self):
        # If the conf is already up to date
        # and no DSA key is used, then we're good to go
        # and the migration can be done automatically
        # (basically nothing shall change)
        ynh_hash = _get_conf_hashes('ssh').get(SSHD_CONF, None)
        current_hash = _calculate_hash(SSHD_CONF)
        dsa = settings_get("service.ssh.allow_deprecated_dsa_hostkey")
        if ynh_hash == current_hash and not dsa:
            return "auto"
        return "manual"

    @property
    def disclaimer(self):
        if self.mode == "auto":
            return None

        # Detect key things to be aware of before enabling the
        # recommended configuration
        dsa_key_enabled = False
        ports = []
        root_login = []
        port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
        root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'
        dsa_rgx = r'^[ \t]*HostKey[ \t]+/etc/ssh/ssh_host_dsa_key[ \t]*(?:#.*)?$'
        for line in open(SSHD_CONF):
            ports = ports + re.findall(port_rgx, line)
            root_login = root_login + re.findall(root_rgx, line)
            if not dsa_key_enabled and re.match(dsa_rgx, line) is not None:
                dsa_key_enabled = True

        custom_port = ports != ['22'] and ports != []
        root_login_enabled = root_login and root_login[-1] != 'no'

        # Build message
        message = m18n.n("migration_0008_general_disclaimer")
        if custom_port:
            message += "\n\n" + m18n.n("migration_0008_port")
        if root_login_enabled:
            message += "\n\n" + m18n.n("migration_0008_root")
        if dsa_key_enabled:
            message += "\n\n" + m18n.n("migration_0008_dsa")
        if custom_port or root_login_enabled or dsa_key_enabled:
            message += "\n\n" + m18n.n("migration_0008_warning")
        else:
            message += "\n\n" + m18n.n("migration_0008_no_warning")
        return message
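# --- Added standalone sketch (sample config lines invented) ------------------
# Shows the sshd_config regexes from the disclaimer above in isolation: they
# tolerate leading whitespace and trailing comments, and collect the
# configured ports and PermitRootLogin values.
import re

port_rgx = r'^[ \t]*Port[ \t]+(\d+)[ \t]*(?:#.*)?$'
root_rgx = r'^[ \t]*PermitRootLogin[ \t]([^# \t]*)[ \t]*(?:#.*)?$'

sample = ["Port 2222  # custom port", "  PermitRootLogin no", "#Port 22"]
ports, root_login = [], []
for line in sample:
    ports += re.findall(port_rgx, line)
    root_login += re.findall(root_rgx, line)
print(ports)       # ['2222']  (the commented-out Port 22 is ignored)
print(root_login)  # ['no']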
agpl-3.0
-4,893,049,037,462,025,000
31.561905
81
0.620357
false
jezjestem/sysadmin
MQTT/read_statistics.py
1
17667
#!/usr/bin/env /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os, sys, time
import grp
import signal
import string
import getopt
import glob
import random
import sqlite3
import datetime
import rrdtool


class GrapherFault(Exception):
    em = {301: "Incorrect programmer's input",
          302: "Problem with RRD database"}


class ProbeFault(Exception):
    em = {201: "Database connection problem",
          102: "Programming error",
          103: "Incorrect data given",
          104: "Incorrect data received",
          202: "Database data corrupted"}


class ProbeError(ProbeFault):
    pass


class ProbeWarning(ProbeFault):
    pass


class ProbeDateTime:
    timdat = None;
    epoch = 0;
    timdat_str = ""
    timdat_fmt = '%H:%M:%S %d-%b-%Y(%Z)'

    def __init__(self, epocht, fmt=None):
        self.epoch = epocht;
        self.timdat = time.localtime(epocht);
        if fmt == None:
            fmt = self.timdat_fmt;
        self.timdat_str = time.strftime(fmt, self.timdat);

    def __str__(self):
        return (self.timdat_str);

    def fprint(self, fmt):
        return (time.strftime(fmt, self.timdat));

    def reformat(self, fmt):
        self.timdat_fmt = fmt;
        self.timdat_str = time.strftime(fmt, self.timdat);
        return (self.timdat_str);

    def now(self):
        tm = time.localtime(time.time())
        return (time.strftime(self.timdat_fmt, tm));


def timenow():
    return (ProbeDateTime(time.time(), "%x %X %z"));


def epoch2str(estr, fmt=None):
    return (ProbeDateTime(int(estr), fmt))
    #return (rstr)


class SimpleProbe:
    sp_data = 0.00;   # Data
    sp_timdat = None  # Class for expressing date and time
    sp_cnt = 0;       # counter

    def __init__(self, pdata, epocht):
        self.sp_timdat = ProbeDateTime(epocht);
        self.sp_data = pdata;
        self.sp_cnt += 1;

    def __str__(self):
        pr = self.sp_timdat.fprint('%H:%M %d-%b');
        retstr = "%.2f - %s" % (self.sp_data, pr);
        return (retstr)

    def toRRD(self):
        retstr = "%d:%.2f" % (self.sp_timdat.epoch, self.sp_data);
        return (retstr);


class ProbeMadre:
    DEBUG = False
    DEBUG_LEVEL = 0
    VERBOSE = False

    def __init__(self, dbg, dbg_lvl, verbose):
        self.DEBUG = dbg;
        self.DEBUG_LEVEL = dbg_lvl;
        self.VERBOSE = verbose

    def dprint(self, msg):
        msgbuf = "=> %s " % (msg)
        if self.DEBUG:
            print msgbuf
        return (msgbuf);


class ProbeSet(ProbeMadre):
    ps_type = ""
    ps_cnt = 0;
    ps_sensorset = {};
    ps_cur_sensor = "";
    ps_sensor_names = [];
    ps_DEBUG = False;

    def __init__(self, ptype, snames, debug=False):
        self.ps_type = ptype;
        self.index = 0;
        self.ps_cnt = 0;
        self.ps_DEBUG = debug;
        self.ps_sensor_names = snames;
        self.ps_sensorset[ptype] = {};
        for sn in snames:
            self.ps_sensorset[ptype][sn] = [];
            self.ps_cur_sensor = sn;

    def add_sensor(self, sname, sdata):
        """ """
        datas = False
        # lookup name of the sensor
        hit = False
        for snam in self.ps_sensor_names:
            if sname == snam:
                hit = True
                break;
        if hit != True:
            raise(ProbeFault(104));
            return None;
        self.ps_cur_sensor = sname;
        try:
            datas = self.ps_sensorset[self.ps_type][sname].append(sdata);
        except Exception as e:
            print("!-> add_sensor() failure for \"%s\"" % (self.ps_type));
            return (None);
        self.dprint("add_sensor(self, %s, [%s])" % (sname, sdata))
        self.ps_cnt += 1;
        return (datas);

    def dprint(self, msg):
        ''' Debugging output '''
        toprint = "%d) %s ~> %s" % (self.ps_cnt, self.ps_type, msg)
        if self.ps_DEBUG == True:
            print(toprint)
        return (toprint);


class HumiditySensor(ProbeSet):
    HS_sensors = None;
    HS_cnt = 0;
    HS_db_rows = 0;
    HS_DEBUG = False;

    def __init__(self, sensor_lst, Hdebug=False):
        self.HS_sensors = ProbeSet("HUMIDITY", sensor_lst, debug=Hdebug);
        self.HS_cnt = 0;
        self.HS_DEBUG = Hdebug;

    def add_probe(self, hum, epocht, s_name):
        """ Add entry, returns probe """
        sp = SimpleProbe(hum, epocht)
        self.HS_sensors.add_sensor(s_name, sp);
        self.HS_sensors.dprint("Adding probe %s:%f %s" % (s_name, hum, sp.sp_timdat))
        self.HS_cnt += 1
        return (sp);

    def proc_db_row(self, row):
        """ Function to process DB entries to internal structures """
        hum = 0;
        epocht = 0;
        sensor = "";
        probe = None;
        self.HS_db_rows += 1;
        if (len(row) == 3):
            hum = float(row[2]);
            sensor = row[1];
            epocht = int(row[0]);
            probe = self.add_probe(hum, epocht, sensor);
        else:
            print("!!!> Incorrect database schema! Expected 3 entries found %d" % (len(row)));
            raise ProbeFault(103);
        return (probe);


class TempSensor(ProbeSet):
    TS_lastrec = None
    TS_sensors = None;
    TS_cnt = 0;
    TS_DEBUG = False;

    def __init__(self, sensor_lst, debug=False):
        self.TS_lastrec = None;
        self.TS_DEBUG = debug;
        self.TS_sensors = ProbeSet("TEMPERATURE", sensor_lst, debug=False);
        self.TS_cnt = 0;
        self.TS_db_rows = 0;

    def add_probe(self, temp, epocht, s_name):
        """ Add entry, returns probe """
        sp = SimpleProbe(temp, epocht)
        self.TS_sensors.add_sensor(s_name, sp);
        self.TS_cnt += 1
        self.TS_sensors.dprint("Adding probe: \"%s\" [%s]" % (s_name, sp))
        return (sp);

    def proc_db_row(self, row):
        """ Function to process DB entries to internal structures """
        temp = 0.0;
        epocht = 0;
        sensor = "";
        probe = None;
        self.TS_db_rows += 1;
        if (len(row) == 3):
            temp = float(row[2]);
            sensor = row[1];
            epocht = int(row[0]);
            probe = self.add_probe(temp, epocht, sensor);
        else:
            print("!!!> Incorrect database schema! Expected 3 entries found %d" % (len(row)));
            raise ProbeFault(103);
        return (probe);


class LightSensor(ProbeSet):
    LP_sensors = None;
    LP_cnt = 0;
    LP_db_rows = 0;
    LP_DEBUG = False;

    def __init__(self, sensor_lst, Ldebug=False):
        self.LP_sensors = ProbeSet("LIGHT", sensor_lst, debug=False);
        self.LP_cnt = 0;
        self.LP_db_rows = 0;
        self.LP_DEBUG = Ldebug;

    def add_probe(self, light, epocht, s_name):
        """ Add entry, returns probe """
        Lsensor = self.LP_sensors.add_sensor(s_name, SimpleProbe(light, epocht));
        self.LP_cnt += 1
        return (Lsensor);

    def proc_db_row(self, row):
        """ Function to process DB entries to internal structures """
        probe = None;
        self.LP_db_rows += 1;
        if (len(row) == 3):
            light = float(row[2]);
            sensor = row[1];
            epocht = int(row[0]);
            if self.LP_DEBUG == True:
                print("%d) -> adding Lprobe ('%s', %d, %d)" % (self.LP_cnt, sensor, int(row[2]), epocht))
            probe = self.add_probe(light, epocht, sensor);
        else:
            print("!!!> Incorrect database schema! Expected 3 entries found %d" % (len(row)));
            raise ProbeFault(103);
        return (probe);


#################
#
##
from distutils.util import strtobool


def AskUser(question):
    sys.stdout.write('%s [y/n]\n' % question)
    while True:
        try:
            return strtobool(raw_input().lower())
        except ValueError:
            sys.stdout.write('Please respond with \'y\' or \'n\'.\n')


def ProbeRRD_Create_temp(g_path, rrddef=None, rrdrra=None):
    DS_str = '''DS:mtemp:GAUGE:600:U:U'''
    RRA_avg_str = '''RRA:AVERAGE:0.5:1:240'''
    RRA_max_str = '''RRA:MAX:0.5:1:228'''
    RRA_min_str = '''RRA:MIN:0.5:1:228'''
    rrdh = rrdtool.create(g_path, '--start', '1416510706', '--step', '300',
                          DS_str, RRA_avg_str, RRA_max_str, RRA_min_str);
    return (rrdh);


def ProbeRRD_Create_light(g_path, rrddef=None, rrdrra=None):
    DS_str = '''DS:light:GAUGE:600:U:U'''
    RRA_avg_str = '''RRA:AVERAGE:0.5:1:220'''
    RRA_max_str = '''RRA:MAX:0.5:1:228'''
    RRA_min_str = '''RRA:MIN:0.5:1:228'''
    rrdh = rrdtool.create(g_path, '--start', '1416510706', '--step', '300',
                          DS_str, RRA_avg_str, RRA_max_str, RRA_min_str);
    return (rrdh);


def ProbeRRD_Create_hum(g_path, rrddef=None, rrdrra=None):
    DS_str = '''DS:hum:GAUGE:600:U:U'''
    RRA_avg_str = '''RRA:AVERAGE:0.5:1:200'''
    RRA_max_str = '''RRA:MAX:0.5:1:228'''
    RRA_min_str = '''RRA:MIN:0.5:1:228'''
    rrdh = rrdtool.create(g_path, '--start', '1416510706', '--step', '300',
                          DS_str, RRA_avg_str, RRA_max_str, RRA_min_str);
    return (rrdh);


def ProbeRRD_Update(g_path, SP):
    ret = None;
    errbuf = []
    cnt = 0;
    try:
        ret = rrdtool.update(g_path, str(SP.toRRD()));
    except Exception as e:
        errbuf.append("rrdtool.update(%s) failed to update %s: %s | %s\n" % (g_path, SP, e, str(e)));
        if cnt > 3:
            print("=>Done with %d errors" % (len(errbuf)))
            errlog = open("errlog", "w+")
            print("==> Writing %d lines to \"errorlog\"" % (cnt));
            for ln in errbuf:
                errlog.write(ln);
            errlog.close();
    finally:
        cnt += 1;
    return (ret);


def ProbeRRD_Graph_temp(f_name):
    graph_f = (f_name[:-3] + "png");
    ret = None;
    try:
        ret = rrdtool.graph(graph_f,
            '--start', 'now-20h', '--end', 'now', '--step', '60',
            '--width', '650', '--height', '180', '--title', 'Temperature - inside',
            '--vertical-label', '°C',
            '--grid-dash', '2:1',
            '--border', '0',
            '-E', '--graph-render-mode', 'normal',
            '-W', 'created: %s' % (timenow()),
            '--font', 'TITLE:11:Dorsa',
            '--font', 'AXIS:7:Dorsa',
            '--font', 'UNIT:7:Dorsa',
            '--font', 'LEGEND:8:Dorsa',
            "--color=SHADEB#9999CC",
            "--color=BACK#D8D8D8",
            "--color=FONT#080808",
            '--color=CANVAS#202020',
            '''DEF:mtemp=temperature.rrd:mtemp:AVERAGE''',
            '''VDEF:tempmax=mtemp,MAXIMUM''',
            '''VDEF:tempmin=mtemp,MINIMUM''',
            '''VDEF:tempavg=mtemp,AVERAGE''',
            '''VDEF:templst=mtemp,LAST''',
            '''COMMENT:Max\: ''',
            '''AREA:mtemp#6633FF''',
            '''GPRINT:tempmax:%3.2lf°C%s\l''',
            '''COMMENT:Min\: ''',
            '''LINE2:tempmax#FF3300''',
            '''GPRINT:tempmin:%3.2lf°C%s\l''',
            '''COMMENT:Avg\: ''',
            '''GPRINT:tempavg:%3.2lf°C%s\l''',
            '''COMMENT:Last\: ''',
            '''GPRINT:templst:%3.2lf°C%s\l''',
            '''TEXTALIGN:right''',
            #'''GPRINT:mtemp:MAX:%lf%s\g''',
            #'''COMMENT:Temperature measured by DS18B20 '''
            );
    except Exception as e:
        print("=!> Failed to graph \"%s\":%s" % (graph_f, e));
        try:
            os.stat(f_name)
        except:
            pass;
        else:
            print("==> File \"%s\" still exists but rrdgraph failed!" % (f_name));
        # finally:
        #     if AskUser("####> Found stale file \"%s\". Removing?" % (f_name)) == "y":
        #         os.unlink(f_name);
    return (ret)


def ProbeRRD_Graph_light(f_name):
    ''' plotting light '''
    graph_f = (f_name[:-3] + "png");
    ret = None;
    try:
        ret = rrdtool.graph(graph_f, '--start', 'now-20h', '--step', '300',
            '--end', 'now',
            '--width', '600', '--height', '200', '--title', 'Light',
            '--border', '0',
            '--font', 'TITLE:11:Ubuntu',
            '--font', 'AXIS:7:Ubuntu',
            '--font', 'UNIT:7:Ubuntu',
            '--font', 'LEGEND:9:Ubuntu',
            '--color=CANVAS#696969',
            '--color=BACK#E8E8E8',
            '--color=SHADEA#080808',
            '-W', 'created: %s' % (timenow()),
            '''DEF:light=light.rrd:light:AVERAGE''',
            '''VDEF:lmax=light,MAXIMUM''',
            '''VDEF:lmin=light,MINIMUM''',
            '''VDEF:lavg=light,AVERAGE''',
            '''VDEF:llst=light,LAST''',
            '''COMMENT: Max\:''',
            '''GPRINT:lmax:%3.0lf\l''',
            '''COMMENT: Min\:''',
            '''GPRINT:lmin:%3.0lf%s\l''',
            '''AREA:light#FFF000''',
            '''COMMENT: Avg\:''',
            '''GPRINT:lavg:%3.1lf%s\l''',
            '''COMMENT: Last\:''',
            '''GPRINT:llst:%3.1lf%s\l''',
            '''TEXTALIGN:right''',
            '''COMMENT: Light intensity!''');
    except Exception as e:
        print("=!> Failed to graph \"%s\":%s" % (graph_f, e));
        try:
            os.stat(f_name)
        except:
            pass;
        else:
            print("==> File \"%s\" still exists but rrdgraph failed!" % (f_name));
        # finally:
        #     if AskUser("####> Found stale file \"%s\". Removing?" % (f_name)) == "y":
        #         os.unlink(f_name);
    return (ret)


def ProbeRRD_Graph_humidity(f_name):
    graph_f = (f_name[:-3] + "png");
    ret = None;
    try:
        ret = rrdtool.graph(graph_f,
            '--start', 'now-20h', '--end', 'now', '--step', '300',
            '--width', '500', '--height', '200',
            '--title', 'Humidity', '--vertical-label', '%',
            '--border', '0',
            '-E',
            #'-P',
            '--color=CANVAS#101010',
            '--color=BACK#D8D8D8',
            #'--color=FONT#696969',
            '--font', 'TITLE:11:Sans',
            '--font', 'UNIT:8:Sans',
            '--font', 'AXIS:8:Sans',
            '--font', 'LEGEND:8:Sans',
            '-W', 'Created: %s' % (timenow()),
            '''DEF:hum=humidity.rrd:hum:AVERAGE''',
            '''VDEF:hummax=hum,MAXIMUM''',
            '''VDEF:hummin=hum,MINIMUM''',
            '''VDEF:humlst=hum,LAST''',
            '''COMMENT:Max\:''',
            '''GPRINT:hummax:%3.0lf%% %s\l''',
            '''COMMENT:Min\:''',
            '''GPRINT:hummin:%3.0lf%% \l''',
            '''COMMENT:Last\:''',
            '''GPRINT:humlst:%3.0lf%% \l''',
            '''AREA:hum#33FF00''',
            #'''COMMENT: <span foreground="blue">heh</span>''',
            );
    except Exception as e:
        print("=!> Failed to graph \"%s\":%s" % (graph_f, e));
        try:
            os.stat(f_name)
        except:
            pass;
        else:
            print("==> File \"%s\" still exists but rrdgraph failed!" % (f_name));
        # finally:
        #     if AskUser("####> Found stale file \"%s\". Removing?" % (f_name)) == "y":
        #         os.unlink(f_name);
    return (ret)


def get_db_row(db_conn_h, t_name, db_cond):
    query = '''SELECT * from %s %s''' % (t_name, db_cond)
    row = [];
    try:
        row = db_conn_h.execute(query);
    except Exception as e:
        print("!> Failed to fetch rows from %s:%s" % (t_name, e));
    return (row);


def read_db(dbpath):
    """ Read DB entries and map them to internal structures """
    print("=> Connecting to sqlite3 DB \"%s\"" % (dbpath));
    try:
        conn = sqlite3.connect(dbpath);
        c = conn.cursor();
    except Exception as e:
        print("!!!> Failed to connect to DB");
        sys.exit(3);

    #-----------
    # Temperature
    #--------------------------------------------------------------------
    Tsensors = TempSensor(['board', 'outside']);
    # First read one dataset
    for row in get_db_row(c, "temperature", '''WHERE sondname="board"'''):
        Tsensors.proc_db_row(row);
    RRD_db = 'temperature.rrd'
    tgraph = ProbeRRD_Create_temp(RRD_db);
    for ts in Tsensors.TS_sensors.ps_sensorset['TEMPERATURE']['board']:
        ProbeRRD_Update(RRD_db, ts);
    ProbeRRD_Graph_temp(RRD_db);
    #print("rrdupdate temperature.rrd %s" % (str(ts.toRRD())))

    # Now we read data from photoresistor
    #-------
    # Light
    #---------------------------------------------------------------
    RRD_db = 'light.rrd';
    ProbeRRD_Create_light(RRD_db);
    Lsensors = LightSensor(["general"], Ldebug=False);
    for row in get_db_row(c, "light", ''' WHERE desc="general"'''):
        Lsensors.proc_db_row(row);
    for ts in Lsensors.LP_sensors.ps_sensorset['LIGHT']['general']:
        #print("LIGHT) %s" % (str(ts)));
        ProbeRRD_Update(RRD_db, ts);
    ProbeRRD_Graph_light('light.rrd');

    #---------
    # Humidity
    #------------------------------------------------------------------
    RRD_db = 'humidity.rrd';
    ProbeRRD_Create_hum(RRD_db);
    Hsensors = HumiditySensor(["board"], Hdebug=False);
    for row in get_db_row(c, "humidity", ''' WHERE desc="board"'''):
        Hsensors.proc_db_row(row);
    probes_total = 0
    for ts in Hsensors.HS_sensors.ps_sensorset['HUMIDITY']['board']:
        ProbeRRD_Update(RRD_db, ts);
        probes_total += 1;
        #print("HUMIDITY) %s" % (str(ts)));
    ProbeRRD_Graph_humidity('humidity.rrd');
    return (conn)


##############################################################
# main
###
DBH = read_db('./mqtt.db');
DBH.close();
bsd-3-clause
3,881,897,543,832,172,000
29.347079
122
0.487204
false
dmsurti/reynolds-blender
reynolds_blender/block_regions.py
1
8435
#------------------------------------------------------------------------------
# Reynolds-Blender | The Blender add-on for Reynolds, an OpenFoam toolbox.
#------------------------------------------------------------------------------
# Copyright
#------------------------------------------------------------------------------
# Deepak Surti ([email protected])
# Prabhu R (IIT Bombay, [email protected])
# Shivasubramanian G (IIT Bombay, [email protected])
#------------------------------------------------------------------------------
# License
#
# This file is part of reynolds-blender.
#
# reynolds-blender is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# reynolds-blender is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with reynolds-blender. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------

# -----------
# bpy imports
# -----------
import bpy, bmesh
from bpy.props import (StringProperty,
                       BoolProperty,
                       IntProperty,
                       FloatProperty,
                       EnumProperty,
                       PointerProperty,
                       IntVectorProperty,
                       FloatVectorProperty,
                       CollectionProperty)
from bpy.types import (Panel,
                       Operator,
                       PropertyGroup,
                       UIList)
from bpy.path import abspath
from mathutils import Matrix, Vector

# --------------
# python imports
# --------------
import operator
import os

# ------------------------
# reynolds blender imports
# ------------------------
from reynolds_blender.gui.register import register_classes, unregister_classes
from reynolds_blender.gui.attrs import set_scene_attrs, del_scene_attrs
from reynolds_blender.gui.custom_operator import create_custom_operators
from reynolds_blender.gui.renderer import ReynoldsGUIRenderer

# ----------------
# reynolds imports
# ----------------
from reynolds.dict.parser import ReynoldsFoamDict
from reynolds.foam.cmd_runner import FoamCmdRunner

# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------

def assign_region(self, context):
    print("Assigning region")
    scene = context.scene
    obj = context.active_object
    r_faces = []
    if scene.select_front_face:
        r_faces.append("Front")
    if scene.select_back_face:
        r_faces.append("Back")
    if scene.select_top_face:
        r_faces.append("Top")
    if scene.select_bottom_face:
        r_faces.append("Bottom")
    if scene.select_left_face:
        r_faces.append("Left")
    if scene.select_right_face:
        r_faces.append("Right")
    print(r_faces)

    item = scene.bmd_regions[scene.bmd_rindex]
    region_name = scene.region_name
    face_str = region_name + " : " + ' '.join(str(f) for f in r_faces)
    # empty {} for time properties p, U, T etc
    r = (scene.region_name, scene.region_type, r_faces, {})
    item.name = face_str
    scene.regions[region_name] = r
    print(scene.regions)

    # reset selections
    scene.select_front_face = False
    scene.select_back_face = False
    scene.select_top_face = False
    scene.select_bottom_face = False
    scene.select_right_face = False
    scene.select_left_face = False
    return {'FINISHED'}


def remove_region(self, context):
    scene = context.scene
    obj = context.active_object

    print("Before: Removing region")
    print(scene.bmd_regions)
    print(scene.bmd_rindex, scene.bmd_regions[scene.bmd_rindex])
    item = scene.bmd_regions[scene.bmd_rindex]
    r_name, _ = item.name.split(" : ", 1)
    scene.regions.pop(r_name, None)
    print("After: Removing region")
    print(scene.bmd_regions)
    item.name = ""
    return {'FINISHED'}


def load_regions(self, context):
    print("Initial loading of regions")
    scene = context.scene
    if not scene.regions_loaded:
        item_coll = scene.bmd_regions
        item_coll.clear()
        faces = ["Front", "Back", "Top", "Bottom", "Left", "Right"]
        index = 0
        for f in faces:
            r_faces = [f]
            item = item_coll.add()
            scene.bmd_rindex = index
            region_name = f
            face_str = region_name + " : " + region_name
            region_type = 'patch'
            r = (region_name, region_type, r_faces, {})
            item.name = face_str
            scene.regions[region_name] = r
            index += 1
        scene.regions_loaded = True


def assign_time_prop(self, context):
    print("Assigning time property")
    scene = context.scene
    obj = context.active_object

    # Store the distinct time properties
    if scene.time_props is None:
        scene.time_props = []
    time_prop_type = scene.time_prop_type
    if not scene.time_prop_type in scene.time_props:
        scene.time_props.append(time_prop_type)

    # Store the dimensions for a time property
    if scene.time_props_dimensions is None:
        scene.time_props_dimensions = {}
    if not time_prop_type in scene.time_props_dimensions:
        scene.time_props_dimensions[time_prop_type] = scene.time_prop_dimensions

    # Store the internal field for a time property
    if scene.time_props_internal_field is None:
        scene.time_props_internal_field = {}
    if not time_prop_type in scene.time_props_internal_field:
        scene.time_props_internal_field[time_prop_type] = scene.time_prop_internal_field

    # Store the time property type and value for the patch
    item = scene.bmd_regions[scene.bmd_rindex]
    print('Select region: ' + item.name)
    region_name = item.name.split(':')[0].strip()
    print(' Region data: ')
    _, _, _, time_prop_info = scene.regions[region_name]
    if not time_prop_type in time_prop_info:
        time_prop_info[time_prop_type] = {}
    time_prop_info[time_prop_type]['type'] = scene.time_prop_patch_type
    time_prop_info[time_prop_type]['value'] = scene.time_prop_value

    print(scene.time_props)
    print(scene.time_props_dimensions)
    print(scene.time_props_internal_field)
    for _, r in scene.regions.items():
        print(r)
    return {'FINISHED'}

# ------------------------------------------------------------------------
# Panel
# ------------------------------------------------------------------------

class BlockMeshRegionsOperator(bpy.types.Operator):
    bl_idname = "reynolds.of_bmd_regions_panel"
    bl_label = "Regions"
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Tools"
    bl_context = "objectmode"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        return {'FINISHED'}

    # Return True to force redraw
    def check(self, context):
        return True

    def invoke(self, context, event):
        load_regions(self, context)
        return context.window_manager.invoke_props_dialog(self, width=750)

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        # ---------------------------------------
        # Render Block Panel using YAML GUI Spec
        # ---------------------------------------
        gui_renderer = ReynoldsGUIRenderer(scene, layout,
                                           'block_regions.yaml')
        gui_renderer.render()

# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------

def register():
    register_classes(__name__)
    set_scene_attrs('block_regions.yaml')
    create_custom_operators('block_regions.yaml', __name__)


def unregister():
    unregister_classes(__name__)
    del_scene_attrs('block_regions.yaml')


if __name__ == "__main__":
    register()
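# --- Added plain-data sketch (values invented, no Blender required) ----------
# The region record built by assign_region() above is a 4-tuple of
# (name, type, faces, time-property map), keyed by region name in
# scene.regions and mirrored as "name : faces" in the UI list.
region = ('inlet', 'patch', ['Left'], {})
scene_regions = {'inlet': region}
ui_item_name = region[0] + ' : ' + ' '.join(region[2])  # 'inlet : Left'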
gpl-3.0
-8,903,011,082,152,992,000
32.472222
88
0.550919
false
paulscherrerinstitute/pshell
src/main/assembly/script/Lib/sessions.py
1
9398
from startup import get_context, set_exec_pars
import ch.psi.utils.SciCat as SciCat
import java.lang.Boolean


def _sm():
    return get_context().sessionManager


def session_start(name, metadata=None):
    """
    Starts new session. If a session is open, completes it first.

    Args:
        name(str): Session name.
        metadata(dict): Map of initial metadata parameters.
            If None (default) use the default metadata definition.

    Returns:
        session id (int)
    """
    set_exec_pars(open=False)
    return _sm().start(name, metadata)


def session_complete():
    """
    Completes current session, if started.
    """
    set_exec_pars(open=False)
    return _sm().stop()


def session_pause():
    """
    Pauses current session, if started.
    """
    return _sm().pause()


def session_resume():
    """
    Resumes current session, if paused.
    """
    return _sm().resume()


def session_cancel():
    """
    Cancels current session, if started and empty (no generated data).
    """
    return _sm().cancel()


def session_restart(id):
    """
    Reopens a completed session if not yet archived and if it belongs to the
    same user.

    Args:
        id(int): Session id.
    """
    return _sm().restart(id)


def session_move(origin, files, dest):
    """
    Moves a list of run files (relative to root) to another session.
    Sessions must not be archived and must belong to the same user.

    Args:
        origin(int): Origin session id.
        files(list): file names
        dest(int): Destination session id.
    """
    return _sm().move(origin, files, dest)


def session_detach(name, id, files):
    """
    Detaches a list of run files (relative to root) to a new session.
    Session must not be archived and must belong to the same user.

    Args:
        name(str): Name of new session.
        id(int): Session id.
        files(list): file names

    Returns:
        New session id (int)
    """
    return _sm().detach(name, id, files)


def session_create(name, files, metadata=None, root=None):
    """
    Create a session from existing data files.

    Args:
        name(str): Name of new session.
        files(list): file names relative to root
        metadata(dict): Map of initial metadata parameters.
            If None (default) use the default metadata definition.
        root(str): data root path. If None (default) uses default data path.

    Returns:
        New session id (int)
    """
    return _sm().create(name, files, metadata, root)


def session_id():
    """
    Returns current session id (0 if no session is started).
    """
    return _sm().getCurrentSession()


def session_name():
    """
    Returns current session name ("unknown" if no session is started).
    """
    return _sm().getCurrentName()


def session_started():
    """
    Returns true if a session is started.
    """
    return _sm().isStarted()


def session_paused():
    """
    Returns true if current session is paused.
    """
    return _sm().isPaused()


def session_add_file(path):
    """
    Adds additional file to session, if started.

    Args:
        path(str): Relative to data path or absolute.
    """
    return _sm().addAdditionalFile(path)


def session_ids():
    """
    Returns list of completed sessions.
    """
    return _sm().getIDs()


def session_get_name(id=None):
    """
    Return the name of a session.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        session name (str)
    """
    return _sm().getName() if id is None else _sm().getName(id)


def session_get_state(id=None):
    """
    Returns the session state.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        session state (str)
    """
    return _sm().getState() if id is None else _sm().getState(id)


def session_get_start(id=None):
    """
    Returns the start timestamp of a session.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        Timestamp (long)
    """
    return _sm().getStart() if id is None else _sm().getStart(id)


def session_get_stop(id):
    """
    Returns the stop timestamp of a completed session.

    Args:
        id(int): Session id.

    Returns:
        Timestamp (long)
    """
    return _sm().getStop(id)


def session_get_root(id=None):
    """
    Returns the root data path of a session.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        str
    """
    return _sm().getRoot() if id is None else _sm().getRoot(id)


def session_get_info(id=None):
    """
    Returns the session information.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        session info (dict)
    """
    return _sm().getInfo() if id is None else _sm().getInfo(id)


def session_get_metadata(id=None):
    """
    Returns a session's metadata.

    Args:
        id(int): Session id. Default (None) is the current session.

    Returns:
        session metadata (dict)
    """
    return _sm().getMetadata() if id is None else _sm().getMetadata(id)


def session_set_metadata(key, value, id=None):
    """
    Set session metadata entry.

    Args:
        key(str): Metadata key
        value(obj): Metadata value
        id(int): Session id. Default (None) is the current session.
    """
    return _sm().setMetadata(key, value) if id is None else _sm().setMetadata(id, key, value)


def session_get_metadata_keys():
    """
    Return the keys of the default metadata definition.

    Returns:
        list of str
    """
    return [str(e.key) for e in _sm().getMetadataDefinition()]


def session_get_metadata_type(key):
    """
    Return the metadata type for a given key: String, Integer, Double,
    Boolean, List or Map.

    Args:
        key(str): Metadata key.

    Returns:
        str
    """
    return str(_sm().getMetadataType(key))


def session_get_metadata_default(key):
    """
    Return the metadata default value for a given key.

    Args:
        key(str): Metadata key.

    Returns:
        Object
    """
    return _sm().getMetadataDefault(key)


def session_get_runs(id=None, relative=True):
    """
    Return the runs of a session.

    Args:
        id(int): Session id. Default (None) is the current session.
        relative(bool): if True use relative file names (for files under the
            data root path)

    Returns:
        List of dicts
    """
    return _sm().getRuns(java.lang.Boolean(relative)) if id is None else _sm().getRuns(id, relative)


def session_set_run_enabled(enabled, id=None, index=-1):
    """
    Enable or disable a run.

    Args:
        enabled(bool): true for enabling, false for disabling
        id(int): Session id. Default (None) is the current session.
        index: Index of the run. Default (-1) for the last run.
    """
    return _sm().setRunEnabled(index, enabled) if id is None else _sm().setRunEnabled(id, index, enabled)


def session_get_additional_files(id=None, relative=True):
    """
    Return additional files of a session.

    Args:
        id(int): Session id. Default (None) is the current session.
        relative(bool): if True use relative file names (for files under the
            data root path)

    Returns:
        List of str
    """
    return _sm().getAdditionalFiles(java.lang.Boolean(relative)) if id is None else _sm().getAdditionalFiles(id, relative)


def session_get_file_list(id=None, relative=True):
    """
    Return complete list of data files of a session.

    Args:
        id(int): Session id. Default (None) is the current session.
        relative(bool): if True use relative file names (for files under the
            data root path)

    Returns:
        List of str
    """
    return _sm().getFileList(java.lang.Boolean(relative)) if id is None else _sm().getFileList(id, relative)


def session_create_zip(file_name, id=None, preserve_folder_structure=True):
    """
    Create ZIP file with session contents.

    Args:
        file_name(str): name of the zip file
        id(int): Session id. Default (None) is the current session.
        preserve_folder_structure: if False all data files are added to the
            root of the file; if True the folder structure under data root
            is preserved.
    """
    return _sm().createZipFile(file_name, preserve_folder_structure) if id is None else _sm().createZipFile(id, file_name, preserve_folder_structure)


def session_ingest_scicat(id, metadata={}):
    """
    Ingest a completed session to SciCat.

    Args:
        id(int): Session id.
        metadata(dict): session metadata

    Returns:
        Tuple (Dataset Name, Dataset ID) in case of success.
        Otherwise throws an exception.
    """
    sciCat = SciCat()
    result = sciCat.ingest(id, metadata)
    print result.output
    if not result.success:
        raise Exception("Error ingesting session " + str(id))
    return result.datasetName, result.datasetId
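# --- Added end-to-end sketch (names and paths invented) ----------------------
# Exercises the session API defined above; assumes a running PShell/Jython
# context where `startup` provides get_context().
sid = session_start('beamtime_2024', {'operator': 'jdoe'})
session_add_file('notes/logbook.txt')
session_set_metadata('sample', 'LaB6')
print(session_get_runs())        # runs recorded so far, as relative paths
session_create_zip('/tmp/beamtime_2024.zip')
session_complete()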
gpl-3.0
-3,406,247,952,026,302,500
26.005747
149
0.605022
false
agoragames/torus
torus/aggregates.py
1
2228
'''
Copyright (c) 2013, Agora Games, LLC All rights reserved.

https://github.com/agoragames/torus/blob/master/LICENSE.txt
'''
import re

STAR = '[a-zA-Z0-9_-]+'


class Aggregates(object):
    '''
    Implements the aggregating of stats through pattern matches.
    '''

    def __init__(self, rules=[]):
        '''
        Initialize with a list of aggregate rules.
        '''
        self._rules = []
        self.add(rules)

    def add(self, rules):
        '''
        Add a set of aggregate rules.
        '''
        for target, source in rules:
            self._rules.append(Aggregate(source, target))

    def __iter__(self):
        '''
        Return an iterator on the rules.
        '''
        for rule in self._rules:
            yield rule

    def match(self, stat):
        '''
        Return the name of any aggregates which should be generated from
        the stat.
        '''
        return filter(None, (r.match(stat) for r in self._rules))


class Aggregate(object):
    '''
    A single aggregate.
    '''

    def __init__(self, source, target):
        self._count = 0
        self._source = source
        self._target = target

        source = source.split('.')
        target = target.split('.')

        source_pattern = []
        for src_comp in source:
            if src_comp.startswith('<') and src_comp.endswith('>'):
                source_pattern.append('(?P%s%s)' % (src_comp, STAR))
            elif src_comp == '*':
                source_pattern.append(STAR)
            else:
                source_pattern.append(src_comp)
        source_pattern = "\.".join(source_pattern)
        self._pattern = re.compile('^%s$' % (source_pattern))

        target_format = []
        for target_comp in target:
            if target_comp.startswith('<') and target_comp.endswith('>'):
                name = target_comp[1:-1]
                target_format.append('%%(%s)s' % (name))
            else:
                target_format.append(target_comp)
        self._target_format = '.'.join(target_format)

    @property
    def source(self):
        return self._source

    @property
    def count(self):
        return self._count

    def __repr__(self):
        return self._source

    def match(self, stat):
        '''
        If the stat matches, return the name of the aggregate, else return
        None.
        '''
        res = self._pattern.match(stat)
        if res:
            self._count += 1
            return self._target_format % res.groupdict()
        return None
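# --- Added usage sketch (rule and stat names invented) -----------------------
# For the pattern-matching aggregator above: <app> in the source binds a name
# that is substituted into the target, while * matches any single dotted
# component without capturing.
agg = Aggregates([('apps.<app>.requests.total', 'apps.<app>.*.requests')])
print(agg.match('apps.web.host1.requests'))   # ['apps.web.requests.total']
print(agg.match('apps.web.host1.errors'))     # []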
bsd-3-clause
4,730,220,539,690,182,000
22.702128
77
0.597846
false
alessandrothea/ginger
ginger/plotter/smallstyle.py
1
2513
import ROOT

from .stylebase import _baseratiostyle


# --------------------------------------------------------------------------
class smallstyle(_baseratiostyle):
    '''Class holding all ratio-plot style settings for small plots
    '''

    # ----------------------------------------------------------------------
    @classmethod
    def __lazy_init__(cls):
        if hasattr(cls, 'plotratio'):
            return

        from ROOT import kRed, kOrange, kAzure
        from ROOT import kFullCircle, kOpenCircle

        cls.scalemax = 1.
        cls.scalemin = 1.
        cls.ltitle = ''
        cls.rtitle = ''
        cls.ytitle2 = 'ratio'
        cls.colors = [kRed + 1, kOrange + 7, kAzure - 6,
                      kAzure + 9, kOrange + 7, kOrange - 2]
        cls.markers = [kFullCircle, kOpenCircle, kFullCircle,
                       kOpenCircle, kFullCircle, kFullCircle]
        cls.fills = [0, 0, 0, 0, 0, 0]
        cls.plotratio = True

        # lengths
        cls.left = 50
        cls.right = 35
        cls.top = 35
        cls.bottom = 50
        cls.gap = 5
        cls.width = 250
        cls.heighttop = 250
        cls.heightbot = 100
        cls.linewidth = 1
        cls.markersize = 5
        cls.textsize = 15
        cls.titley = 30
        cls.legmargin = 12
        cls.legboxsize = 25
        cls.legtextsize = 15

        cls.axsty = {
            'labelfamily': 4,
            'labelsize': 15,
            'labeloffset': 2,
            'titlefamily': 4,
            'titlesize': 15,
            'titleoffset': 35,
            'ticklength': 10,
            'ndivisions': 505,
        }
        cls.xaxsty = {}
        cls.yaxsty = {}

        cls.errsty = 3005
        cls.errcol = ROOT.kGray + 1

        cls.logx = False
        cls.logy = False
        cls.morelogx = False
        cls.morelogy = False

        cls.userrangex = (0., 0.)
        cls.yrange = (0., 0.)

        # something more active
        cls._legalign = ('l', 't')
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    def __lazy_init__(self):
        self.__lazy_init__()
    # ----------------------------------------------------------------------

# --------------------------------------------------------------------------
gpl-3.0
4,515,272,852,488,685,000
28.916667
107
0.374453
false
crosscent/terrarium
terrarium/utility/pot.py
1
6813
import math


class Cluster(object):
    """Cluster class

    The basic class of a cluster of seeds.

    Attributes:
        radius: The radius of a cluster of seeds
    """

    def __init__(self, radius):
        self.radius = radius
        self.area = math.pi * radius * radius

    def get_radius(self):
        return self.radius


class Pot(object):
    """Pot class

    The basic class of a pot for planting.

    Attributes:
        cluster: The list of seed clusters added to the pot
    """

    def __init__(self):
        """Initialize the Pot class with cluster
        """
        self.cluster = []

    def add_cluster(self, cluster):
        """Add a cluster of seeds to the pot

        Args:
            cluster: A cluster instance
        """
        self.cluster.append(cluster)


class CircularPot(Pot):
    """Class for a circular pot
    """

    def __init__(self, radius):
        """Initializes radius and area
        """
        super(CircularPot, self).__init__()
        self.radius = radius
        self.area = math.pi * radius * radius

    def get_radius(self):
        """Returns the radius of the pot
        """
        return self.radius

    def cluster_calculation(self, seed_cluster_radius, radius_difference):
        """Returns the maximum number of clusters of the type of seed
        inputted that can be planted in the Pot.

        This algorithm is taken from The Engineering ToolBox.
        """
        # calculate the number of clusters
        number_of_circles = math.floor((2 * math.pi * radius_difference) /
                                       (2 * seed_cluster_radius))
        if radius_difference == 0:
            self.optimal_clusters.append({"radius": seed_cluster_radius,
                                          "x": 0,
                                          "y": 0})
            return 1

        x0 = radius_difference * math.cos(0 * 2 * math.pi / number_of_circles)
        x1 = radius_difference * math.cos(1 * 2 * math.pi / number_of_circles)
        y0 = radius_difference * math.sin(0 * 2 * math.pi / number_of_circles)
        y1 = radius_difference * math.sin(1 * 2 * math.pi / number_of_circles)
        distance = math.pow((math.pow(x0 - x1, 2)) + (math.pow(y0 - y1, 2)), 0.5)
        if distance < 2 * seed_cluster_radius:
            number_of_circles -= 1

        # add cluster to optimal_clusters for image manipulation
        for i in xrange(int(number_of_circles)):
            self.optimal_clusters.append(
                {"radius": seed_cluster_radius,
                 "x": radius_difference * math.cos(i * 2 * math.pi / number_of_circles),
                 "y": radius_difference * math.sin(i * 2 * math.pi / number_of_circles)})

        new_radius_difference = radius_difference - (2 * seed_cluster_radius)
        if new_radius_difference >= seed_cluster_radius:
            return number_of_circles + self.cluster_calculation(
                seed_cluster_radius, new_radius_difference)
        elif radius_difference > (2 * seed_cluster_radius):
            self.optimal_clusters.append({"radius": seed_cluster_radius,
                                          "x": 0,
                                          "y": 0})
            return number_of_circles + 1
        else:
            return number_of_circles

    def num_cluster_available(self, cluster):
        """Returns the maximum number of clusters of the type of seed
        inputted that can be planted in the given Pot.
        """
        self.optimal_clusters = []
        if self.get_radius() < cluster.get_radius():
            return (0, self.optimal_clusters)
        radius_difference = self.get_radius() - cluster.get_radius()
        return (int(self.cluster_calculation(cluster.get_radius(),
                                             radius_difference)),
                self.optimal_clusters)


class RectangularPot(Pot):
    """Class for rectangular pot
    """

    def __init__(self, length, width):
        """Initializes area, length, and width
        """
        super(RectangularPot, self).__init__()
        self.length = length
        self.width = width
        self.area = length * width

    def get_length(self):
        """Return length of the pot
        """
        return self.length

    def get_width(self):
        """Return width of the pot
        """
        return self.width

    def cluster_calculation(self, seed_cluster_radius):
        # calculate the number of clusters
        seed_cluster_x_count = int(self.get_length() / (seed_cluster_radius * 2))
        seed_cluster_y_count = int(self.get_width() / (seed_cluster_radius * 2))

        # add cluster to optimal_clusters for image manipulation
        for x_count in xrange(seed_cluster_x_count):
            for y_count in xrange(seed_cluster_y_count):
                self.optimal_clusters.append(
                    {"radius": seed_cluster_radius,
                     "x": seed_cluster_radius + (x_count * seed_cluster_radius * 2),
                     "y": seed_cluster_radius + (y_count * seed_cluster_radius * 2)})
        return seed_cluster_x_count * seed_cluster_y_count

    def num_cluster_available(self, cluster):
        """Returns the maximum number of clusters of the type of seed
        inputted that can be planted in the given Pot.
        """
        self.optimal_clusters = []
        if (self.get_length() < cluster.get_radius() or
                self.get_width() < cluster.get_radius()):
            return (0, self.optimal_clusters)
        return (self.cluster_calculation(cluster.get_radius()),
                self.optimal_clusters)


def circular_pot_calculation(pot_radius, cluster_radius):
    """Calculate the number of seed clusters that can be planted in a given
    pot.

    Sometimes we want to see how many clusters of seeds we can plant in a
    large circular pot for a home garden, and this is useful for determining
    that.

    Args:
        pot_radius: Radius of a pot
        cluster_radius: Optimal radius of a cluster of seeds

    Returns:
        A tuple (cluster_count, cluster_location)
    """
    pot = CircularPot(radius=float(pot_radius))
    seeds = Cluster(radius=float(cluster_radius))
    return pot.num_cluster_available(seeds)


def rectangular_pot_calculation(pot_length, pot_width, cluster_radius):
    """Calculate the number of seed clusters that can be planted in a
    rectangular pot.

    Args:
        pot_length: the length of the pot
        pot_width: the width of the pot
        cluster_radius: optimal radius of a cluster of seeds

    Returns:
        A tuple (cluster_count, cluster_location)
    """
    pot = RectangularPot(float(pot_length), float(pot_width))
    seeds = Cluster(radius=float(cluster_radius))
    return pot.num_cluster_available(seeds)
-2,459,453,519,322,078,000
34.857895
109
0.57948
false
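Editor's note: a quick usage sketch for the two module-level helpers in pot.py above; the numbers are arbitrary and only functions defined in the file are used.

# Usage sketch for pot.py (assumes the module is importable; numbers arbitrary)
count, layout = circular_pot_calculation(pot_radius=10, cluster_radius=2)
print(count)      # maximum number of seed clusters that fit
print(layout[0])  # e.g. {'radius': 2.0, 'x': ..., 'y': ...}

count, layout = rectangular_pot_calculation(pot_length=20, pot_width=10,
                                            cluster_radius=2)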
markjeee/gammu
python/gammu/exception.py
1
1171
# -*- coding: UTF-8 -*-
# vim: expandtab sw=4 ts=4 sts=4:
'''
Gammu exceptions.
'''
__author__ = 'Michal Čihař'
__email__ = '[email protected]'
__license__ = '''
Copyright © 2003 - 2010 Michal Čihař

This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License version 2 as published by
the Free Software Foundation.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''

import gammu._gammu

# Import base exception
from gammu._gammu import GSMError

# Import all exceptions
for _name in dir(gammu._gammu):
    if not _name.startswith('ERR_'):
        continue
    _temp = __import__('gammu._gammu', globals(), locals(), [_name], -1)
    locals()[_name] = getattr(_temp, _name)

# Cleanup
del _name
del _temp
del gammu
gpl-2.0
9,202,723,884,486,397,000
28.15
76
0.716981
false
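Editor's note: a minimal sketch of how these exceptions are typically caught. The StateMachine/ReadConfig/Init calls are the usual python-gammu entry points and are assumed here, not defined in the module above.

# Hedged usage sketch for gammu.exception
import gammu
import gammu.exception

sm = gammu.StateMachine()
sm.ReadConfig()  # reads the standard gammu configuration
try:
    sm.Init()
except gammu.exception.GSMError as exc:  # base class of the ERR_* exceptions
    print('gammu failed: %s' % exc)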
ethanluoyc/statsnba-playbyplay
tests/models/conftest.py
1
3194
import pytest
import sys
from os import path

from statsnba.models import Game

SAMPLEDATA_DIR = path.join(path.dirname(__file__), 'sample_data/')


def pytest_configure(config):
    sys._called_from_test = True


def pytest_unconfigure(config):
    del sys._called_from_test


def pytest_collect_file(parent, path):
    if path.ext == ".yml" and path.basename.startswith("test"):
        return YamlFile(path, parent)


@pytest.fixture(scope='session')
def sample_boxscore():
    import json
    with open(path.join(SAMPLEDATA_DIR, 'sample_boxscore.json')) as f:
        return json.load(f)


@pytest.fixture(scope='session')
def sample_playbyplay():
    import json
    with open(path.join(SAMPLEDATA_DIR, 'sample_playbyplay.json')) as f:
        return json.load(f)


@pytest.fixture(scope='session')
def sample_game_id():
    return '0020901030'


@pytest.fixture(scope='session')
def game(sample_game_id, sample_playbyplay, sample_boxscore):
    return Game(sample_game_id, sample_boxscore, sample_playbyplay)


class YamlFile(pytest.File):
    def collect(self):
        # Our spec is tree like, so we have to run recursively till bottom
        import yaml
        import json
        with open(path.join(SAMPLEDATA_DIR, 'sample_playbyplay.json'), 'r') as f:
            playbyplay = json.load(f)
        with open(path.join(SAMPLEDATA_DIR, 'sample_boxscore.json')) as f:
            boxscore = json.load(f)
        raw = yaml.safe_load(self.fspath.open())
        tests = []
        for name, spec in raw.items():
            game = Game('0020901030', boxscore=boxscore, playbyplays=playbyplay)
            tests.append(YamlCollector(self, spec, name, game))
        return tests


class YamlCollector(pytest.Collector):
    def __init__(self, parent, target, name, ctx):
        super(YamlCollector, self).__init__(name, parent=parent)
        self.ctx = ctx
        self.target = target

    def _get_ctx_val(self, key):
        # Some fields are recorded as dicts, so use this to retrieve value
        if isinstance(self.ctx, dict):
            return self.ctx[key]
        else:
            return getattr(self.ctx, key)

    def collect(self):
        tests = []
        for name, value in self.target.items():
            if not isinstance(value, dict):
                tests.append(YamlItem(name, self, value, self._get_ctx_val(name)))
            else:
                tests = tests + YamlCollector(self, value, name,
                                              getattr(self.ctx, name)).collect()
        return tests


class YamlItem(pytest.Item):
    # The atomic test
    def __init__(self, name, parent, expected, actual):
        super(YamlItem, self).__init__(name, parent)
        self.expected = expected
        self.actual = actual

    def runtest(self):
        assert self.expected == self.actual


@pytest.fixture(autouse=True)
def use_pytest_tmp_dir(monkeypatch, tmpdir_factory):
    tmp_dir = tmpdir_factory.getbasetemp()
    monkeypatch.setattr('tempfile.mkdtemp', lambda: str(tmp_dir))
    return tmp_dir


@pytest.fixture(scope='session', autouse=True)
def use_requests_cache():
    import requests_cache
    requests_cache.install_cache('test_cache')
mit
-5,725,707,007,558,226,000
27.265487
82
0.636819
false
gltn/stdm
stdm/ui/customcontrols/multi_select_view.py
1
6046
""" /*************************************************************************** Name : MultipleSelectTreeView Description : Custom QListView implementation that displays checkable items. Date : 21/June/2016 copyright : (C) 2016 by UN-Habitat and implementing partners. See the accompanying file CONTRIBUTORS.txt in the root email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from qgis.PyQt.QtCore import ( Qt ) from qgis.PyQt.QtGui import ( QStandardItem, QStandardItemModel, ) from qgis.PyQt.QtWidgets import ( QAbstractItemView, QListView ) from stdm.data.configuration import entity_model from stdm.data.configuration.columns import MultipleSelectColumn class MultipleSelectTreeView(QListView): """ Custom QListView implementation that displays checkable items from a multiple select column type. """ def __init__(self, column, parent=None): """ Class constructor. :param column: Multiple select column object. :type column: MultipleSelectColumn :param parent: Parent widget for the control. :type parent: QWidget """ QListView.__init__(self, parent) # Disable editing of lookup values self.setEditTriggers(QAbstractItemView.NoEditTriggers) self.column = column self._item_model = QStandardItemModel(self) self._value_list = self.column.value_list # Stores lookup objects based on primary keys self._lookup_cache = {} self._initialize() self._association = self.column.association self._first_parent_col = self._association.first_reference_column.name self._second_parent_col = self._association.second_reference_column.name # Association model self._assoc_cls = entity_model(self._association) def reset_model(self): """ Resets the item model. """ self._item_model.clear() self._item_model.setColumnCount(2) def clear(self): """ Clears all items in the model. """ self._item_model.clear() @property def association(self): """ :return: Returns the association object corresponding to the column. :rtype: AssociationEntity """ return self._association @property def value_list(self): """ :return: Returns the ValueList object corresponding to the configured column object. :rtype: ValueList """ return self._value_list @property def item_model(self): """ :return: Returns the model corresponding to the checkable items. :rtype: QStandardItemModel """ return self._item_model def _add_item(self, id, value): """ Adds a row corresponding to id and corresponding value from a lookup table. :param id: Primary key of a lookup record. :type id: int :param value: Lookup value :type value: str """ value_item = QStandardItem(value) value_item.setCheckable(True) id_item = QStandardItem(str(id)) self._item_model.appendRow([value_item, id_item]) def _initialize(self): # Populate list with lookup items self.reset_model() # Add all lookup values in the value list table vl_cls = entity_model(self._value_list) if not vl_cls is None: vl_obj = vl_cls() res = vl_obj.queryObject().all() for r in res: self._lookup_cache[r.id] = r self._add_item(r.id, r.value) self.setModel(self._item_model) def clear_selection(self): """ Unchecks all items in the view. 
""" for i in range(self._item_model.rowCount()): value_item = self._item_model.item(i, 0) if value_item.checkState() == Qt.Checked: value_item.setCheckState(Qt.Unchecked) if value_item.rowCount() > 0: value_item.removeRow(0) def selection(self): """ :return: Returns a list of selected items. :rtype: list """ selection = [] for i in range(self._item_model.rowCount()): value_item = self._item_model.item(i, 0) if value_item.checkState() == Qt.Checked: id_item = self._item_model.item(i, 1) id = int(id_item.text()) # Get item from the lookup cache and append to selection if id in self._lookup_cache: lookup_rec = self._lookup_cache[id] selection.append(lookup_rec) return selection def set_selection(self, models): """ Checks items corresponding to the specified models. :param models: List containing model values in the view for selection. :type models: list """ for m in models: search_value = m.value v_items = self._item_model.findItems(search_value) # Loop through result and check items for vi in v_items: if vi.checkState() == Qt.Unchecked: vi.setCheckState(Qt.Checked)
gpl-2.0
-4,508,056,281,506,664,000
30.821053
80
0.536553
false
psychopy/versions
psychopy/experiment/components/static/__init__.py
1
9050
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Part of the PsychoPy library
Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
Distributed under the terms of the GNU General Public License (GPL).
"""

from __future__ import absolute_import, print_function

from builtins import str
from os import path
from psychopy.experiment.components import BaseComponent, Param, _translate

__author__ = 'Jon Peirce'

# the absolute path to the folder containing this path
thisFolder = path.abspath(path.dirname(__file__))
iconFile = path.join(thisFolder, 'static.png')
tooltip = _translate('Static: Static screen period (e.g. an ISI). '
                     'Useful for pre-loading stimuli.')
_localized = {'Custom code': _translate('Custom code')}


class StaticComponent(BaseComponent):
    """A Static Component, allowing frame rendering to pause.

    E.g., pause while disk is accessed for loading an image
    """
    # override the categories property below
    # an attribute of the class, determines the section in the components panel
    categories = ['Custom']

    def __init__(self, exp, parentName, name='ISI',
                 startType='time (s)', startVal=0.0,
                 stopType='duration (s)', stopVal=0.5,
                 startEstim='', durationEstim=''):
        BaseComponent.__init__(self, exp, parentName, name=name)
        self.updatesList = []  # a list of dicts {compParams, fieldName}
        self.type = 'Static'
        self.url = "http://www.psychopy.org/builder/components/static.html"
        hnt = _translate(
            "Custom code to be run during the static period (after updates)")
        self.params['code'] = Param("", valType='code',
                                    hint=hnt,
                                    label=_localized['Custom code'])
        self.order = ['name']  # make name come first (others don't matter)

        hnt = _translate("How do you want to define your start point?")
        self.params['startType'] = Param(startType, valType='str',
                                         allowedVals=['time (s)', 'frame N'],
                                         hint=hnt)
        hnt = _translate("How do you want to define your end point?")
        _allow = ['duration (s)', 'duration (frames)', 'time (s)', 'frame N']
        self.params['stopType'] = Param(stopType, valType='str',
                                        allowedVals=_allow,  # copy not needed
                                        hint=hnt)
        hnt = _translate("When does the component start?")
        self.params['startVal'] = Param(startVal, valType='code',
                                        allowedTypes=[],
                                        hint=hnt)
        hnt = _translate("When does the component end? (blank is endless)")
        self.params['stopVal'] = Param(stopVal, valType='code',
                                       allowedTypes=[],
                                       updates='constant', allowedUpdates=[],
                                       hint=hnt)
        hnt = _translate("(Optional) expected start (s), purely for "
                         "representing in the timeline")
        self.params['startEstim'] = Param(startEstim, valType='code',
                                          allowedTypes=[],
                                          hint=hnt)
        hnt = _translate("(Optional) expected duration (s), purely for "
                         "representing in the timeline")
        self.params['durationEstim'] = Param(durationEstim, valType='code',
                                             allowedTypes=[],
                                             hint=hnt)

    def addComponentUpdate(self, routine, compName, fieldName):
        self.updatesList.append({'compName': compName,
                                 'fieldName': fieldName,
                                 'routine': routine})

    def remComponentUpdate(self, routine, compName, fieldName):
        # have to do this in a loop rather than a simple remove
        target = {'compName': compName, 'fieldName': fieldName,
                  'routine': routine}
        for item in self.updatesList:
            if item == target:
                self.updatesList.remove(item)

    def writeInitCode(self, buff):
        code = ("%(name)s = clock.StaticPeriod(win=win, "
                "screenHz=expInfo['frameRate'], name='%(name)s')\n")
        buff.writeIndented(code % self.params)

    def writeFrameCode(self, buff):
        self.writeStartTestCode(buff)
        # to get out of the if statement
        buff.setIndentLevel(-1, relative=True)
        self.writeStopTestCode(buff)

    def writeStartTestCode(self, buff):
        """This will be executed as the final component in the routine
        """
        buff.writeIndented("# *%s* period\n" % (self.params['name']))
        BaseComponent.writeStartTestCode(self, buff)

        if self.params['stopType'].val == 'time (s)':
            durationSecsStr = "%(stopVal)s-t" % (self.params)
        elif self.params['stopType'].val == 'duration (s)':
            durationSecsStr = "%(stopVal)s" % (self.params)
        elif self.params['stopType'].val == 'duration (frames)':
            durationSecsStr = "%(stopVal)s*frameDur" % (self.params)
        elif self.params['stopType'].val == 'frame N':
            durationSecsStr = "(%(stopVal)s-frameN)*frameDur" % (self.params)
        else:
            msg = ("Couldn't deduce end point for startType=%(startType)s, "
                   "stopType=%(stopType)s")
            raise Exception(msg % self.params)
        vals = (self.params['name'], durationSecsStr)
        buff.writeIndented("%s.start(%s)\n" % vals)

    def writeStopTestCode(self, buff):
        """Test whether we need to stop
        """
        code = ("elif %(name)s.status == STARTED:  # one frame should "
                "pass before updating params and completing\n")
        buff.writeIndented(code % self.params)
        buff.setIndentLevel(+1, relative=True)  # entered an if statement
        self.writeParamUpdates(buff)
        code = "%(name)s.complete()  # finish the static period\n"
        buff.writeIndented(code % self.params)
        # Calculate stop time
        if self.params['stopType'].val == 'time (s)':
            code = "%(name)s.tStop = %(stopVal)s  # record stop time\n"
        elif self.params['stopType'].val == 'duration (s)':
            code = ("%(name)s.tStop = %(name)s.tStart + %(stopVal)s  "
                    "# record stop time\n")
        elif self.params['stopType'].val == 'duration (frames)':
            code = ("%(name)s.tStop = %(name)s.tStart + %(stopVal)s*frameDur  "
                    "# record stop time\n")
        elif self.params['stopType'].val == 'frame N':
            code = "%(name)s.tStop = %(stopVal)s*frameDur  # record stop time\n"
        else:
            msg = ("Couldn't deduce end point for startType=%(startType)s, "
                   "stopType=%(stopType)s")
            raise Exception(msg % self.params)
        # Store stop time
        buff.writeIndented(code % self.params)
        # to get out of the if statement
        buff.setIndentLevel(-1, relative=True)
        # pass  # the clock.StaticPeriod class handles its own stopping

    def writeParamUpdates(self, buff, updateType=None, paramNames=None):
        """Write updates.

        Unlike most components, which use this method to update themselves,
        the Static Component uses this to update *other* components.
        """
        if updateType == 'set every repeat':
            return  # the static component doesn't need to change itself
        if len(self.updatesList):
            code = "# updating other components during *%s*\n"
            buff.writeIndented(code % self.params['name'])
            for update in self.updatesList:
                # update = {'compName':compName,'fieldName':fieldName,
                #           'routine':routine}
                compName = update['compName']
                fieldName = update['fieldName']
                routine = self.exp.routines[update['routine']]
                if hasattr(compName, 'params'):
                    prms = compName.params  # it's already a compon so get params
                else:
                    # it's a name so get compon and then get params
                    prms = self.exp.getComponentFromName(str(compName)).params
                self.writeParamUpdate(buff, compName=compName,
                                      paramName=fieldName,
                                      val=prms[fieldName],
                                      updateType=prms[fieldName].updates,
                                      params=prms)
            code = "# component updates done\n"

            # Write custom code
            if self.params['code']:
                code += ("# Adding custom code for {name}\n"
                         "{code}\n".format(name=self.params['name'],
                                           code=self.params['code']))

            buff.writeIndentedLines(code)
gpl-3.0
-2,184,813,839,990,873,900
47.138298
98
0.550276
false
wdm0006/git-pandas
examples/lifeline.py
1
2187
from gitpandas import Repository

import numpy as np
import lifelines
import matplotlib.pyplot as plt

plt.style.use('ggplot')

__author__ = 'willmcginnis'


if __name__ == '__main__':
    threshold = 100
    repo = Repository(working_dir='git://github.com/scikit-learn/scikit-learn.git', verbose=True)
    fch = repo.file_change_history(limit=None, include_globs=['*.py'])

    fch['file_owner'] = ''
    fch['refactor'] = 0
    fch['timestamp'] = fch.index.astype(np.int64) // (24 * 3600 * 10**9)
    fch['observed'] = False
    fch = fch.reindex()
    fch = fch.reset_index()

    # add in the file owner and whether or not each item is a refactor
    # (NB: DataFrame.set_value was removed in pandas 1.0; on a current pandas
    # the equivalent is fch.at[idx, col] = val)
    for idx, row in fch.iterrows():
        fch.set_value(idx, 'file_owner', repo.file_owner(row.rev, row.filename))
        if abs(row.insertions - row.deletions) > threshold:
            fch.set_value(idx, 'refactor', 1)
        else:
            fch.set_value(idx, 'refactor', 0)

    # add in the time since column
    fch['time_until_refactor'] = 0
    for idx, row in fch.iterrows():
        ts = None
        chunk = fch[(fch['timestamp'] > row.timestamp) & (fch['refactor'] == 1) & (fch['filename'] == row.filename)]
        if chunk.shape[0] > 0:
            ts = chunk['timestamp'].min()
            fch.set_value(idx, 'observed', True)
        else:
            ts = fch['timestamp'].max()
        fch.set_value(idx, 'time_until_refactor', ts - row.timestamp)

    # fch.to_csv('lifelines_data_t_%s.csv' % (threshold, ))
    # fch = pd.read_csv('lifelines_data_t_%s.csv' % (threshold, ))

    # plot out some survival curves
    fig = plt.figure()
    ax = plt.subplot(111)
    for filename in set(fch['file_owner'].values):
        sample = fch[fch['file_owner'] == filename]
        if sample.shape[0] > 500:
            print('Evaluating %s' % (filename, ))
            kmf = lifelines.KaplanMeierFitter()
            kmf.fit(sample['time_until_refactor'].values,
                    event_observed=sample['observed'],
                    timeline=list(range(365)),
                    label=filename)
            ax = kmf.survival_function_.plot(ax=ax)

    plt.title('Survival function of file owners (thres=%s)' % (threshold, ))
    plt.xlabel('Lifetime (days)')
    plt.show()
bsd-3-clause
-6,093,378,501,094,915,000
34.852459
135
0.600366
false
psychopy/psychopy
psychopy/visual/backends/_base.py
1
14757
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).

"""A Backend class defines the core low-level functions required by a Window
class, such as the ability to create an OpenGL context and flip the window.

Users simply call visual.Window(..., winType='pyglet') and the winType is then
used by backends.getBackend(winType) which will locate the appropriate class
and initialize an instance using the attributes of the Window.
"""

from __future__ import absolute_import, print_function

import weakref
from abc import ABC, abstractmethod
import numpy as np

from psychopy import logging
from psychopy.tools.attributetools import attributeSetter


class BaseBackend(ABC):
    """The backend abstract base class that defines all the core low-level
    functions required by a :class:`~psychopy.visual.Window` class. Such
    functions as the ability to create an OpenGL context, process events, and
    flip the window.

    Sub-classes of this function must implement the abstract methods shown
    here to be complete.

    Users simply call visual.Window(..., winType='pyglet') and the `winType`
    is then used by `backends.getBackend(winType)` which will locate the
    appropriate class and initialize an instance using the attributes of the
    Window.
    """
    # define GL here as a class attribute that includes all the opengl funcs
    # e.g. GL = pyglet.gl

    # define the name of the backend, used to register the name to use when
    # specifying `winType`
    # e.g. winTypeName = 'custom'

    def __init__(self, win):
        """Set up the backend window according the params of the PsychoPy win

        Before PsychoPy 1.90.0 this code was executed in Window._setupPyglet()

        :param: win is a PsychoPy Window (usually not fully created yet)
        """
        self.win = win  # this will use the @property to make/use a weakref
        super().__init__()

    @abstractmethod
    def swapBuffers(self):
        """Swap the front and back framebuffers (i.e. flip the window).

        (The original docstring here duplicated the gamma setter's text.)
        """
        raise NotImplementedError(
            "Backend has failed to override a necessary method")

    @abstractmethod
    def setCurrent(self):
        """Sets this window to be the current rendering target (for backends
        where 2 windows are permitted, e.g. not pygame)
        """
        pass

    @attributeSetter
    def gamma(self, gamma):
        """Set the gamma table for the graphics card

        :param gamma: a single value or a triplet for separate RGB gamma values
        """
        self.__dict__['gamma'] = gamma
        raise NotImplementedError(
            "Backend has failed to override a necessary method")

    @attributeSetter
    def gammaRamp(self, gammaRamp):
        """Gets the gamma ramp or sets it to a new value (an Nx3 or Nx1 array)
        """
        self.__dict__['gammaRamp'] = gammaRamp
        raise NotImplementedError(
            "Backend has failed to override a necessary method")

    @property
    def shadersSupported(self):
        """This is a read-only property indicating whether or not this backend
        supports OpenGL shaders"""
        raise NotImplementedError(
            "Backend has failed to override a necessary method")

    # Optional, depending on backend needs

    def dispatchEvents(self):
        """This method is not needed for all backends but for engines with an
        event loop it may be needed to pump for new events (e.g. pyglet)
        """
        logging.warning("dispatchEvents() method in {} was called "
                        "but is not implemented. Is it needed?"
                        .format(self.win.winType))

    def onResize(self, width, height):
        """A method that will be called if the window detects a resize event
        """
        # (the original warning named dispatchEvents() here; fixed)
        logging.warning("onResize() method in {} was called "
                        "but is not implemented. Is it needed?"
                        .format(self.win.winType))

    # Helper methods that don't need converting

    @property
    def win(self):
        """The PsychoPy Window that this backend is supporting, which provides
        various important variables (like size, units, color etc).

        NB win is stored as a weakref to a psychopy.window and this property
        helpfully converts it back to a regular object so you don't need to
        think about it!
        """
        ref = self.__dict__['win']
        return ref()

    @win.setter
    def win(self, win):
        """The PsychoPy Window that this backend is supporting, which provides
        various important variables (like size, units, color etc).

        NB win is stored as a weakref to a psychopy.window and this property
        helpfully converts it back to a regular object so you don't need to
        think about it!
        """
        self.__dict__['win'] = weakref.ref(win)

    @property
    def autoLog(self):
        """If the window has logging turned on then backend should too"""
        return self.win.autoLog

    @property
    def name(self):
        """Name of the backend is only used for logging purposes"""
        return "{}_backend".format(self.win.name)

    # ------------------------------------------------------------------------
    # Window unit conversion
    #

    def _windowToBufferCoords(self, pos):
        """Convert window coordinates to OpenGL buffer coordinates.

        The standard convention for window coordinates is that the origin is
        at the top-left corner. The `y` coordinate increases in the downwards
        direction. OpenGL places the origin at bottom left corner, where `y`
        increases in the upwards direction.

        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in window coordinates.

        Returns
        -------
        ndarray
            Position `(x, y)` in buffer coordinates.

        """
        # This conversion is typical for many frameworks. If the framework
        # uses some other convention, that backend class should override this
        # method to ensure `_windowToPixCoords` returns the correct value.
        #
        return np.array(
            (pos[0], self.win.size[1] - pos[1]), dtype=np.float32)

    def _bufferToWindowCoords(self, pos):
        """OpenGL buffer coordinates to window coordinates.

        This is the inverse of `_windowToBufferCoords`.

        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in buffer coordinates.

        Returns
        -------
        ndarray
            Position `(x, y)` in window coordinates.

        """
        # (the Parameters/Returns labels were swapped in the original
        # docstring; corrected above to match the conversion direction)
        return np.array(
            (pos[0], -pos[1] + self.win.size[1]), dtype=np.float32)

    def _windowCoordsToPix(self, pos):
        """Convert window coordinates to the PsychoPy 'pix' coordinate system.
        This puts the origin at the center of the window.

        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in window coordinates.

        Returns
        -------
        ndarray
            Position `(x, y)` in PsychoPy pixel coordinates.

        """
        return np.asarray(
            self._windowToBufferCoords(pos) - self.win.size / 2.0,
            dtype=np.float32)

    def _pixToWindowCoords(self, pos):
        """Convert PsychoPy 'pix' to the window coordinate system. This is the
        inverse of `_windowToPixCoords`.

        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in PsychoPy pixel coordinates.

        Returns
        -------
        ndarray
            Position `(x, y)` in window coordinates.

        """
        return self._bufferToWindowCoords(
            np.asarray(pos, dtype=np.float32) + self.win.size / 2.0)

    # ------------------------------------------------------------------------
    # Mouse related methods (e.g., event handlers)
    #
    # These methods are used to handle mouse events. Each function is bound to
    # the appropriate callback which registers the mouse event with the global
    # mouse event handler (psychopy.hardware.mouse.Mouse). Each callback has
    # an `*args` parameter which allows the backend to pass whatever
    # parameters.
    #

    @abstractmethod
    def onMouseButton(self, *args, **kwargs):
        """Event handler for any mouse button event (pressed and released).

        This is used by backends which combine both button state changes into
        a single event. Usually this would pass events to the appropriate
        `onMouseButtonPress` and `onMouseButtonRelease` methods.
        """
        raise NotImplementedError(
            "`onMouseButton` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseButtonPress(self, *args, **kwargs):
        """Event handler for mouse press events. This handler can also be used
        for release events if the backend passes all button events to the same
        callback.
        """
        raise NotImplementedError(
            "`onMouseButtonPress` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseButtonRelease(self, *args, **kwargs):
        """Event handler for mouse release events."""
        raise NotImplementedError(
            "`onMouseButtonRelease` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseScroll(self, *args, **kwargs):
        """Event handler for mouse scroll events. Called when the mouse scroll
        wheel is moved."""
        raise NotImplementedError(
            "`onMouseScroll` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseMove(self, *args, **kwargs):
        """Event handler for mouse move events."""
        raise NotImplementedError(
            "`onMouseMove` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseEnter(self, *args, **kwargs):
        """Event called when the mouse enters the window. Some backends might
        combine enter and leave events to the same callback, this will handle
        both if so.
        """
        raise NotImplementedError(
            "`onMouseEnter` is not yet implemented for this backend.")

    @abstractmethod
    def onMouseLeave(self, *args, **kwargs):
        """Event called when a mouse leaves the window."""
        raise NotImplementedError(
            "`onMouseLeave` is not yet implemented for this backend.")

    @abstractmethod
    def getMousePos(self):
        """Get the position of the mouse on the current window.

        Returns
        -------
        ndarray
            Position `(x, y)` in window coordinates.

        """
        raise NotImplementedError(
            "`getMousePos` is not yet implemented for this backend.")

    @abstractmethod
    def setMousePos(self, pos):
        """Set/move the position of the mouse on the current window.

        Parameters
        ----------
        pos : ArrayLike
            Position `(x, y)` in window coordinates.

        """
        raise NotImplementedError(
            "`setMousePos` is not yet implemented for this backend.")

    def setMouseType(self, name='arrow'):
        """Change the appearance of the cursor for this window. Cursor types
        provide contextual hints about how to interact with on-screen objects.

        **Deprecated!** Use `setMouseCursor` instead.

        Parameters
        ----------
        name : str
            Type of standard cursor to use.

        """
        self.setMouseCursor(name)

    @abstractmethod
    def setMouseCursor(self, cursorType='default'):
        """Change the appearance of the cursor for this window. Cursor types
        provide contextual hints about how to interact with on-screen objects.

        The graphics used 'standard cursors' provided by the operating system.
        They may vary in appearance and hot spot location across platforms.
        The following names are valid on most platforms:

        * ``arrow`` or ``default`` : Default system pointer.
        * ``ibeam`` or ``text`` : Indicates text can be edited.
        * ``crosshair`` : Crosshair with hot-spot at center.
        * ``hand`` : A pointing hand.
        * ``hresize`` : Double arrows pointing horizontally.
        * ``vresize`` : Double arrows pointing vertically.
        * ``help`` : Arrow with a question mark beside it (Windows only).
        * ``no`` : 'No entry' sign or circle with diagonal bar.
        * ``size`` : Vertical and horizontal sizing.
        * ``downleft`` or ``upright`` : Double arrows pointing diagonally with
          positive slope (Windows only).
        * ``downright`` or ``upleft`` : Double arrows pointing diagonally with
          negative slope (Windows only).
        * ``lresize`` : Arrow pointing left (Mac OS X only).
        * ``rresize`` : Arrow pointing right (Mac OS X only).
        * ``uresize`` : Arrow pointing up (Mac OS X only).
        * ``dresize`` : Arrow pointing down (Mac OS X only).
        * ``wait`` : Hourglass (Windows) or watch (Mac OS X) to indicate the
          system is busy.
        * ``waitarrow`` : Hourglass beside a default pointer (Windows only).

        In cases where a cursor is not supported, the default for the system
        will be used.

        Parameters
        ----------
        cursorType : str
            Type of standard cursor to use. If not specified, `'default'` is
            used.

        Notes
        -----
        * On some platforms the 'crosshair' cursor may not be visible on
          uniform grey backgrounds.

        """
        raise NotImplementedError(
            "`setMouseCursor` is not yet implemented for this backend.")

    @abstractmethod
    def setMouseVisibility(self, visible):
        """Set mouse visibility.

        Parameters
        ----------
        visible : bool
            Mouse visibility mode.

        """
        raise NotImplementedError(
            "`setMouseVisibility` is not yet implemented for this backend.")

    @abstractmethod
    def setMouseExclusive(self, exclusive):
        """Set mouse exclusivity.

        Parameters
        ----------
        exclusive : bool
            Mouse exclusivity mode.

        """
        raise NotImplementedError(
            "`setMouseExclusive` is not yet implemented for this backend.")


if __name__ == "__main__":
    pass
gpl-3.0
-6,047,814,411,506,508,000
34.473558
80
0.61564
false
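Editor's note: a minimal skeleton of what a concrete subclass of BaseBackend above must provide. Only the abstract names declared in the file are used; every body is a placeholder, and this is not PsychoPy's actual pyglet/glfw implementation.

# A hedged sketch: the smallest class that satisfies BaseBackend's ABC
from psychopy.visual.backends._base import BaseBackend

class NullBackend(BaseBackend):
    GL = None                # real backends expose their OpenGL module here
    winTypeName = 'null'     # the name used with visual.Window(winType=...)

    def swapBuffers(self): pass          # would flip front/back buffers
    def setCurrent(self): pass           # would make this window the GL target
    # mouse-event hooks required by the ABC
    def onMouseButton(self, *args, **kwargs): pass
    def onMouseButtonPress(self, *args, **kwargs): pass
    def onMouseButtonRelease(self, *args, **kwargs): pass
    def onMouseScroll(self, *args, **kwargs): pass
    def onMouseMove(self, *args, **kwargs): pass
    def onMouseEnter(self, *args, **kwargs): pass
    def onMouseLeave(self, *args, **kwargs): pass
    def getMousePos(self): return (0, 0)
    def setMousePos(self, pos): pass
    def setMouseCursor(self, cursorType='default'): pass
    def setMouseVisibility(self, visible): pass
    def setMouseExclusive(self, exclusive): pass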
rionbr/transparencia_harvester
transparencia_harvester/orgao.py
1
2003
# -*- coding: utf-8 -*-
"""
Orgão
===================

"""
# Copyright (C) 2016 by
# Rion Brattig Correia <[email protected]>
# All rights reserved.
# MIT license.

import numpy as np
import pandas as pd
import requests
from lxml import etree
import urlparse

from db import DB  # was commented out in the original, but DB() is used below

__name__ = 'orgao'  # NOTE: this shadows the module's real __name__, so the
                    # __main__ guard at the bottom never fires when run directly
__author__ = """\n""".join(['Rion Brattig Correia <[email protected]>'])
__all__ = ['Orgao']


#
#
#
class Orgao(object):
    """
    """

    def __init__(self, verbose=False):
        self.url = 'http://www.portaldatransparencia.gov.br/servidores/OrgaoExercicio-ListaOrgaos.asp?CodOS=%d&Pagina=%d'
        self.verbose = verbose

    def harvest(self, codOS):
        """
        """
        #
        # Collect the first page, and identify how many pages there are
        #
        page = requests.get(self.url % (codOS, 1))
        tree = etree.HTML(page.content, etree.HTMLParser(encoding="windows-1252"))
        _, n_pages = tree.xpath('//div[@id="paginacao"]/p[@class="paginaAtual"]')[0].text.split('/')

        if self.verbose:
            print('Pages found: %d' % (int(n_pages)))

        #
        # Loop all Pages to insert information into DB
        #
        for i_page in np.arange(1, int(n_pages)+1):
            if self.verbose:
                print('Page: %d/%d' % (i_page, int(n_pages)))

            if i_page > 1:
                page = requests.get(self.url % (codOS, i_page))
                tree = etree.HTML(page.content, etree.HTMLParser(encoding="windows-1252"))

            lista = tree.xpath('//div[@id="listagem"]/table')[0]

            items = []
            for i, item in enumerate(lista, start=0):
                if i == 0:
                    continue
                _id, name, n_servidores = item.xpath('td')
                items.append((int(_id.text), codOS, name[0].text.title(), int(n_servidores.text)))

            # To DataFrame
            df = pd.DataFrame(items, columns=['id_orgao', 'id_orgao_superior', 'name', 'n_servidores'])

            # Insert into MySQL
            status = DB().DataFrameToMySQL(df, table='orgao')

    def getDF(self):
        """
        """
        return DB().MySQLToDataFrame("SELECT * from orgao", index_col='id_orgao')


#
#
#
if __name__ == '__main__':
    Orgao(verbose=True).harvest(codOS=15000)
mit
-1,102,174,417,294,426,800
20.526882
115
0.618881
false
Enucatl/pypes
pypes/plugins/nm_function.py
1
3356
"""Any function from n inputs to m outputs""" import logging from itertools import zip_longest import pypes.component log = logging.getLogger(__name__) def default_function(*args): "pass" return args class NMFunction(pypes.component.Component): """ mandatory input packet attributes: - data: for each of the input ports parameters: - function: [default: merge the inputs into a list if more than one input, then replicate over all the outputs] output packet attributes: - data: each of the M outputs goes to an output port """ # defines the type of component we're creating. __metatype__ = 'TRANSFORMER' def __init__(self, n=1, m=1): # initialize parent class pypes.component.Component.__init__(self) # Optionally add/remove component ports # self.remove_output('out') self._n = n self._m = m self._in_ports = ["in"] self._out_ports = ["out"] if n > 1: self._in_ports += ["in{0}".format(i) for i in range(1, n)] for port in self._in_ports: self.add_input(port, 'input') if m > 1: self._out_ports += ["out{0}".format(i) for i in range(1, m)] for port in self._out_ports: self.add_output(port, 'output') # Setup any user parameters required by this component # 2nd arg is the default value, 3rd arg is optional list of choices self.set_parameter('function', default_function) # log successful initialization message log.debug('Component Initialized: %s', self.__class__.__name__) def run(self): # Define our components entry point while True: function = self.get_parameter('function') name = function.__name__ packets = [self.receive(port) for port in self._in_ports] try: args = [packet.get("data") for packet in packets] log.debug("%s: args %s", name, args) results = function(*args) log.debug("%s: results %s", name, results) if self._m == 1: packet = packets[0] packet.set("data", results[0]) self.send("out", packet) elif self._m > 1 and len(results) <= self._m: for result, port in zip_longest(results, self._out_ports, fillvalue=results[-1]): packet = pypes.packet.Packet() for key, value in packets[0]: packet.set(key, value) packet.set("data", result) log.debug("%s: sending %s to %s", name, packet.get("data"), port) self.send(port, packet) else: raise ValueError("too many results!") except: log.error('Component Failed: %s', name, exc_info=True) # yield the CPU, allowing another component to run self.yield_ctrl()
apache-2.0
5,331,688,705,115,168,000
33.244898
75
0.494041
false
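Editor's note: a hedged wiring sketch for NMFunction; only the constructor and set_parameter shown above are used, and the actual pipeline scheduling is pypes machinery not shown here. One subtlety visible in run(): the configured function should return an indexable sequence, since results[0] is taken directly.

# Hedged usage sketch for NMFunction
from pypes.plugins.nm_function import NMFunction

def add(a, b):
    # must return a sequence: run() indexes results[0] / iterates results
    return (a + b,)

adder = NMFunction(n=2, m=1)          # ports: "in", "in1" -> "out"
adder.set_parameter('function', add)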
bmentges/brainiak_api
resources/ontologies/scripts/deploy_data.py
1
1346
import subprocess

import paramiko

DATABASE_HOST = "qa1.virtuoso.globoi.com"
DATABASE_USER = "dev"
DATABASE_PASSWORD = "dev"
DATABASE_PORT = "1111"
GRAPH = "http://semantica.globo.com/person/"

SSH_USER = 'virtuoso'
SSH_PWD = 'virtuoso'

LOCALFILE = 'data/person.ttl'
#REMOTEFILE = '/tmp/person.ttl'
REMOTEFILE = '/opt/semantica/virtuoso_ops/var/lib/virtuoso/db/person.ttl'

# Setup of connection with remote server
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(DATABASE_HOST, username=SSH_USER, password=SSH_PWD)
sftp = ssh.open_sftp()

# (1) Transfer TTL from local to remote server
sftp.put(LOCALFILE, REMOTEFILE)

# (2) Parse TTL and place its triples in the remote server
# For more info: http://docs.openlinksw.com/virtuoso/fn_ttlp_mt_local_file.html
isql = "isql -U %(user)s -P %(pwd)s -H %(host)s -S %(port)s" %\
    {"user": DATABASE_USER, "pwd": DATABASE_PASSWORD,
     "host": DATABASE_HOST, "port": DATABASE_PORT}
isql_cmd = "DB.DBA.TTLP_MT_LOCAL_FILE('%s', '', '%s');" % (REMOTEFILE, GRAPH)
cmd = '%s < "%s"' % (isql, isql_cmd)
process = subprocess.Popen(cmd, shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_value, stderr_value = process.communicate()

# (3) Remove TTL from remote server
sftp.remove(REMOTEFILE)

# Close connections
sftp.close()
ssh.close()
gpl-2.0
5,922,306,472,382,499,000
28.911111
91
0.711738
false
ColeFrench/python-analysis
CORGIS/weather/analysis/storage.py
1
1536
#!/usr/bin/env python3

import shelve


class Weather:
    db_path = 'locations'

    def __init__(self, *args, **kwargs):
        self._set_data(args[0])
        self._geolocation = None

        if len(args) == 2:
            self.set_location(args[1])

    def _set_data(self, data):
        for (k, v) in data.items():
            if isinstance(v, dict):
                self._set_data(v)
            else:
                setattr(self, '_{}'.format(k.lower().replace(' ', '_')), v)

    def get_data(self, k):
        return getattr(self, '_{}'.format(k.lower().replace(' ', '_')))

    def get_location(self):
        return self._geolocation

    def set_location(self, geolocator):
        location = self.get_data('Location')

        with shelve.open(self.__class__.db_path) as db:
            if location in db:
                self._geolocation = db[location]
            else:
                self._geolocation = geolocator.geocode(location)
                db[location] = self._geolocation


def get_by_coords(weather_objs, latitude=0, latitude_tolerance=90,
                  longitude=0, longitude_tolerance=180):
    new_weather_objs = []

    for weather_obj in weather_objs:
        location = weather_obj.get_location()
        if (latitude - latitude_tolerance
                <= location.latitude
                <= latitude + latitude_tolerance
                and longitude - longitude_tolerance
                <= location.longitude
                <= longitude + longitude_tolerance):
            new_weather_objs.append(weather_obj)

    return new_weather_objs
mit
-6,422,435,209,303,905,000
29.72
79
0.566406
false
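Editor's note: a hedged usage sketch for storage.py above. geopy is an assumption; the module only requires an object with a .geocode(name) method returning something with .latitude/.longitude attributes.

# Hedged usage sketch for storage.py (geopy assumed, sample data hypothetical)
from geopy.geocoders import Nominatim

geolocator = Nominatim(user_agent='corgis-weather-example')
w = Weather({'Data': {'Temperature': {'Avg Temp': 55}},
             'Location': 'Seattle, WA'},
            geolocator)                      # second arg triggers set_location()
nearby = get_by_coords([w],
                       latitude=47.6, latitude_tolerance=1.0,
                       longitude=-122.3, longitude_tolerance=1.0)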
jamesabel/mops
mops/logger.py
1
1529
import os

import appdirs

import logging
import logging.handlers

import mops.const
import mops.util

fh = None
ch = None
log = None
log_folder = None


def init(log_folder_param=None):
    global fh, ch, log, log_folder

    if log_folder_param:
        log_folder = log_folder_param  # mainly for testing
    else:
        log_folder = calculate_log_folder()
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)

    log = logging.getLogger(mops.const.APPLICATION)
    log.setLevel(logging.DEBUG)

    # create file handler
    fh = logging.handlers.RotatingFileHandler(
        os.path.join(log_folder, mops.const.LOG_FILE_NAME),
        maxBytes=20*1E6, backupCount=3)
    #fh = logging.FileHandler(LOG_FILE_NAME)
    fh.setLevel(logging.INFO)

    # create console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)

    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(filename)s - %(funcName)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    # add the handlers to the logger
    log.addHandler(fh)
    log.addHandler(ch)

    return log_folder


def get_log_folder():
    return log_folder


def set_file_log_level(new_level):
    fh.setLevel(new_level)


def set_console_log_level(new_level):
    ch.setLevel(new_level)


def calculate_log_folder():
    return os.path.join(appdirs.user_log_dir(mops.const.APPLICATION, mops.const.COMPANY))
gpl-3.0
456,126,684,004,530,240
22.90625
119
0.671027
false
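Editor's note: a short usage sketch based only on the functions defined in logger.py above (APPLICATION, COMPANY and LOG_FILE_NAME come from mops.const).

# Usage sketch for mops.logger
import logging
import mops.logger

folder = mops.logger.init()                     # create handlers, return log dir
mops.logger.set_console_log_level(logging.INFO)
mops.logger.log.info('writing logs under %s', folder)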
singingwolfboy/seamless-karma
seamless_karma/restful/decorators.py
1
5677
# coding=utf-8
from __future__ import unicode_literals
import re
from functools import wraps
from six.moves.urllib.parse import urlsplit
from textwrap import dedent
import sqlalchemy as sa
from seamless_karma.extensions import db
from flask import request
from flask.ext.restful import abort, marshal
from .utils import update_url_query


def parse_sqlalchemy_exception(exception, model=None):
    """
    Given a SQLAlchemy exception, return a string to nicely display to
    the client that explains the error.
    """
    message = exception.orig.args[0]
    if not model:
        return message

    if db.engine.name == 'postgresql':
        unique_re_strs = [
            r"""
            duplicate key value violates unique constraint "[^"]+"
            DETAIL:  Key \((?P<column>[^)]+)\)=\((?P<value>[^)]+)\) already exists.
            """
        ]
        not_null_re_strs = [
            r"""
            null value in column "(?P<column>\w+)" violates not-null constraint
            """,
        ]
        UNIQUE_RES = [re.compile(dedent(s).strip()) for s in unique_re_strs]
        NOT_NULL_RES = [re.compile(dedent(s).strip()) for s in not_null_re_strs]
    else:  # sqlite
        unique_re_strs = [
            r"column (?P<column>\w+) is not unique",
            r"UNIQUE constraint failed: (?P<table>\w+)\.(?P<column>\w+)",
        ]
        not_null_re_strs = [
            r"(?P<table>\w+)\.(?P<column>\w+) may not be NULL",
            r"NOT NULL constraint failed: (?P<table>\w+)\.(?P<column>\w+)",
        ]
        UNIQUE_RES = [re.compile(dedent(s).strip()) for s in unique_re_strs]
        NOT_NULL_RES = [re.compile(dedent(s).strip()) for s in not_null_re_strs]

    for regex in UNIQUE_RES:
        match = regex.search(message)
        if match:
            column = match.group("column")
            try:
                value = match.group("value")
            except IndexError:
                value = request.form.get(column)
            return "{model} with {column} {value} already exists".format(
                model=model.__name__, column=column, value=value
            )

    for regex in NOT_NULL_RES:
        match = regex.search(message)
        if match:
            column = match.group("column")
            return "{model} must have {column} specified".format(
                model=model.__name__, column=column
            )

    return message


def handle_sqlalchemy_errors(model=None):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except sa.exc.SQLAlchemyError as e:
                message = parse_sqlalchemy_exception(e, model)
                abort(400, message=message)
        return wrapper
    decorator.__name__ = str("handle_sqlalchemy_errors")
    return decorator


def resource_list(model, marshal_fields, default_limit=50, max_limit=200,
                  parser=None):
    def outer(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # parse values before processing function
            limit = default_limit
            if "limit" in request.values:
                try:
                    limit = int(request.values["limit"])
                except ValueError:
                    abort(400, "limit must be an integer, not {!r}".format(
                        request.values["limit"]))
                if limit < 1:
                    abort(400, "limit must be greater than 0")
                if max_limit and limit > max_limit:
                    abort(400, "maximum limit is {}".format(max_limit))

            offset = None
            if "offset" in request.values:
                try:
                    offset = int(request.values["offset"])
                except ValueError:
                    # (the original message echoed request.values["limit"]
                    # here; fixed to report the offending offset value)
                    abort(400, "offset must be an integer, not {!r}".format(
                        request.values["offset"]))
                if offset < 0:
                    abort(400, "offset cannot be negative")

            orders = []
            if "order" in request.values:
                for order_str in request.values["order"].split(','):
                    if not hasattr(model, order_str):
                        abort(400, "cannot order on attribute {!r}".format(order_str))
                    orders.append(getattr(model, order_str))
            elif hasattr(model, "id"):
                orders.append(model.id)

            # process the function
            query = func(*args, **kwargs)

            # allow users to filter by parser fields
            if parser:
                for name, value in parser.parse_args().items():
                    if hasattr(model, name) and value is not None:
                        query = query.filter(getattr(model, name) == value)

            # build the results
            count = query.count()
            results = query.order_by(*orders).limit(limit).offset(offset).all()
            output = {
                "count": count,
                "data": marshal(results, marshal_fields),
            }

            # just get path and query args from URL
            scheme, netloc, path, query, fragment = urlsplit(request.url)
            url = "{path}?{query}".format(path=path, query=query)

            offset = offset or 0
            if count > offset + limit:
                output["next"] = update_url_query(url, offset=offset+limit)
            if offset > 0:
                new_offset = offset - limit
                if new_offset <= 0:
                    new_offset = None
                output["prev"] = update_url_query(url, offset=new_offset)

            return output
        return inner
    return outer
mit
5,202,805,049,591,452,000
36.846667
87
0.529857
false
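Editor's note: a hedged sketch of how resource_list appears to be consumed: the decorated Flask-RESTful get() returns a SQLAlchemy query, and the decorator applies the limit/offset/order request args and marshals the page. `User` and `user_fields` are hypothetical stand-ins, not part of the module above.

# Hedged usage sketch for resource_list (User is a hypothetical model)
from flask.ext.restful import Resource, fields

user_fields = {'id': fields.Integer, 'username': fields.String}

class UserList(Resource):
    @resource_list(User, user_fields, default_limit=25)
    def get(self):
        return User.query  # a query, not a list; the decorator paginates it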
simplegeo/clusto-sgext
sgext/commands/ec2_report.py
1
1161
#!/usr/bin/env python
from clusto import script_helper
import clusto
import sgext
from sgext.drivers import EC2Zone

import csv
import sys


class EC2Report(script_helper.Script):
    def run(self, args):
        keys = args.keys.split(",")
        writer = csv.writer(sys.stdout)
        writer.writerow(['name', 'zone'] + keys)
        for entity in clusto.get_from_pools(args.pools):
            attrs = [entity.name,
                     entity.parents(clusto_types=[EC2Zone])[0].name]
            for key in keys:
                k, sk = key.split('_', 1)
                attrs += [unicode(x).strip()
                          for x in entity.attr_values(key=k, subkey=sk)]
            writer.writerow(attrs)

    def _add_arguments(self, parser):
        parser.add_argument("-k", "--keys", dest="keys", required=True,
                            help="Comma-delimited list of keys to report on")
        parser.add_argument('pools', nargs='*')

    def add_subparser(self, subparsers):
        parser = self._setup_subparser(subparsers)
        self._add_arguments(parser)


def main():
    ec2report, args = script_helper.init_arguments(EC2Report)
    return ec2report.run(args)


if __name__ == '__main__':
    sys.exit(main())
bsd-2-clause
2,230,970,096,727,505,000
30.378378
121
0.6236
false
ntucllab/libact
libact/query_strategies/multilabel/binary_minimization.py
1
2573
"""Binary Minimization """ import copy import numpy as np from libact.base.dataset import Dataset from libact.base.interfaces import QueryStrategy, ContinuousModel from libact.utils import inherit_docstring_from, seed_random_state, zip from libact.models.multilabel import BinaryRelevance, DummyClf class BinaryMinimization(QueryStrategy): r"""Binary Version Space Minimization (BinMin) Parameters ---------- base_clf : ContinuousModel object instance The base learner for binary relavance. random_state : {int, np.random.RandomState instance, None}, optional (default=None) If int or None, random_state is passed as parameter to generate np.random.RandomState instance. if np.random.RandomState instance, random_state is the random number generate. Attributes ---------- Examples -------- Here is an example of declaring a BinaryMinimization query_strategy object: .. code-block:: python from libact.query_strategies.multilabel import BinaryMinimization from sklearn.linear_model import LogisticRegression qs = BinaryMinimization( dataset, # Dataset object br_base=LogisticRegression() ) References ---------- .. [1] Brinker, Klaus. "On active learning in multi-label classification." From Data and Information Analysis to Knowledge Engineering. Springer Berlin Heidelberg, 2006. 206-213. """ def __init__(self, dataset, base_clf, random_state=None): super(BinaryMinimization, self).__init__(dataset) self.n_labels = len(self.dataset.data[0][1]) self.base_clf = base_clf self.random_state_ = seed_random_state(random_state) @inherit_docstring_from(QueryStrategy) def make_query(self): dataset = self.dataset X, Y = dataset.get_labeled_entries() Y = np.array(Y) unlabeled_entry_ids, X_pool = dataset.get_unlabeled_entries() X_pool = np.array(X_pool) clfs = [] boundaries = [] for i in range(self.n_labels): if len(np.unique(Y[:, i])) == 1: clf = DummyClf() else: clf = copy.deepcopy(self.base_clf) clf.train(Dataset(X, Y[:, i])) boundaries.append(np.abs(clf.predict_real(X_pool)[:, 1])) clfs.append(clf) choices = np.where(np.array(boundaries) == np.min(boundaries))[1] ask_id = self.random_state_.choice(choices) return unlabeled_entry_ids[ask_id]
bsd-2-clause
-7,095,860,874,748,604,000
30.378049
87
0.634279
false
EndyKaufman/django-postgres-angularjs-blog
app/file/resource.py
1
2500
# -*- coding: utf-8 -*-
from project import helpers
from django.db.models import Q
from models import File


def get_fields():
    return [f.name for f in File._meta.get_fields()]


def create(request):
    """Create record"""
    data = request.POST
    user = helpers.get_user(request)

    data = helpers.set_null_values_if_not_exist(data, get_fields())

    if request.FILES and request.FILES.get('file'):
        if user.is_superuser:
            url = helpers.save_file(False, request.FILES.get('file'))
        else:
            url = helpers.save_file(str(user.id), request.FILES.get('file'))
    else:
        url = ''

    item, created = File.objects.get_or_create(src=url)
    if created:
        helpers.json_to_objects(item, data)
        item.created_user = user
        item.save()

    return {'code': 'ok', 'data': helpers.objects_to_json(request, [item])}, 200, item


def update(request, file_id):
    """Update record"""
    data = request.DATA

    data = helpers.set_null_values_if_not_exist(data, get_fields())

    try:
        item = File.objects.get(pk=file_id)
    except File.DoesNotExist:
        return {'code': 'file/not_found', 'values': [file_id]}, 404, False

    helpers.json_to_objects(item, data)
    item.save()

    return {'code': 'ok', 'data': helpers.objects_to_json(request, [item])}, 200, item


def delete(request, file_id):
    """Delete record"""
    try:
        item = File.objects.get(pk=file_id)
    except File.DoesNotExist:
        return {'code': 'file/not_found', 'values': [file_id]}, 404

    helpers.remove_file(item.src)

    item.delete()

    return {'code': 'ok'}, 200


def get_item(request, file_id):
    try:
        item = File.objects.get(pk=file_id)
    except File.DoesNotExist:
        return {'code': 'file/not_found', 'values': [file_id]}, 404, False

    return {'code': 'ok', 'data': helpers.objects_to_json(request, [item])}, 200, item


def get_list(request):
    items = File.objects.all().order_by('created').all()
    return {'code': 'ok', 'data': helpers.objects_to_json(request, items)}, 200, items


def get_search(request, search_text):
    if search_text == 'all':
        return get_list(request)
    else:
        items = File.objects.filter(
            helpers.get_searching_all_fields_qs(File, search_text)
        ).order_by('created').all()
        return {'code': 'ok', 'data': helpers.objects_to_json(request, items)}, 200, items
mit
-8,550,994,325,728,797,000
25.88172
90
0.594
false
stickybath/BetaMaleBot
src/core/utilTelegram/authorize.py
2
2599
def authorize(bot, update, args):
    """Authorizes a user.

    Authorizes a user to make modifications to the bot if called by an
    already authorized user.

    Syntax:
        /authorize <id> <name>

    Args:
        id: Telegram user ID of user to authorize.
        name: Telegram user name of user to authorize.

    Returns:
        Failure: -1
        Success: 0
    """
    import declarations
    import psycopg2
    from util.dbConnect import dbConnect

    #Initialize function
    try:
        id = int(args[0])
        name = args[1]
    except:
        bot.sendMessage(chat_id = update.message.chat_id,
                        text = 'Invalid use of /authorize')
        return -1

    #Open database connection
    try:
        dbConnection = dbConnect(declarations.dbInfo['dbHost'],
                                 declarations.dbInfo['dbPort'],
                                 declarations.dbInfo['dbName'],
                                 declarations.dbInfo['dbUser'],
                                 declarations.dbInfo['dbPass'])
        dbCursor = dbConnection.cursor()
    except:
        bot.sendMessage(chat_id = update.message.chat_id,
                        text = 'Failed to open a connection to the database.')
        return -1

    #Authenticate telegram user
    try:
        dbCursor.callproc("authenticate", [update.message.from_user.id])
        auth = dbCursor.fetchall()
        if not auth[0][0]:
            bot.sendMessage(chat_id = update.message.chat_id,
                            text = '%s is not authorized to add phrases.' \
                            % (update.message.from_user.username))
            dbCursor.close()
            dbConnection.close()
            return 0
    except:
        bot.sendMessage(chat_id = update.message.chat_id,
                        text = 'Failed to authorize user.')
        dbCursor.close()
        dbConnection.close()
        return -1

    #Authorize telegram user
    try:
        dbCursor.callproc("authorize", [id, name.lower()])
        dbConnection.commit()
    except:
        bot.sendMessage(chat_id = update.message.chat_id,
                        text = 'Failed to authorize %s.' % (name))
        dbCursor.close()
        dbConnection.close()
        return -1

    #Close database connection
    dbCursor.close()
    dbConnection.close()

    #Print success message
    bot.sendMessage(chat_id = update.message.chat_id,
                    text = 'Authorized %s.' % (name))

    #Exit function
    return 0
gpl-3.0
-8,709,280,206,077,438,000
29.940476
78
0.533282
false
Aeolitus/Sephrasto
Fertigkeiten.py
1
4974
'''
This file implements the backend for skills (Fertigkeiten) and attributes.
All raisable traits provide the functions steigern() and senken().
(Comments translated from German; identifiers kept as in the original.)
'''
from Definitionen import Attribute
from Wolke import Wolke

# Generic implementation for raisable traits
class Steigerbar(object):
    def __init__(self):
        super().__init__()
        self.name = ''
        self.wert = 0
        self.steigerungsfaktor = 0
        self.text = ''

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.__dict__ == other.__dict__

# Implementation for attributes: SF (steigerungsfaktor) is 16,
# PW (probenwert) is 2x the value
class Attribut(Steigerbar):
    def __init__(self, Key):
        super().__init__()
        self.steigerungsfaktor = 16
        self.probenwert = 0
        self.name = Attribute[Key]
        self.key = Key

    def aktualisieren(self):
        self.probenwert = self.wert*2

# Implementation for raisable energies (Karma and AsP). SF is 1, no limit.
class Energie(Steigerbar):
    def __init__(self):
        super().__init__()
        self.steigerungsfaktor = 1

# Implementation for free skills
class FreieFertigkeit(Steigerbar):
    def __init__(self):
        super().__init__()
        self.steigerungsfaktor = -1

    def __deepcopy__(self, memo=""):
        F = FreieFertigkeit()
        F.name = self.name
        F.steigerungsfaktor = -1
        F.wert = self.wert
        return F

# Implementation for skills in general
class Fertigkeit(Steigerbar):
    def __init__(self):
        super().__init__()
        self.gekaufteTalente = []
        # for vorteil scripts, { talentname1 : { condition1 : mod1,
        # condition2 : mod2, ... }, talentname2 : {}, ... }
        self.talentMods = {}
        # 0 = no, 1 = melee combat skill, 2 = other combat skill
        self.kampffertigkeit = 0
        self.attribute = ['KO', 'KO', 'KO']
        self.attributswerte = [-1, -1, -1]
        self.basiswert = -1
        self.basiswertMod = 0  # for vorteil scripts
        self.probenwert = -1
        self.probenwertTalent = -1
        self.voraussetzungen = []
        self.maxWert = -1
        self.printclass = -1
        self.isUserAdded = True
        self.addToPDF = True

    def aktualisieren(self):
        self.attributswerte = [Wolke.Char.attribute[self.attribute[0]].wert,
                               Wolke.Char.attribute[self.attribute[1]].wert,
                               Wolke.Char.attribute[self.attribute[2]].wert]
        self.maxWert = max(self.attributswerte)+2
        # Python Round does mess up sometimes
        self.basiswert = round(sum(self.attributswerte)/3+0.0001) + self.basiswertMod
        self.probenwert = self.basiswert + round(self.wert/2+0.0001)
        self.probenwertTalent = self.basiswert + self.wert

    def __deepcopy__(self, memo=""):
        F = Fertigkeit()
        F.name = self.name
        F.wert = self.wert
        F.steigerungsfaktor = self.steigerungsfaktor
        F.text = self.text
        F.voraussetzungen = self.voraussetzungen.copy()
        F.attribute = self.attribute.copy()
        F.kampffertigkeit = self.kampffertigkeit
        F.gekaufteTalente = self.gekaufteTalente.copy()
        F.talentMods = self.talentMods.copy()
        F.attributswerte = self.attributswerte.copy()
        F.basiswert = self.basiswert
        F.basiswertMod = self.basiswertMod
        F.probenwert = self.probenwert
        # NOTE: the original assigned -self.probenwertTalent here, an
        # apparent sign typo in a copy method; copied without negation.
        F.probenwertTalent = self.probenwertTalent
        F.maxWert = self.maxWert
        F.printclass = self.printclass
        F.isUserAdded = self.isUserAdded
        F.addToPDF = self.addToPDF
        return F

class Talent():
    def __init__(self):
        self.name = ''
        self.kosten = -1
        self.verbilligt = 0
        self.fertigkeiten = []
        self.voraussetzungen = []
        self.variable = -1
        self.text = ''
        self.printclass = -1
        self.isUserAdded = True

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.__dict__ == other.__dict__

    def isSpezialTalent(self):
        return self.kosten != -1

class Vorteil():
    def __init__(self):
        self.name = ''
        self.kosten = -1
        self.variable = -1
        self.typ = 0
        self.voraussetzungen = []
        self.nachkauf = ''
        self.text = ''
        self.script = None
        self.scriptPrio = 0
        self.isUserAdded = True

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        # the original returned None (falsy) when the dicts differed;
        # returning the comparison directly is equivalent and explicit
        return self.__dict__ == other.__dict__

class Manoever():
    def __init__(self):
        self.name = ''
        self.typ = 0
        self.voraussetzungen = []
        self.probe = ''
        self.gegenprobe = ''
        self.text = ''
        self.isUserAdded = True

    def __eq__(self, other):
        if self.__class__ != other.__class__:
            return False
        return self.__dict__ == other.__dict__
mit
603,267,983,409,238,500
33.020548
138
0.584776
false
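A standalone check of the probenwert arithmetic in Fertigkeit.aktualisieren() above; the attribute and skill values are made up for illustration:

# Made-up values; the +0.0001 epsilon mirrors the original's rounding workaround.
attributswerte = [14, 12, 13]
basiswert = round(sum(attributswerte) / 3 + 0.0001)  # -> 13
wert = 7
probenwert = basiswert + round(wert / 2 + 0.0001)    # -> 17 (3.5 rounds up, not to even)
probenwertTalent = basiswert + wert                  # -> 20
maxWert = max(attributswerte) + 2                    # -> 16
print(basiswert, probenwert, probenwertTalent, maxWert)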
brianquinlan/learn-machine-learning
well_bouncer/pygame_well_bouncer_player.py
1
3941
# Copyright 2019 Brian Quinlan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Use pygame to visualize well_bouncer_game.Game."""

import pygame

import well_bouncer_game

_screen = None
_PLAY_AREA_WIDTH = 200
_PLAY_AREA_HEIGHT = 800
_BORDER_WIDTH = 10
_PLAY_AREA_SCALE = 8


def game_to_screen_point(x, y):
    x = float(x) * _PLAY_AREA_SCALE + _BORDER_WIDTH
    y = float(y) * _PLAY_AREA_SCALE
    return int(x), _PLAY_AREA_HEIGHT + _BORDER_WIDTH - int(y)


def scale_to_screen(p):
    p = float(p) * _PLAY_AREA_SCALE
    return int(p)


class InteractiveMoveMaker(well_bouncer_game.MoveMaker):
    def make_move(self, state) -> well_bouncer_game.Direction:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    return well_bouncer_game.Direction.LEFT
                elif event.key == pygame.K_RIGHT:
                    return well_bouncer_game.Direction.RIGHT
        return well_bouncer_game.Direction.CENTER


def _initialize_screen():
    global _screen
    pygame.init()
    pygame.font.init()
    size = 220, 820
    _screen = pygame.display.set_mode(size, pygame.SRCALPHA)
    pygame.key.set_repeat(1, 50)


def play_game(title, game, move_maker: well_bouncer_game.MoveMaker):
    if _screen is None:
        _initialize_screen()
    pygame.display.set_caption(title)
    score_font = pygame.font.SysFont("Consolas", 60)
    clock = pygame.time.Clock()
    score_width = 0
    while not game.done:
        direction = move_maker.make_move(game.state)
        for event in pygame.event.get():  # Drain any queued events.
            pass
        game.move(direction)
        _screen.fill((0, 0, 0))
        move_distribution = move_maker.move_probabilities(game.state)
        if move_distribution is not None:
            for i in range(well_bouncer_game.Game.NUM_ACTIONS):
                probability = move_distribution.get(i, 0.0)
                c = pygame.Color(0)
                c.hsva = (60 * i, 100, int(100 * probability), 20)
                # One 66px-wide bar per action; pygame.Rect takes
                # (left, top, width, height).
                pygame.draw.rect(_screen, c, pygame.Rect(66 * i, 0, 66, 8))
        score_surface = score_font.render(str(int(game.score)), False, (255, 255, 0))
        score_surface.set_alpha(64)
        new_score_width = score_surface.get_rect().width
        if new_score_width > score_width:
            score_width = new_score_width + 10
        _screen.blit(score_surface, ((_screen.get_rect().width - score_width) // 2, 10))
        pygame.draw.circle(
            _screen,
            (255, 0, 0),
            game_to_screen_point(game.ball_x, game.ball_y),
            scale_to_screen(game.ball_radius),
        )
        pygame.draw.circle(
            _screen,
            (0, 0, 255),
            game_to_screen_point(game.paddle_x, game.paddle_y),
            scale_to_screen(game.paddle_radius),
        )
        pygame.draw.rect(
            _screen,
            (255, 255, 255),
            pygame.Rect(
                0,
                # Start off screen so that there is no top border
                -_BORDER_WIDTH,
                _PLAY_AREA_WIDTH + 2 * _BORDER_WIDTH,
                # 3 * _BORDER_WIDTH because the rectangle started off screen.
                _PLAY_AREA_HEIGHT + 3 * _BORDER_WIDTH),
            _BORDER_WIDTH)
        pygame.display.flip()
        clock.tick(60)
mit
6,822,654,629,688,851,000
32.117647
77
0.586653
false
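A minimal scripted MoveMaker for exercising play_game() above without a keyboard; the Direction/MoveMaker API is used exactly as in the module, while constructing the Game itself is elided since its constructor is not shown here:

class ScriptedMoveMaker(well_bouncer_game.MoveMaker):
    """Always drifts left -- handy for smoke-testing the renderer."""

    def make_move(self, state) -> well_bouncer_game.Direction:
        return well_bouncer_game.Direction.LEFT

    def move_probabilities(self, state):
        return None  # play_game() then skips drawing the probability bars

# play_game("scripted run", game, ScriptedMoveMaker())  # `game` comes from well_bouncer_game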
nanomolina/JP
src/odontology/core/views.py
1
4772
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.shortcuts import render, render_to_response, redirect
from django.contrib.auth import authenticate, login, logout
from django.template import RequestContext, Context, loader
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
import mercadopago

from person.models import Dentist
from person.forms import PatientForm
from django.template.response import TemplateResponse
from core.forms import PatientSelectForm


def principal(request):
    if request.method == 'GET':
        if request.user.is_authenticated():
            return redirect('core:home')
        else:
            return redirect('account_login')


@login_required
def home(request):
    from person.models import Patient, Dentist
    from allauth.socialaccount.models import SocialAccount
    dentist, is_new = Dentist.objects.get_or_create(user=request.user)
    if is_new:
        dentist.save()
    has_connection = SocialAccount.objects.filter(user=request.user).exists()
    patient_select_form = PatientSelectForm(dentist.id)
    form = PatientForm()
    return render_to_response(
        'core/home.html',
        {
            'template': 'home',
            'has_connection': has_connection,
            'patient_select_form': patient_select_form,
            'patient_form': form,
        },
        RequestContext(request)
    )


@login_required
def logout_user(request):
    logout(request)
    return redirect('account_login')


@login_required
def version(request):
    return render_to_response(
        'core/version.html',
        {},
        RequestContext(request)
    )


@login_required
def mp(request):
    preference = {
        "items": [
            {
                "title": "Servidor de página dentalsoft.com.ar",
                "quantity": 1,
                # Available currencies at: https://api.mercadopago.com/currencies
                "currency_id": "ARS",
                "unit_price": 150.0
            }
        ]
    }
    mp = mercadopago.MP("1413986768414297", "NRFtu2EIIUzUOmAz5NJLd0UtORfvy6d5")
    preferenceResult = mp.create_preference(preference)
    url = preferenceResult["response"]["init_point"]
    return render_to_response(
        'core/mp.html',
        {'url': url},
        RequestContext(request)
    )


@login_required
def tariff(request):
    from core.forms import TariffForm
    from core.models import Tariff
    tariff_form = TariffForm()
    if request.is_ajax():
        chapter = request.GET.get('chapter', 1)
        tariffs = Tariff.objects.filter(chapter__number=chapter).order_by(
            'chapter__number', 'index', 'sub_index'
        )
        return TemplateResponse(
            request,
            'core/_list_tariff.html',
            {'tariffs': tariffs},
        )
    tariffs = Tariff.objects.filter(chapter__number=1).order_by(
        'chapter__number', 'index', 'sub_index'
    )
    return render_to_response(
        'core/tariff.html',
        {
            'template': 'tariff',
            'tariff_form': tariff_form,
            'tariffs': tariffs
        },
        RequestContext(request)
    )


@login_required
def birthdays(request):
    from person.models import Patient, Dentist
    from datetime import datetime
    patients_birthday = request.user.dentist.get_patients_birthdays(
        datetime.now().month
    )
    return render_to_response(
        'core/birthdays.html',
        {
            'template': '',
            'list_patients_birthday': patients_birthday,
        },
        RequestContext(request)
    )


@login_required
def contact_us(request):
    from core.models import Message
    from templated_email import send_templated_mail
    from django.conf import settings
    if request.method == 'POST':
        subject = request.POST.get('subject')
        content = request.POST.get('content')
        msg = Message(user=request.user, subject=subject, content=content)
        msg.save()
        send_templated_mail(
            template_name='message',
            from_email=settings.EMAIL_HOST_USER,
            recipient_list=['[email protected]'],
            context={
                'username': msg.user.username,
                'full_name': msg.user.get_full_name(),
                'subject': msg.subject,
                'content': msg.content,
                'email': msg.user.email,
                'date': msg.date_created,
            },
        )
    return HttpResponse(status=200)


def error404(request):
    template = loader.get_template('404.html')
    context = Context({'message': 'All: %s' % request})
    return HttpResponse(content=template.render(context),
                        content_type='text/html; charset=utf-8',
                        status=404)
apache-2.0
-822,771,827,813,158,100
29.006289
111
0.613079
false
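A hypothetical urls.py wiring for the views above, in the same Django 1.x style as the module; the URL patterns and names are assumptions, mirroring the reverse() targets used in the code ('core:home'; 'account_login' is provided by allauth):

from django.conf.urls import url
from core import views

urlpatterns = [
    url(r'^$', views.principal, name='principal'),
    url(r'^home/$', views.home, name='home'),
    url(r'^logout/$', views.logout_user, name='logout'),
    url(r'^version/$', views.version, name='version'),
    url(r'^tariff/$', views.tariff, name='tariff'),
    url(r'^birthdays/$', views.birthdays, name='birthdays'),
    url(r'^contact/$', views.contact_us, name='contact_us'),
]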
shaurz/devo
search_dialog.py
1
5908
import sys import os import wx from file_picker import DirPicker from util import get_combo_history file_pattern_choices = [ "*", "*.py", "*.c;*.cc;*.cxx;*.cpp;*.h;*.hh;*.hxx;*.hpp;*.m;*.mm", "*.txt;*.text;*.rst;*.md;*.markdown", "*.html;*.htm;*.xml;*.xhtml;*.xht", ] class SearchDetails(object): def __init__(self, case=False, regexp=False, hidden=False, find="", find_history=(), file_patterns="*", path="", path_history=()): self.case = case self.regexp = regexp self.hidden = hidden self.find = find self.find_history = find_history self.file_patterns = file_patterns self.path = path self.path_history = path_history def LoadPerspective(self, p): try: self.case = bool(p.get("case_sensitive", False)) self.regexp = bool(p.get("is_regex", False)) self.hidden = bool(p.get("hidden", False)) self.find = str(p.get("find", "")) self.find_history = p.get("find_history", [])[:10] self.file_patterns = str(p.get("file_patterns")) self.path = str(p.get("path", "")) self.path_history = p.get("path_history")[:10] except Exception: pass def SavePerspective(self): return { "case_sensitive": self.case, "is_regex": self.regexp, "hidden": self.hidden, "find": self.find, "find_history": self.find_history[:], "file_patterns": self.file_patterns, "path": self.path, "path_history": self.path_history[:], } class SearchDialog(wx.Dialog): def __init__(self, parent, details=None): style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER wx.Dialog.__init__(self, parent, title="Search", style=style) combo_style = wx.TE_PROCESS_ENTER if wx.Platform == "__WXMAC__" else 0 self.combo_find = wx.ComboBox(self, size=(300, -1), style=combo_style) self.combo_file_patterns = wx.ComboBox( self, size=(300, -1), value="*", choices=file_pattern_choices, style=combo_style) self.dir_picker = DirPicker(self, size=(300, -1), combo=True) self.check_case = wx.CheckBox(self, wx.ID_ANY, "&Case sensitive") self.check_regexp = wx.CheckBox(self, label="Regular e&xpression") self.check_hidden = wx.CheckBox(self, label="Search &hidden files and folders") grid = wx.FlexGridSizer(cols=2, vgap=5, hgap=5) grid.AddGrowableCol(1, 1) grid.Add(wx.StaticText(self, label="Find Text"), 0, wx.ALIGN_CENTRE_VERTICAL) grid.Add(self.combo_find, 0, wx.EXPAND) grid.Add(wx.StaticText(self, label="File Patterns"), 0, wx.ALIGN_CENTRE_VERTICAL) grid.Add(self.combo_file_patterns, 0, wx.EXPAND) grid.Add(wx.StaticText(self, label="Directory"), 0, wx.ALIGN_CENTRE_VERTICAL) grid.Add(self.dir_picker, 0, wx.EXPAND) grid.AddSpacer(0) chksizer = wx.BoxSizer(wx.VERTICAL) chksizer.Add(self.check_case, 0, wx.ALL, 2) chksizer.Add(self.check_regexp, 0, wx.ALL, 2) chksizer.Add(self.check_hidden, 0, wx.ALL, 2) grid.Add(chksizer) btn_search = wx.Button(self, wx.ID_OK, label="&Search") btn_search.SetDefault() btn_cancel = wx.Button(self, wx.ID_CANCEL) btnsizer = wx.StdDialogButtonSizer() btnsizer.AddButton(btn_search) btnsizer.AddButton(btn_cancel) btnsizer.Realize() sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(grid, 1, wx.EXPAND|wx.ALL, 5) sizer.Add(btnsizer, 0, wx.EXPAND|wx.ALL, 5) self.SetSizer(sizer) self.Fit() self.SetMinSize(self.Size) self.SetMaxSize((-1, self.Size.height)) self.Centre() if details is not None: self.combo_find.SetItems(details.find_history) self.combo_find.SetValue(details.find) self.combo_file_patterns.SetValue(details.file_patterns) self.dir_picker.SetHistory(details.path_history) self.dir_picker.SetValue(details.path) self.check_case.SetValue(details.case) self.check_regexp.SetValue(details.regexp) self.check_hidden.SetValue(details.hidden) self.combo_find.SetFocus() 
self.combo_find.SetMark(0, len(self.combo_find.GetValue())) self.combo_find.Bind(wx.EVT_KEY_DOWN, self.OnComboKeyDown) self.Bind(wx.EVT_TEXT_ENTER, self.OnFind) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateFind, btn_search) @property def find(self): return self.combo_find.GetValue() @find.setter def find(self, value): self.combo_find.SetValue(value) @property def path(self): return os.path.expanduser(self.dir_picker.GetValue().strip()) @path.setter def path(self, value): self.dir_picker.SetValue(value.strip()) def GetDetails(self): return SearchDetails( case = self.check_case.GetValue(), regexp = self.check_regexp.GetValue(), hidden = self.check_hidden.GetValue(), find = self.find, find_history = get_combo_history(self.combo_find), file_patterns = self.combo_file_patterns.GetValue(), path = self.path, path_history = self.dir_picker.GetHistory()) def OnComboKeyDown(self, evt): if evt.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER): self.OnFind(None) else: evt.Skip() def OnFind(self, evt): self.EndModal(wx.ID_OK) def OnUpdateFind(self, evt): find = self.find path = self.path evt.Enable(bool(find and path and os.path.isdir(path))) if __name__ == "__main__": app = wx.App() dlg = SearchDialog(None) dlg.ShowModal() print dlg.GetDetails()
mit
-8,200,846,042,718,592,000
35.695652
93
0.592417
false
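A round-trip sketch for SearchDetails above; it uses no wx objects, so it runs standalone. SavePerspective() emits a plain dict, which LoadPerspective() reads back:

details = SearchDetails(case=True, find="TODO", path="~/src")
saved = details.SavePerspective()   # a plain dict, safe to serialize
restored = SearchDetails()
restored.LoadPerspective(saved)
assert restored.case and restored.find == "TODO"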
locaweb/leela
try/src/try_leela/main.py
1
2289
#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys
import argparse
import unittest
import importlib

from try_leela import env


class Stream(object):
    def __init__(self, fh):
        self.write = fh.write
        self.flush = fh.flush

    def writeln(self, str):
        self.write(str)
        self.write("\n")
        self.flush()


def parse_args():
    parser = argparse.ArgumentParser(description="leela testing engine")
    parser.add_argument("suite", metavar="SUITE",
                        choices=("smoke", "perf", "integrity"),
                        help="The test suite you want to invoke")
    parser.add_argument("program", metavar="PROGRAM",
                        help="the program you want to test")
    parser.add_argument("--endpoint", metavar="ENDPOINT", nargs="*",
                        default=["tcp://localhost:4080"],
                        help="The leela endpoint you want to test")
    parser.add_argument("--username", metavar="USERNAME", default="leela",
                        help="The username to use to connect")
    parser.add_argument("--secret", metavar="SECRET", default="leela",
                        help="The secret to use to sign request messages")
    parser.add_argument("--logfile", metavar="LOGFILE", default="/dev/null",
                        help="The file to write the output of the driver program")
    parser.add_argument("--timeout-in-ms", metavar="TIMEOUT-IN-MS",
                        default=60000, type=int, dest="timeout",
                        help="The maximum time to wait for a response")
    return parser.parse_args()


def main():
    args = parse_args()
    env.set_args(args)
    suite = unittest.TestSuite()
    suite.addTests(unittest.TestLoader().discover(
        "try_leela.suites.%s" % (args.suite,), "test*.py"))
    runner = unittest.TextTestRunner()
    runner.run(suite)


if __name__ == "__main__":
    main()
apache-2.0
2,619,649,412,185,253,000
34.765625
100
0.484491
false
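A hypothetical programmatic invocation of the runner above; the driver path is a placeholder, and normally the script would simply be run from the command line:

import sys
from try_leela import main as try_main  # assumed importable from try/src

sys.argv = ["try_leela", "smoke", "./my-driver",
            "--endpoint", "tcp://localhost:4080",
            "--timeout-in-ms", "30000"]
try_main.main()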
aetilley/revscoring
revscoring/datasources/diff.py
1
2252
from deltas import segment_matcher
from deltas.tokenizers import wikitext_split

from . import parent_revision, revision
from .datasource import Datasource


def process_operations(a, b):
    return [op for op in segment_matcher.diff(a, b)], a, b

operations = Datasource("diff.operations", process_operations,
                        depends_on=[parent_revision.tokens, revision.tokens])
"""
Returns a tuple that describes the difference between the parent revision text
and the current revision's text.

The tuple contains three fields:

* operations: `list` of :class:`deltas.Operation`
* A tokens: `list` of `str`
* B tokens: `list` of `str`
"""


def process_added_tokens(diff_operations):
    operations, a, b = diff_operations
    return [t for op in operations if op.name == "insert"
            for t in b[op.b1:op.b2]]

added_tokens = Datasource("diff.added_tokens", process_added_tokens,
                          depends_on=[operations])
"""
Returns a list of all tokens added in this revision.
"""


def process_removed_tokens(diff_operations):
    operations, a, b = diff_operations
    return [t for op in operations if op.name == "delete"
            for t in a[op.a1:op.a2]]

removed_tokens = Datasource("diff.removed_tokens", process_removed_tokens,
                            depends_on=[operations])
"""
Returns a list of all tokens removed in this revision.
"""


def process_added_segments(diff_operations):
    operations, a, b = diff_operations
    return ["".join(b[op.b1:op.b2])
            for op in operations if op.name == "insert"]

added_segments = Datasource("diff.added_segments", process_added_segments,
                            depends_on=[operations])
"""
Returns a list of all contiguous segments of tokens added in this revision.
"""


def process_removed_segments(revision_diff):
    operations, a, b = revision_diff
    return ["".join(a[op.a1:op.a2])
            for op in operations if op.name == "delete"]

removed_segments = Datasource("diff.removed_segments", process_removed_segments,
                              depends_on=[operations])
"""
Returns a list of all contiguous segments of tokens removed in this revision.
"""
mit
-4,191,576,510,164,387,000
27.871795
78
0.643872
false
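A sketch driving deltas' segment_matcher directly, the same way the datasources above consume it (op.name/op.a1/op.b1 semantics taken from that code; the exact segmentation of the output may vary):

from deltas import segment_matcher

a = "the quick brown fox".split()
b = "the quick red fox jumps".split()
ops = list(segment_matcher.diff(a, b))
added = [t for op in ops if op.name == "insert" for t in b[op.b1:op.b2]]
removed = [t for op in ops if op.name == "delete" for t in a[op.a1:op.a2]]
print(added)    # expected: ['red', 'jumps']
print(removed)  # expected: ['brown']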
PaloAltoNetworks/minemeld-core
minemeld/flask/jobs.py
1
8097
# Copyright 2017 Palo Alto Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import uuid import tempfile import subprocess import shutil import json import time import signal from collections import namedtuple, defaultdict import redis import psutil import werkzeug.local import gevent from gevent.subprocess import Popen from flask import g from . import REDIS_URL from . import config from .logger import LOG __all__ = ['init_app', 'JOBS_MANAGER'] REDIS_CP = redis.ConnectionPool.from_url( REDIS_URL, max_connections=int(os.environ.get('REDIS_MAX_CONNECTIONS', 5)) ) REDIS_JOBS_GROUP_PREFIX = 'mm-jobs-{}' _Job = namedtuple('_Job', field_names=['glet', 'timeout_glet']) class JobsManager(object): def __init__(self, connection_pool): self.SR = redis.StrictRedis(connection_pool=connection_pool) self.running_jobs = defaultdict(dict) def _safe_rmtree(self, path): shutil.rmtree(path, ignore_errors=True) def _safe_remove(self, path): try: os.remove(path) except: pass def _get_job_status(self, jobpid, jobhash): try: jobprocess = psutil.Process(pid=jobpid) except psutil.NoSuchProcess: return { 'status': 'DONE', 'returncode': None } if hash(jobprocess) != jobhash: return { 'status': 'DONE', 'returncode': None } return { 'status': 'RUNNING' } def _collect_job(self, jobdata): if 'collected' in jobdata: return tempdir = jobdata.get('cwd', None) if tempdir is not None: self._safe_rmtree(tempdir) jobdata['collected'] = True def _job_monitor_glet(self, job_group, jobid, description, args, data): jobname = (REDIS_JOBS_GROUP_PREFIX+'-{}').format(job_group, jobid) joblogfile = os.path.join( config.get('MINEMELD_LOG_DIRECTORY_PATH', '/tmp'), '{}.log'.format(jobname) ) jobtempdir = tempfile.mkdtemp(prefix=jobname) LOG.info('Executing job {} - {} cwd: {} logfile: {}'.format(jobname, args, jobtempdir, joblogfile)) try: with open(joblogfile, 'w+') as logfile: jobprocess = Popen( args=args, close_fds=True, cwd=jobtempdir, shell=False, stdout=logfile, stderr=subprocess.STDOUT ) except OSError: self._safe_remove(joblogfile) self._safe_rmtree(jobtempdir) LOG.exception('Error starting job {}'.format(jobname)) return jobpsproc = psutil.Process(pid=jobprocess.pid) jobdata = data if jobdata is None: jobdata = {} jobdata['create_time'] = int(time.time()*1000) jobdata['description'] = description jobdata['job_id'] = jobid jobdata['pid'] = jobpsproc.pid jobdata['hash'] = hash(jobpsproc) jobdata['logfile'] = joblogfile jobdata['cwd'] = jobtempdir jobdata['status'] = 'RUNNING' self.SR.hset( REDIS_JOBS_GROUP_PREFIX.format(job_group), jobid, json.dumps(jobdata) ) jobprocess.wait() if jobprocess.returncode != 0: jobdata['status'] = 'ERROR' else: jobdata['status'] = 'DONE' jobdata['returncode'] = jobprocess.returncode jobdata['end_time'] = int(time.time()*1000) self._collect_job(jobdata) self.SR.hset( REDIS_JOBS_GROUP_PREFIX.format(job_group), jobid, json.dumps(jobdata) ) job = self.running_jobs[job_group].pop(jobid, None) if job is not None and job.timeout_glet is not None: job.timeout_glet.kill() return jobprocess.returncode 
def _job_timeout_glet(self, job_group, jobid, timeout): gevent.sleep(timeout) prefix = REDIS_JOBS_GROUP_PREFIX.format(job_group) jobdata = self.SR.hget(prefix, jobid) if jobdata is None: return jobdata = json.loads(jobdata) status = jobdata.get('status', None) if status != 'RUNNING': LOG.info('Timeout for job {}-{} triggered but status not running'.format(prefix, jobid)) return pid = jobdata.get('pid', None) if pid is None: LOG.error('Timeout for job {}-{} triggered but no pid available'.format(prefix, jobid)) return LOG.error('Timeout for job {}-{} triggered, sending TERM signal'.format(prefix, jobid)) os.kill(pid, signal.SIGTERM) def delete_job(self, job_group, jobid): prefix = REDIS_JOBS_GROUP_PREFIX.format(job_group) jobdata = self.SR.hget(prefix, jobid) if jobdata is None: return jobdata = json.loads(jobdata) logfile = jobdata.get('logfile', None) if logfile is not None: self._safe_remove(logfile) self._collect_job(jobdata) self.SR.hdel(prefix, jobid) def get_jobs(self, job_group): prefix = REDIS_JOBS_GROUP_PREFIX.format(job_group) result = {} jobs_map = self.SR.hgetall(prefix) for jobid, jobdata in jobs_map.iteritems(): try: jobdata = json.loads(jobdata) if jobdata['status'] == 'RUNNING': jobpid = jobdata['pid'] job_status = self._get_job_status(jobpid, jobdata['hash']) jobdata.update(job_status) result[jobid] = jobdata except (ValueError, KeyError, psutil.ZombieProcess, psutil.AccessDenied): LOG.error('Invalid job value - deleting job {}::{}'.format(job_group, jobid)) self.delete_job(job_group, jobid) continue if jobdata['status'] == 'DONE' and 'collected' not in jobdata: if jobid not in self.running_jobs[job_group]: self._collect_job(jobdata) self.SR.hset(job_group, jobid, json.dumps(jobdata)) return result def exec_job(self, job_group, description, args, data=None, callback=None, timeout=None): jobid = str(uuid.uuid4()) glet = gevent.spawn( self._job_monitor_glet, job_group, jobid, description, args, data ) if callback is not None: glet.link(callback) timeout_glet = None if timeout is not None: timeout_glet = gevent.spawn(self._job_timeout_glet, job_group, jobid, timeout) self.running_jobs[job_group][jobid] = _Job(glet=glet, timeout_glet=timeout_glet) return jobid def get_JobsManager(): jobsmgr = getattr(g, '_jobs_manager', None) if jobsmgr is None: jobsmgr = JobsManager(connection_pool=REDIS_CP) g._jobs_manager = jobsmgr return jobsmgr def teardown(exception): jobsmgr = getattr(g, '_jobs_manager', None) if jobsmgr is not None: g._jobs_manager = None LOG.info( 'redis connection pool: in use: {} available: {}'.format( len(REDIS_CP._in_use_connections), len(REDIS_CP._available_connections) ) ) JOBS_MANAGER = werkzeug.local.LocalProxy(get_JobsManager) def init_app(app): app.teardown_appcontext(teardown)
apache-2.0
-190,821,465,123,232,500
28.125899
107
0.580215
false
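A usage sketch for JobsManager.exec_job() above; it must run inside a Flask app context, since JOBS_MANAGER is a werkzeug LocalProxy bound to g. The group name and command are placeholders:

def on_done(glet):
    # glet.value is _job_monitor_glet's return value (the process returncode)
    LOG.info('job finished, returncode: {}'.format(glet.value))

jobid = JOBS_MANAGER.exec_job(
    job_group='imports',                 # hypothetical group name
    description='nightly feed import',
    args=['/bin/true'],
    timeout=300,                         # seconds; SIGTERM is sent after this
    callback=on_done,
)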
joedursun/pytineye
pytineye/api_request.py
1
8404
# -*- coding: utf-8 -*- """ api_request.py Provides authentication with the TinEye API server. For more information see https://services.tineye.com/developers/tineyeapi/authentication.html Copyright (c) 2015 Idée Inc. All rights reserved worldwide. """ import hmac import mimetools import sys import time import urllib # Standard hashlib only available after Python 2.5 (inclusive) if sys.version_info < (2, 5): import sha else: from hashlib import sha1 as sha from Crypto.Random import random from exceptions import APIRequestError class APIRequest(object): """ Class providing authentication with the TinEye API server. """ # Nonce length min_nonce_length = 24 max_nonce_length = 255 nonce_allowable_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTSUVWXYZ0123456789-_=.,*^" def __init__(self, api_url, public_key, private_key): self.api_url = api_url self.public_key = public_key self.private_key = private_key def _generate_nonce(self, nonce_length=24): """ Generate a nonce used to make a request unique. - `nonce_length`, length of the generated nonce. Returns: a nonce. """ try: int(nonce_length) if nonce_length < APIRequest.min_nonce_length or nonce_length > APIRequest.max_nonce_length: raise ValueError() except ValueError: raise APIRequestError( "Nonce length must be an int between %d and %d chars" % (APIRequest.min_nonce_length, APIRequest.max_nonce_length)) rand = random.StrongRandom() nonce = "" nonce = [rand.choice(APIRequest.nonce_allowable_chars) for i in range(0, nonce_length)] return "".join(nonce) def _generate_get_hmac_signature(self, method, nonce, date, request_params={}): """ Generate the HMAC signature hash for a GET request. - `method`, the API method being called. - `nonce`, a nonce. - `date`, UNIX timestamp of the request. - `request_params`, dictionary of other search parameters. Returns: an HMAC signature hash. """ http_verb = "GET" param_str = self._sort_params(request_params=request_params) request_url = '%s%s/' % (self.api_url, method) to_sign = self.private_key + http_verb + str(date) + nonce + request_url + param_str return self._generate_hmac_signature(to_sign) def _generate_post_hmac_signature( self, method, boundary, nonce, date, filename, request_params={}): """ Generate the HMAC signature hash for a POST request. - `method`, the API method being called. - `boundary`, the HTTP request's boundary string. - `nonce`, a nonce. - `date`, UNIX timestamp of the request. - `filename`, filename of the image being uploaded. - `request_params`, dictionary of other search parameters. Returns: an HMAC signature hash. """ http_verb = "POST" content_type = "multipart/form-data; boundary=%s" % boundary param_str = self._sort_params(request_params=request_params) request_url = '%s%s/' % (self.api_url, method) to_sign = self.private_key + http_verb + content_type + \ urllib.quote_plus(filename).lower() + \ str(date) + nonce + request_url + param_str return self._generate_hmac_signature(to_sign) def _generate_hmac_signature(self, to_sign): """ Generate the HMAC signature hash given a message to sign. - `to_sign`, the message to sign. Returns: HMAC signature hash. """ signature = "" signature = hmac.new(self.private_key, to_sign, sha) return signature.hexdigest() def _sort_params(self, request_params, lowercase=True): """ Helper method to sort request parameters. If request_params has the image_url parameter it is URL encoded and then lowercased. - `request_params`, list of extra search parameters. Returns: the search parameters in alphabetical order in query string params. 
""" keys = [] unsorted_params = {} special_keys = ["api_key", "api_sig", "date", "nonce", "image_upload"] for key in request_params.keys(): lc_key = key.lower() # Sort the parameters if they are not part of the following list if lc_key not in special_keys: # If the parameter is image_url, URL encode the image URL then lowercase it if lc_key == "image_url": value = request_params[key] if "%" not in value: value = urllib.quote_plus(value, "~") unsorted_params[lc_key] = value if lowercase: unsorted_params[lc_key] = value.lower() else: unsorted_params[lc_key] = request_params[key] keys.append(key) keys.sort() sorted_pairs = [] # Return a query string for key in keys: sorted_pairs.append("%s=%s" % (key, unsorted_params[key.lower()])) return "&".join(sorted_pairs) def _request_url(self, method, nonce, date, api_signature, request_params): """ Helper method to generate a URL to call given a method, a signature and parameters. - `method`, API method being called. - `nonce`, a nonce. - `date`, UNIX timestamp of the request. - `api_signature`, the signature to be included with the URL. - `request_params`, the parameters to be included with the URL. Returns: The API URL to send a request to. """ base_url = '%s%s/' % (self.api_url, method) request_url = "%s?api_key=%s&date=%s&nonce=%s&api_sig=%s" request_url = request_url % (base_url, self.public_key, str(date), nonce, api_signature) # Need to sort all other parameters extra_params = self._sort_params(request_params=request_params, lowercase=False) if extra_params != "": request_url += "&" + extra_params return request_url def get_request(self, method, request_params={}): """ Generate an API GET request string. - `method`, API method being called. - `request_params`, the list of search parameters. Returns: a URL to send the search request to including the search parameters. """ # Have to generate a nonce and date to use in generating a GET request signature nonce = self._generate_nonce() date = int(time.time()) api_signature = self._generate_get_hmac_signature(method, nonce, date, request_params=request_params) return self._request_url(method, nonce, date, api_signature, request_params) def post_request(self, method, filename, request_params={}): """ Generate an API POST request string for an image upload search. The POST request string can be sent as is to issue the POST request to the API server. - `method`, API method being called. - `filename`, the filename of the image that is being searched for. - `request_params`, the list of search parameters. Returns: - `request_url`, the URL to send the search to. - `boundary`, the boundary to be used in the POST request. """ if filename is None or not len(str(filename).strip()): raise APIRequestError("Must specify an image to search for.") # Have to generate a boundary, nonce, and date to use in generating a POST # request signature boundary = mimetools.choose_boundary() nonce = self._generate_nonce() date = int(time.time()) api_signature = self._generate_post_hmac_signature( "search", boundary, nonce, date, filename, request_params=request_params) return self._request_url(method, nonce, date, api_signature, request_params), boundary
mit
-1,211,642,914,218,331,000
33.297959
104
0.590265
false
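A signing sketch for APIRequest above (note the module is Python 2: mimetools, urllib.quote_plus). The keys and API base URL are placeholders:

api = APIRequest('https://api.tineye.com/rest/',
                 public_key='PUBLIC_KEY',       # placeholder
                 private_key='PRIVATE_KEY')     # placeholder
url = api.get_request('search', {'image_url': 'http://example.com/img.jpg'})
# `url` now carries api_key, date, nonce and api_sig query parameters and can
# be fetched directly; post_request() additionally returns the multipart
# boundary for image-upload searches.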
jrwdunham/old
onlinelinguisticdatabase/config/middleware.py
1
7016
# Copyright 2016 Joel Dunham # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Pylons middleware initialization. .. module:: middleware :synopsis: middleware initialization. """ from beaker.middleware import SessionMiddleware from paste.cascade import Cascade from paste.registry import RegistryManager from paste.urlparser import StaticURLParser from paste.deploy.converters import asbool from pylons.middleware import ErrorHandler, StatusCodeRedirect from pylons.wsgiapp import PylonsApp from routes.middleware import RoutesMiddleware from onlinelinguisticdatabase.config.environment import load_environment import logging import pprint log = logging.getLogger(__name__) class HTML2JSONContentType(object): """Middleware transforms ``Content-Type: text/html`` headers to ``Content-Type: application/json``. """ def __init__(self, app): self.app = app def __call__(self, environ, start_response): def custom_start_response(status, headers, exc_info=None): new_headers = dict(headers) if dict(headers).get('Content-Type') == 'text/html; charset=utf-8': new_headers['Content-Type'] = 'application/json' # CORS stuff. See http://stackoverflow.com/questions/2771974/modify-headers-in-pylons-using-middleware try: origin = environ.get('HTTP_ORIGIN') except Exception, e: origin = 'http://dativebeta.lingsync.org' # In the test case, there will be no origin. So we set it to # *anything* here, so that WebTest's lint.py doesn't choke on # `None`. if not origin: origin = 'http://localhost:9000' # new_headers['Access-Control-Allow-Origin'] = 'http://localhost:9000' new_headers['Access-Control-Allow-Origin'] = origin # Use this header to indicate that cookies should be included in CORS requests. new_headers['Access-Control-Allow-Credentials'] = 'true' # What was here before: new_headers['Access-Control-Allow-Methods'] = 'OPTIONS, GET, POST' new_headers['Access-Control-Allow-Methods'] = ( 'GET, ' 'HEAD, ' 'POST, ' 'PUT, ' 'DELETE, ' 'TRACE, ' 'CONNECT, ' 'COPY, ' 'OPTIONS, ' 'SEARCH' ) # What was here before: new_headers['Access-Control-Allow-Headers'] = 'Content-Type, content-type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control' new_headers['Access-Control-Allow-Headers'] = ( 'Content-Type, ' 'content-type, ' 'If-Modified-Since' ) # This causes the preflight result to be cached for specified milliseconds. # From LingSync's CouchDB config # NOTE: Comment out during development #new_headers['Access-Control-Max-Age'] = '12345' # Access-Control-Expose-Headers (optional) # The XMLHttpRequest 2 object has a getResponseHeader() method that # returns the value of a particular response header. During a CORS # request, the getResponseHeader() method can only access simple # response headers. Simple response headers are defined as follows: # # Cache-Control # Content-Language # Content-Type # Expires # Last-Modified # Pragma # # If you want clients to be able to access other headers, you have # to use the Access-Control-Expose-Headers header. 
The value of # this header is a comma-delimited list of response headers you # want to expose to the client. # NOTE: Commented this out for debuggin ... new_headers['Access-Control-Expose-Headers'] = \ 'Access-Control-Allow-Origin, Access-Control-Allow-Credentials' headers = new_headers.items() return start_response(status, headers, exc_info) return self.app(environ, custom_start_response) def make_app(global_conf, full_stack=False, static_files=True, **app_conf): """Create a Pylons WSGI application and return it ``global_conf`` The inherited configuration for this application. Normally from the [DEFAULT] section of the Paste ini file. ``full_stack`` Whether this application provides a full WSGI stack (by default, meaning it handles its own exceptions and errors). Disable full_stack when this application is "managed" by another WSGI middleware. ``static_files`` Whether this application serves its own static files; disable when another web server is responsible for serving them. ``app_conf`` The application's local configuration. Normally specified in the [app:<name>] section of the Paste ini file (where <name> defaults to main). """ # Configure the Pylons environment config = load_environment(global_conf, app_conf) # The Pylons WSGI app app = PylonsApp(config=config) # Routing/Session Middleware app = RoutesMiddleware(app, config['routes.map'], singleton=False) app = SessionMiddleware(app, config) # At some point it seems that Pylons converts the Content-Type of any # response without a 200 OK status to 'text/html; charset=utf-8'. Well # no more Pylons! The HTML2JSONContentType middleware zaps those # nasty text/html content types and converts them to application/json! app = HTML2JSONContentType(app) if asbool(full_stack): # Handle Python exceptions app = ErrorHandler(app, global_conf, **config['pylons.errorware']) # Display error documents for 401, 403, 404 status codes (and # 500 when debug is disabled) if asbool(config['debug']): app = StatusCodeRedirect(app) else: app = StatusCodeRedirect(app, [400, 401, 403, 404, 500]) # Establish the Registry for this application app = RegistryManager(app) if asbool(static_files): # Serve static files static_app = StaticURLParser(config['pylons.paths']['static_files']) app = Cascade([static_app, app]) app.config = config return app
apache-2.0
-2,579,050,878,312,153,000
37.762431
207
0.634122
false
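A tiny WSGI sketch showing what HTML2JSONContentType above does to a response; the toy app is hypothetical and written in the module's Python 2 style:

def toy_app(environ, start_response):
    # Pylons-style error response with the Content-Type the middleware rewrites
    start_response('404 Not Found',
                   [('Content-Type', 'text/html; charset=utf-8')])
    return ['{"error": "not found"}']

app = HTML2JSONContentType(toy_app)
# Serving `app` now emits Content-Type: application/json, plus the CORS
# headers added in custom_start_response (origin echo, credentials, methods).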
mozilla/ichnaea
ichnaea/api/locate/source.py
1
2082
"""Base implementation of a search source.""" from functools import partial from ichnaea.api.locate.result import ( Position, PositionResultList, Region, RegionResultList, ) class Source(object): """ A source represents data from the same data source or collection effort, for example a GeoIP database or our own crowd-sourced data collection. """ fallback_field = None result_list = None result_type = None source = None def __init__(self, geoip_db, raven_client, redis_client, data_queues): self.geoip_db = geoip_db self.raven_client = raven_client self.redis_client = redis_client self.data_queues = data_queues self.result_type = partial( self.result_type, source=self.source, fallback=self.fallback_field ) def should_search(self, query, results): """ Given a query and possible results found by other sources, check if this source should attempt to perform a search. :param query: A query. :type query: :class:`~ichnaea.api.locate.query.Query` :param results: All results found by other sources. :type results: :class:`~ichnaea.api.locate.result.ResultList` :rtype: bool """ if self.fallback_field is not None: return bool(getattr(query.fallback, self.fallback_field, True)) return True def search(self, query): """Provide a type specific possibly empty result list. :param query: A query. :type query: :class:`~ichnaea.api.locate.query.Query` """ raise NotImplementedError() class PositionSource(Source): """ A PositionSource will return a position result with a latitude, a longitude and an accuracy in meters in it. """ result_list = PositionResultList result_type = Position class RegionSource(Source): """ A RegionSource will return a region result with a region name and code in it. """ result_list = RegionResultList result_type = Region
apache-2.0
-1,481,785,587,450,500,600
25.692308
78
0.645533
false
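A sketch of a concrete source built on the base class above; only the search()/result_type wiring is taken from that code, while the Region field names and ResultList API are assumptions:

class StaticRegionSource(RegionSource):
    source = 'static'  # hypothetical source tag

    def search(self, query):
        results = self.result_list()
        # result_type was pre-bound with source= and fallback= in
        # Source.__init__, so only the payload fields (assumed names) remain:
        results.add(self.result_type(region_code='DE', region_name='Germany'))
        return results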
mruwek/topydo
topydo/lib/View.py
1
2563
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

""" A view is a list of todos, sorted and filtered. """

from topydo.lib.PrettyPrinterFilter import (
    PrettyPrinterColorFilter,
    PrettyPrinterNumbers
)
from topydo.lib.PrettyPrinter import PrettyPrinter


class View(object):
    """
    A view is instantiated by a todo list, usually obtained from a
    todo.txt file. Also a sorter and a list of filters should be given
    that is applied to the list. A printer can be passed, but it won't
    be used when pretty_print() is called, since it will instantiate its
    own pretty printer instance.
    """

    def __init__(self, p_sorter, p_filters, p_todolist,
                 p_printer=PrettyPrinter()):
        self._todolist = p_todolist
        self._viewdata = []
        self._sorter = p_sorter
        self._filters = p_filters
        self._printer = p_printer

        self.update()

    def update(self):
        """
        Updates the view data. Should be called when the backing todo
        list has changed.
        """
        self._viewdata = self._sorter.sort(self._todolist.todos())

        for _filter in self._filters:
            self._viewdata = _filter.filter(self._viewdata)

    def pretty_print(self, p_pp_filters=None):
        """ Pretty prints the view. """
        p_pp_filters = p_pp_filters or []

        # since we're using filters, always use PrettyPrinter
        printer = PrettyPrinter()
        printer.add_filter(PrettyPrinterNumbers(self._todolist))

        for ppf in p_pp_filters:
            printer.add_filter(ppf)

        # apply colors at the last step, the ANSI codes may confuse the
        # preceding filters.
        printer.add_filter(PrettyPrinterColorFilter())

        return printer.print_list(self._viewdata)

    def __str__(self):
        return self._printer.print_list(self._viewdata)
gpl-3.0
-628,074,714,298,006,500
33.635135
77
0.672259
false
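A construction sketch for View above: it only calls .sort() on the sorter, .filter() on each filter, and .todos() on the todo list, so stubs suffice here. `my_todolist` stands in for a real topydo TodoList (pretty_print() additionally needs whatever PrettyPrinterNumbers expects from it):

class IdentitySorter(object):
    def sort(self, p_todos):
        return list(p_todos)

class NoopFilter(object):
    def filter(self, p_todos):
        return p_todos

view = View(IdentitySorter(), [NoopFilter()], my_todolist)
print(view.pretty_print())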
eJon/enjoy
utils/profiler.py
1
1323
__author__ = 'Leo'

import logging
import time
from functools import wraps


def timer(f):
    logger = logging.getLogger("%s.%s" % (f.__module__, f.__name__))

    @wraps(f)
    def wrapper(*a, **kw):
        t0 = time.time()
        r = f(*a, **kw)
        td = time.time() - t0
        logger.info("took %0.2fs" % td)
        return r
    return wrapper


class Stopwatch(object):
    def __init__(self, name='Stopwatch'):
        self.name = name
        self.start = time.time()
        self.ticks = []

    def tick(self, name):
        self.ticks.append((name, time.time()))

    def stop(self):
        # Store the end time under its own name so stop() stays callable.
        self.stop_time = time.time()

    def summary(self):
        """Return a summary of timing information."""
        self.stop()
        total = self.stop_time - self.start
        s = "%s duration: %0.2f\n" % (self.name, total)
        prev = ("start", self.start)
        for tick in self.ticks:
            s += ("  %s => %s" % (prev[0], tick[0])).ljust(30) + "... %0.2fs\n" % (tick[1] - prev[1])
            prev = tick
        if self.ticks:
            s += ("  %s => end" % (prev[0],)).ljust(30) + "... %0.2fs" % (self.stop_time - prev[1])
        return s


if __name__ == "__main__":
    @timer
    def test():
        time.sleep(5)

    s = Stopwatch()
    test()
    s.tick("test1")
    test()
    s.tick("test2")
    test()
    print(s.summary())
gpl-3.0
-6,228,505,810,644,120,000
23.072727
102
0.490552
false
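A usage sketch for the @timer decorator above: it logs under "<module>.<function>", so with basicConfig at INFO level each call is recorded automatically:

import logging
logging.basicConfig(level=logging.INFO)

@timer
def load_data():
    time.sleep(0.2)

load_data()  # logs e.g. "INFO:__main__.load_data:took 0.20s"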
Outernet-Project/librarian-library
librarian_library/importer.py
1
4423
import json import logging import os import re import uuid import gevent import scandir from bottle_utils.common import unicode, to_bytes, to_unicode ILLEGAL = re.compile(r'[\s!"#$%&\':()*\-/<=>?@\[\\\]^_`{|},.]+') FIRST_CHAR = re.compile(r'\w{1}', re.UNICODE) MAX_TITLE_LENGTH = 255 def find_content_dirs(basedir, meta_filenames, sleep_interval=0.01): for entry in scandir.scandir(basedir): if entry.is_dir(): for child in find_content_dirs(entry.path, meta_filenames): yield child else: filename = os.path.basename(entry.path) if filename in meta_filenames: yield os.path.dirname(entry.path) # when it resumes, abort exploration of the current folder # since it got removed in the meantime break # force context switch gevent.sleep(sleep_interval) def get_random_title(): return uuid.uuid4().hex def safe_title(source, delim=u' '): result = [] for word in ILLEGAL.split(source): if word: result.append(word) return unicode(delim.join(result)) def read_meta(basedir, meta_filenames): meta = None for filename in meta_filenames: meta_path = os.path.join(basedir, filename) if os.path.exists(meta_path): try: with open(meta_path, 'r') as meta_file: meta = json.load(meta_file) except Exception: continue return meta def upgrade_meta(meta): meta['gen'] = 1 meta['content'] = { 'html': { 'main': meta.pop('index', 'index.html'), 'keep_formatting': meta.pop('keep_formatting', False) } } for ignored in ('images', 'multipage'): meta.pop(ignored, None) def delete_old_meta(path, meta_filenames): if len(meta_filenames) > 1: for old_meta_filename in meta_filenames[1:]: old_meta_path = os.path.join(path, old_meta_filename) if os.path.exists(old_meta_path): os.remove(old_meta_path) def import_content(srcdir, destdir, meta_filenames, fsal, notifications, notifications_db): """Discover content directories under ``srcdir`` using the first generation folder structure and copy them into ``destdir``, while dropping the old nested structure and putting them into a single folder which name is generated from the slugified title of the content.""" srcdir = os.path.abspath(srcdir) if not os.path.exists(srcdir): logging.info(u"Content directory: {0} does not exist.".format(srcdir)) return logging.info(u"Starting content import of {0}".format(srcdir)) added = 0 for src_path in find_content_dirs(srcdir, meta_filenames): meta = read_meta(src_path, meta_filenames) if not meta: logging.error(u"Content import of {0} skipped. No valid metadata " "was found.".format(src_path)) continue # metadata couldn't be found or read, skip this item # process and save the found metadata upgrade_meta(meta) meta_path = os.path.join(src_path, meta_filenames[0]) with open(meta_path, 'w') as meta_file: json.dump(meta, meta_file) # delete any other meta files delete_old_meta(src_path, meta_filenames) # move content folder into library title = to_unicode(to_bytes(safe_title(meta['title']) or safe_title(meta['url']) or get_random_title())[:MAX_TITLE_LENGTH]) match = FIRST_CHAR.search(title) first_letter = (match.group() if match else None) or title[0] dest_path = os.path.join(destdir, first_letter.upper(), title) if not fsal.exists(dest_path, unindexed=True): (success, error) = fsal.transfer(src_path, dest_path) if not success: logging.error(u"Content import of {0} failed with " "{1}".format(src_path, error)) continue # adding to database will happen when we're notified by fsal about # the event added += 1 success_msg = "{0} content items imported from {1}.".format(added, srcdir) logging.info(success_msg) notifications.send(success_msg, db=notifications_db)
gpl-3.0
-2,436,103,556,933,083,600
34.384
79
0.595071
false
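A quick demonstration of the title helpers above: safe_title() collapses runs of whitespace and punctuation (the ILLEGAL pattern) into the delimiter, and get_random_title() is a uuid4 hex string:

print(safe_title(u"Hello, World! (2015)"))  # -> Hello World 2015
print(safe_title(u"a/b\\c", delim=u"-"))    # -> a-b-c
print(len(get_random_title()))              # -> 32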
susurrant-audio/scdown
scdown/sc.py
1
3580
import soundcloud
import os
import logging
from datetime import datetime
import requests
import sys

from celeryconfig import mongolab
import pymongo
from pymongo import MongoClient
from pymongo.errors import OperationFailure

USER = '/users/{_id}'
USER_TRACKS = '/users/{_id}/tracks'
USER_FOLLOWINGS = '/users/{_id}/followings'
USER_FOLLOWERS = '/users/{_id}/followers'
USER_WEB_PROFILES = '/users/{_id}/web-profiles'
TRACK = '/tracks/{_id}'
TRACK_COMMENTS = '/tracks/{_id}/comments'
TRACK_FAVORITERS = '/tracks/{_id}/favoriters'
TRACK_DOWNLOAD = '/tracks/{_id}/download'
TRACK_STREAM = '/tracks/{_id}/stream'


class RequestDB(object):
    client = None
    db = None
    coll = None
    logger = None

    def __init__(self, db_name="soundcloud", logger=logging.getLogger("")):
        self.logger = logger
        self.client = MongoClient(mongolab)
        self.db = self.client[db_name]
        self.coll = self.db.requests
        try:
            # "unique" is an index option, not part of the key specification.
            self.coll.ensure_index([("key", pymongo.ASCENDING)], unique=True)
        except OperationFailure as e:
            logger.error("Could not create index.")
            logger.error(e)

    def get(self, key):
        v = self.coll.find_one({"key": key})
        if v is not None:
            return v["value"]
        else:
            return None

    def set(self, key, value):
        now = datetime.utcnow()
        doc = {"key": key, "value": value, "retrieved": now}
        self.coll.update({"key": key}, doc, upsert=True)
        self.logger.info("Stored {} in db".format(key))

    def close(self):
        # Connections are owned by the client, not the database handle.
        if self.client is not None:
            self.client.close()


class Sc(object):
    _sc_client = None
    _db = None
    _logger = None

    def __init__(self, sc_client=None, db_name="soundcloud",
                 logger=logging.getLogger("")):
        self._logger = logger
        if sc_client is None:
            sc_client_id = os.getenv('SOUNDCLOUD_CLIENT_ID')
            if sc_client_id is None:
                err = "SOUNDCLOUD_CLIENT_ID was not set!"
                self._logger.error(err)
                sys.exit(err)
            sc_client = soundcloud.Client(client_id=sc_client_id)
        self._sc_client = sc_client
        self._db = RequestDB(db_name, logger)

    def get_sc(self, template, _id=None):
        key = template.format(_id=_id) if _id is not None else template
        self._logger.info("GET {}".format(key))
        value = self._db.get(key)
        if value is not None:
            return value
        else:
            if _id is None:
                res = self._sc_client.get(key, allow_redirects=False)
                track_url = res.location
                return requests.get(track_url, stream=True)
            else:
                res = self._sc_client.get(key)
                if hasattr(res, "data"):
                    res1 = [dict(o.fields()) for o in res]
                    self._db.set(key, res1)
                    return res1
                elif hasattr(res, "fields"):
                    res1 = dict(res.fields())
                    self._logger.info(repr(res1))
                    self._db.set(key, res1)
                    return res1
                else:
                    return res

    def __del__(self):
        if self._db is not None:
            self._db.close()


def prefill_user(user_id):
    """Cache the basic info on a user"""
    sc = Sc(db_name="soundcloud")
    for t in [USER, USER_WEB_PROFILES, USER_FOLLOWINGS,
              USER_TRACKS, USER_FOLLOWERS]:
        sc.get_sc(t, user_id)
mit
-5,842,580,240,576,161,000
30.130435
75
0.547207
false
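The endpoint templates above are plain format strings, and prefill_user() just walks them; a sketch (the user id is a placeholder, and running prefill_user() requires SOUNDCLOUD_CLIENT_ID in the environment plus a reachable MongoDB at the `mongolab` URI):

print(USER_TRACKS.format(_id=12345))  # -> '/users/12345/tracks'

prefill_user(12345)  # caches USER, web profiles, followings, tracks, followers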
temmeand/scikit-rf
skrf/tests/test_network.py
1
31053
import unittest import os import tempfile import six import numpy as npy import six.moves.cPickle as pickle import skrf as rf from copy import deepcopy from nose.plugins.skip import SkipTest from skrf import setup_pylab from skrf.media import CPW from skrf.media import DistributedCircuit from skrf.constants import S_DEFINITIONS from skrf.networkSet import tuner_constellation from skrf.plotting import plot_contour class NetworkTestCase(unittest.TestCase): ''' Network class operation test case. The following is true, as tested by lihan in ADS, test3 == test1 ** test2 To test for 2N-port deembeding Meas, Fix and DUT are created such as: :: Meas == Fix ** DUT Meas Fix DUT +---------+ +---------+ +---------+ -|0 4|- -|0 4|---|0 4|- -|1 5|- = -|1 5|---|1 5|- -|2 6|- -|2 6|---|2 6|- -|3 7|- -|3 7|---|3 7|- +---------+ +---------+ +---------+ Note: ----- due to the complexity of inv computations, there will be an unavoidable precision loss. thus Fix.inv ** Meas will show a small difference with DUT. ''' def setUp(self): ''' this also tests the ability to read touchstone files without an error ''' setup_pylab() self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/' self.ntwk1 = rf.Network(os.path.join(self.test_dir, 'ntwk1.s2p')) self.ntwk2 = rf.Network(os.path.join(self.test_dir, 'ntwk2.s2p')) self.ntwk3 = rf.Network(os.path.join(self.test_dir, 'ntwk3.s2p')) self.freq = rf.Frequency(75,110,101,'ghz') self.cpw = CPW(self.freq, w=10e-6, s=5e-6, ep_r=10.6) l1 = self.cpw.line(0.20, 'm', z0=50) l2 = self.cpw.line(0.07, 'm', z0=50) l3 = self.cpw.line(0.47, 'm', z0=50) self.Fix = rf.concat_ports([l1, l1, l1, l1]) self.DUT = rf.concat_ports([l2, l2, l2, l2]) self.Meas = rf.concat_ports([l3, l3, l3, l3]) def test_timedomain(self): t = self.ntwk1.s11.s_time s = self.ntwk1.s11.s self.assertTrue(len(t)== len(s)) def test_time_gate(self): ntwk = self.ntwk1 gated = self.ntwk1.s11.time_gate(0,.2) self.assertTrue(len(gated)== len(ntwk)) def test_time_transform(self): spb = (4, 5) data_rate = 5e9 num_taps = (100, 101) for i in range(2): tps = 1. / spb[i] / data_rate num_points = spb[i] * num_taps[i] # Frequency terms should NOT contain Nyquist frequency if number of points is odd inc_nyq = True if num_points % 2 == 0 else False freq = npy.linspace(0, 1. / 2 / tps, num_points // 2 + 1, endpoint=inc_nyq) dut = self.ntwk1.copy() freq_valid = freq[npy.logical_and(freq >= dut.f[0], freq <= dut.f[-1])] dut.interpolate_self(rf.Frequency.from_f(freq_valid, unit='hz')) dut_dc = dut.extrapolate_to_dc() t, y = dut_dc.s21.impulse_response(n=num_points) self.assertEqual(len(t), num_points) self.assertEqual(len(y), num_points) self.assertTrue(npy.isclose(t[1] - t[0], tps)) t, y = dut_dc.s21.step_response(n=num_points) self.assertEqual(len(t), num_points) self.assertEqual(len(y), num_points) self.assertTrue(npy.isclose(t[1] - t[0], tps)) def test_constructor_empty(self): rf.Network() def test_constructor_from_values(self): rf.Network(f=[1,2],s=[1,2],z0=[1,2] ) def test_constructor_from_touchstone(self): rf.Network(os.path.join(self.test_dir, 'ntwk1.s2p')) def test_constructor_from_hfss_touchstone(self): # HFSS can provide the port characteric impedances in its generated touchstone file. 
# Check if reading a HFSS touchstone file with non-50Ohm impedances ntwk_hfss = rf.Network(os.path.join(self.test_dir, 'hfss_threeport_DB.s3p')) self.assertFalse(npy.isclose(ntwk_hfss.z0[0,0], 50)) def test_constructor_from_pickle(self): rf.Network(os.path.join(self.test_dir, 'ntwk1.ntwk')) def test_constructor_from_fid_touchstone(self): filename= os.path.join(self.test_dir, 'ntwk1.s2p') with open(filename,'rb') as fid: rf.Network(fid) def test_open_saved_touchstone(self): self.ntwk1.write_touchstone('ntwk1Saved',dir=self.test_dir) ntwk1Saved = rf.Network(os.path.join(self.test_dir, 'ntwk1Saved.s2p')) self.assertEqual(self.ntwk1, ntwk1Saved) os.remove(os.path.join(self.test_dir, 'ntwk1Saved.s2p')) def test_pickling(self): original_ntwk = self.ntwk1 with tempfile.NamedTemporaryFile(dir=self.test_dir, suffix='ntwk') as fid: pickle.dump(original_ntwk, fid, protocol=2) # Default Python2: 0, Python3: 3 fid.seek(0) unpickled = pickle.load(fid) self.assertEqual(original_ntwk, unpickled) def test_stitch(self): tmp = self.ntwk1.copy() tmp.f = tmp.f+ tmp.f[0] c = rf.stitch(self.ntwk1, tmp) def test_cascade(self): self.assertEqual(self.ntwk1 ** self.ntwk2, self.ntwk3) self.assertEqual(self.Fix ** self.DUT ** self.Fix.flipped(), self.Meas) def test_connect(self): self.assertEqual(rf.connect(self.ntwk1, 1, self.ntwk2, 0) , \ self.ntwk3) xformer = rf.Network() xformer.frequency=(1,) xformer.s = ((0,1),(1,0)) # connects thru xformer.z0 = (50,25) # transforms 50 ohm to 25 ohm c = rf.connect(xformer,0,xformer,1) # connect 50 ohm port to 25 ohm port self.assertTrue(npy.all(npy.abs(c.s-rf.impedance_mismatch(50, 25)) < 1e-6)) def test_connect_multiports(self): a = rf.Network() a.frequency=(1,) a.s = npy.arange(16).reshape(4,4) a.z0 = npy.arange(4) + 1 # Z0 should never be zero b = rf.Network() b.frequency=(1,) b.s = npy.arange(16).reshape(4,4) b.z0 = npy.arange(4)+10 c=rf.connect(a,2,b,0,2) self.assertTrue((c.z0==[1,2,12,13]).all()) d=rf.connect(a,0,b,0,3) self.assertTrue((d.z0==[4,13]).all()) def test_connect_fast(self): raise SkipTest('not supporting this function currently ') self.assertEqual(rf.connect_fast(self.ntwk1, 1, self.ntwk2, 0) , \ self.ntwk3) xformer = rf.Network() xformer.frequency=(1,) xformer.s = ((0,1),(1,0)) # connects thru xformer.z0 = (50,25) # transforms 50 ohm to 25 ohm c = rf.connect_fast(xformer,0,xformer,1) # connect 50 ohm port to 25 ohm port self.assertTrue(npy.all(npy.abs(c.s-rf.impedance_mismatch(50, 25)) < 1e-6)) def test_flip(self): self.assertEqual(rf.connect(self.ntwk1, 1, self.ntwk2, 0) , \ self.ntwk3) gain = rf.Network() gain.frequency=(1,) gain.s = ((0,2),(0.5,0)) # connects thru with gain of 2.0 gain.z0 = (37,82) flipped = gain.copy() flipped.flip() c = rf.connect(gain,1,flipped,0) self.assertTrue(npy.all(npy.abs(c.s - npy.array([[0,1],[1,0]])) < 1e-6)) def test_de_embed_by_inv(self): self.assertEqual(self.ntwk1.inv ** self.ntwk3, self.ntwk2) self.assertEqual(self.ntwk3 ** self.ntwk2.inv, self.ntwk1) self.assertEqual(self.Fix.inv ** self.Meas ** self.Fix.flipped().inv, self.DUT) def test_plot_one_port_db(self): self.ntwk1.plot_s_db(0,0) def test_plot_one_port_deg(self): self.ntwk1.plot_s_deg(0,0) def test_plot_one_port_smith(self): self.ntwk1.plot_s_smith(0,0) def test_plot_two_port_db(self): self.ntwk1.plot_s_db() def test_plot_two_port_deg(self): self.ntwk1.plot_s_deg() def test_plot_two_port_smith(self): self.ntwk1.plot_s_smith() def test_zy_singularities(self): open = rf.N(f=[1], s=[1], z0=[50]) short = rf.N(f=[1], s=[-1], z0=[50]) react = 
rf.N(f=[1],s=[[0,1],[1,0]],z0=50) z = open.z y = short.y a = react.y def test_conversions(self): #Converting to other format and back to S-parameters should return the original network s_random = npy.random.uniform(-10, 10, (self.freq.npoints, 2, 2)) + 1j * npy.random.uniform(-10, 10, (self.freq.npoints, 2, 2)) ntwk_random = rf.Network(s=s_random, frequency=self.freq) for test_z0 in (50, 10, 90+10j, 4-100j): for test_ntwk in (self.ntwk1, self.ntwk2, self.ntwk3, ntwk_random): ntwk = rf.Network(s=test_ntwk.s, f=test_ntwk.f, z0=test_z0) npy.testing.assert_allclose(rf.a2s(rf.s2a(ntwk.s, test_z0), test_z0), ntwk.s) npy.testing.assert_allclose(rf.z2s(rf.s2z(ntwk.s, test_z0), test_z0), ntwk.s) npy.testing.assert_allclose(rf.y2s(rf.s2y(ntwk.s, test_z0), test_z0), ntwk.s) npy.testing.assert_allclose(rf.h2s(rf.s2h(ntwk.s, test_z0), test_z0), ntwk.s) npy.testing.assert_allclose(rf.t2s(rf.s2t(ntwk.s)), ntwk.s) npy.testing.assert_allclose(rf.t2s(rf.s2t(self.Fix.s)), self.Fix.s) def test_sparam_conversion_with_complex_char_impedance(self): ''' Renormalize a 2-port network wrt to complex characteristic impedances using power-waves definition of s-param Example based on scikit-rf issue #313 ''' f0 = rf.Frequency(75.8, npoints=1, unit='GHz') s0 = npy.array([ [-0.194 - 0.228j, -0.721 + 0.160j], [-0.721 + 0.160j, +0.071 - 0.204j]]) ntw = rf.Network(frequency=f0, s=s0, z0=50, name='dut') # complex characteristic impedance to renormalize to zdut = 100 + 10j # reference solutions obtained from ANSYS Circuit or ADS (same res) # case 1: z0=[50, zdut] s_ref = npy.array([[ [-0.01629813-0.29764199j, -0.6726785 +0.24747539j], [-0.6726785 +0.24747539j, -0.30104687-0.10693578j]]]) npy.testing.assert_allclose(rf.z2s(ntw.z, z0=[50, zdut]), s_ref) npy.testing.assert_allclose(rf.renormalize_s(ntw.s, [50,50], [50,zdut]), s_ref) # case 2: z0=[zdut, zdut] s_ref = npy.array([[ [-0.402829859501534 - 0.165007172677339j,-0.586542065592524 + 0.336098534178339j], [-0.586542065592524 + 0.336098534178339j,-0.164707376748782 - 0.21617153431756j]]]) npy.testing.assert_allclose(rf.z2s(ntw.z, z0=[zdut, zdut]), s_ref) npy.testing.assert_allclose(rf.renormalize_s(ntw.s, [50,50], [zdut,zdut]), s_ref) # Compararing Z and Y matrices from reference ones (from ADS) # Z or Y matrices do not depend of characteristic impedances. # Precision is 1e-4 due to rounded results in ADS export files z_ref = npy.array([[ [34.1507 -65.6786j, -37.7994 +73.7669j], [-37.7994 +73.7669j, 55.2001 -86.8618j]]]) npy.testing.assert_allclose(ntw.z, z_ref, atol=1e-4) y_ref = npy.array([[ [0.0926 +0.0368j, 0.0770 +0.0226j], [0.0770 +0.0226j, 0.0686 +0.0206j]]]) npy.testing.assert_allclose(ntw.y, y_ref, atol=1e-4) def test_sparam_conversion_vs_sdefinition(self): ''' Check that power-wave or pseudo-waves scattering parameters definitions give same results for real characteristic impedances ''' f0 = rf.Frequency(75.8, npoints=1, unit='GHz') s_ref = npy.array([[ # random values [-0.1000 -0.2000j, -0.3000 +0.4000j], [-0.3000 +0.4000j, 0.5000 -0.6000j]]]) ntw = rf.Network(frequency=f0, s=s_ref, z0=50, name='dut') # renormalize s parameter according one of the definition. 
# As characteristic impedances are all real, should be all equal npy.testing.assert_allclose(ntw.s, s_ref) npy.testing.assert_allclose(rf.renormalize_s(ntw.s, 50, 50, s_def='power'), s_ref) npy.testing.assert_allclose(rf.renormalize_s(ntw.s, 50, 50, s_def='pseudo'), s_ref) npy.testing.assert_allclose(rf.renormalize_s(ntw.s, 50, 50, s_def='traveling'), s_ref) # also check Z and Y matrices, just in case z_ref = npy.array([[ [18.0000 -16.0000j, 20.0000 + 40.0000j], [20.0000 +40.0000j, 10.0000 -80.0000j]]]) npy.testing.assert_allclose(ntw.z, z_ref, atol=1e-4) y_ref = npy.array([[ [0.0251 +0.0023j, 0.0123 -0.0066j], [0.0123 -0.0066j, 0.0052 +0.0055j]]]) npy.testing.assert_allclose(ntw.y, y_ref, atol=1e-4) # creating network by specifying s-params definition ntw_power = rf.Network(frequency=f0, s=s_ref, z0=50, s_def='power') ntw_pseudo = rf.Network(frequency=f0, s=s_ref, z0=50, s_def='pseudo') ntw_legacy = rf.Network(frequency=f0, s=s_ref, z0=50, s_def='traveling') self.assertTrue(ntw_power == ntw_pseudo) self.assertTrue(ntw_power == ntw_legacy) def test_network_from_z_or_y(self): ' Construct a network from its z or y parameters ' # test for both real and complex char. impedance # and for 2 frequencies z0 = [npy.random.rand(), npy.random.rand()+1j*npy.random.rand()] freqs = npy.array([1, 2]) # generate arbitrary complex z and y z_ref = npy.random.rand(2,3,3) + 1j*npy.random.rand(2,3,3) y_ref = npy.random.rand(2,3,3) + 1j*npy.random.rand(2,3,3) # create networks from z or y and compare ntw.z to the reference # check that the conversions work for all s-param definitions for s_def in S_DEFINITIONS: ntwk = rf.Network(s_def=s_def) ntwk.z0 = rf.fix_z0_shape(z0, 2, 3) ntwk.f = freqs # test #1: define the network directly from z ntwk.z = z_ref npy.testing.assert_allclose(ntwk.z, z_ref) # test #2: define the network from s, after z -> s (s_def is important) ntwk.s = rf.z2s(z_ref, z0, s_def=s_def) npy.testing.assert_allclose(ntwk.z, z_ref) # test #3: define the network directly from y ntwk.y = y_ref npy.testing.assert_allclose(ntwk.y, y_ref) # test #4: define the network from s, after y -> s (s_def is important) ntwk.s = rf.y2s(y_ref, z0, s_def=s_def) npy.testing.assert_allclose(ntwk.y, y_ref) def test_z0_pure_imaginary(self): ' Test cases where z0 is pure imaginary ' # test that conversion to Z or Y does not give NaN for pure imag z0 for s_def in S_DEFINITIONS: ntwk = rf.Network(s_def=s_def) ntwk.z0 = npy.array([50j, -50j]) ntwk.f = npy.array([1000]) ntwk.s = npy.random.rand(1,2,2) + npy.random.rand(1,2,2)*1j self.assertFalse(npy.any(npy.isnan(ntwk.z))) self.assertFalse(npy.any(npy.isnan(ntwk.y))) def test_yz(self): tinyfloat = 1e-12 ntwk = rf.Network() ntwk.z0 = npy.array([28,75+3j]) ntwk.f = npy.array([1000, 2000]) ntwk.s = rf.z2s(npy.array([[[1+1j,5,11],[40,5,3],[16,8,9+8j]], [[1,20,3],[14,10,16],[27,18,-19-2j]]])) self.assertTrue((abs(rf.y2z(ntwk.y)-ntwk.z) < tinyfloat).all()) self.assertTrue((abs(rf.y2s(ntwk.y, ntwk.z0)-ntwk.s) < tinyfloat).all()) self.assertTrue((abs(rf.z2y(ntwk.z)-ntwk.y) < tinyfloat).all()) self.assertTrue((abs(rf.z2s(ntwk.z, ntwk.z0)-ntwk.s) < tinyfloat).all()) def test_mul(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) # operating on networks self.assertTrue( ((a*a).s == npy.array([[[-3+4j]],[[-7+24j]]])).all()) # operating on numbers self.assertTrue( ((2*a*2).s == npy.array([[[4+8j]],[[12+16j]]])).all()) # operating on list self.assertTrue( ((a*[1,2]).s == npy.array([[[1+2j]],[[6+8j]]])).all()) self.assertTrue( (([1,2]*a).s == npy.array([[[1+2j]],[[6+8j]]])).all()) def 
test_sub(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) # operating on networks self.assertTrue( ((a-a).s == npy.array([[[0+0j]],[[0+0j]]])).all()) # operating on numbers self.assertTrue( ((a-(2+2j)).s == npy.array([[[-1+0j]],[[1+2j]]])).all()) # operating on list self.assertTrue( ((a-[1+1j,2+2j]).s == npy.array([[[0+1j]],[[1+2j]]])).all()) def test_div(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) # operating on networks self.assertTrue( ((a/a).s == npy.array([[[1+0j]],[[1+0j]]])).all()) # operating on numbers self.assertTrue( ((a/2.).s == npy.array([[[.5+1j]],[[3/2.+2j]]])).all()) # operating on list self.assertTrue( ((a/[1,2]).s == npy.array([[[1+2j]],[[3/2.+2j]]])).all()) def test_add(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) # operating on networks self.assertTrue( ((a+a).s == npy.array([[[2+4j]],[[6+8j]]])).all()) # operating on numbers self.assertTrue( ((a+2+2j).s == npy.array([[[3+4j]],[[5+6j]]])).all()) # operating on list self.assertTrue( ((a+[1+1j,2+2j]).s == npy.array([[[2+3j]],[[5+6j]]])).all()) def test_interpolate(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) freq = rf.F.from_f(npy.linspace(1,2,4), unit='ghz') b = a.interpolate(freq) # TODO: numerically test for correct interpolation def test_interpolate_rational(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) freq = rf.F.from_f(npy.linspace(1,2,4), unit='ghz') b = a.interpolate(freq, kind='rational') # TODO: numerically test for correct interpolation def test_interpolate_self_npoints(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) a.interpolate_self_npoints(4) # TODO: numerically test for correct interpolation def test_interpolate_from_f(self): a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1) a.interpolate_from_f(npy.linspace(1,2,4), unit='ghz') # TODO: numerically test for correct interpolation def test_slicer(self): a = rf.Network(f=[1,2,4,5,6], s=[1,1,1,1,1], z0=50 ) b = a['2-5ghz'] tinyfloat = 1e-12 self.assertTrue((abs(b.frequency.f - [2e9,4e9,5e9]) < tinyfloat).all()) # Network classifiers def test_is_reciprocal(self): a = rf.Network(f=[1, 2], s=[[0, 1, 0], [0, 0, 1], [1, 0, 0]], z0=50) self.assertFalse(a.is_reciprocal(), 'A circulator is not reciprocal.') b = rf.Network(f=[1, 2], s=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], z0=50) self.assertTrue(b.is_reciprocal(), 'This power divider is reciprocal.') return def test_is_symmetric(self): # 2-port a = rf.Network(f=[1, 2], s=[[-1, 0], [0, -1]], z0=50) self.assertTrue(a.is_symmetric(), 'A short is symmetric.') self.assertRaises(ValueError, a.is_symmetric, port_order={1: 2}) # error raised by renumber() a.s[0, 0, 0] = 1 self.assertFalse(a.is_symmetric(), 'non-symmetrical') # 3-port b = rf.Network(f=[1, 2], s=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], z0=50) with self.assertRaises(ValueError) as context: b.is_symmetric() self.assertEqual(str(context.exception), 'test of symmetric is only valid for a 2N-port network') # 4-port c = rf.Network(f=[1, 2], s=[[0, 1j, 1, 0], [1j, 0, 0, 1], [1, 0, 0, 1j], [0, 1, 1j, 0]], z0=50) self.assertTrue(c.is_symmetric(n=2), 'This quadrature hybrid coupler is symmetric.') self.assertTrue(c.is_symmetric(n=2, port_order={0: 1, 1: 2, 2: 3, 3: 0}), 'This quadrature hybrid coupler is symmetric even after rotation.') with self.assertRaises(ValueError) as context: c.is_symmetric(n=3) self.assertEqual(str(context.exception), 'specified order n = 3 must be between 1 and N = 2, inclusive') d = rf.Network(f=[1, 2], s=[[1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 1], [0, 1, 0, 1]], z0=50) self.assertTrue(d.is_symmetric(n=1), 'This contrived non-reciprocal device has a 
line of symmetry.') self.assertFalse(d.is_symmetric(n=2), 'This device only has first-order line symmetry.') self.assertFalse(d.is_symmetric(port_order={0: 1, 1: 0}), 'This device is no longer symmetric after reordering ports 1 and 2.') self.assertTrue(d.is_symmetric(port_order={0: 1, 1: 0, 2: 3, 3: 2}), 'This device is symmetric after swapping ports 1 with 2 and 3 with 4.') # 6-port x = rf.Network(f=[1, 2], s=[[0, 0, 0, 0, 0, 0], [0, 1, 9, 0, 0, 0], [0, 0, 2, 0, 0, 0], [0, 0, 0, 2, 0, 0], [0, 0, 0, 9, 1, 0], [0, 0, 0, 0, 0, 0]], z0=50) self.assertFalse(x.is_symmetric(n=3)) self.assertFalse(x.is_symmetric(n=2)) self.assertTrue(x.is_symmetric(n=1)) self.assertTrue(x.is_symmetric(n=1, port_order={-3: -1, -1: -3, 0: 2, 2: 0})) # 8-port s8p_diag = [1j, -1j, -1j, 1j, 1j, -1j, -1j, 1j] s8p_mat = npy.identity(8, dtype=complex) for row in range(8): s8p_mat[row, :] *= s8p_diag[row] y = rf.Network(f=[1, 2], s=s8p_mat, z0=50) self.assertTrue(y.is_symmetric()) self.assertTrue(y.is_symmetric(n=2)) self.assertFalse(y.is_symmetric(n=4)) return def test_is_passive(self): a = rf.Network(f=[1, 2], s=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], z0=50) self.assertTrue(a.is_passive(), 'This power divider is passive.') b = rf.Network(f=[1, 2], s=[[0, 0], [10, 0]], z0=50) self.assertFalse(b.is_passive(), 'A unilateral amplifier is not passive.') return def test_is_lossless(self): a = rf.Network(f=[1, 2], s=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], z0=50) self.assertFalse(a.is_lossless(), 'A resistive power divider is lossy.') b = rf.Network(f=[1, 2], s=[[0, -1j/npy.sqrt(2), -1j/npy.sqrt(2)], [-1j/npy.sqrt(2), 1./2, -1./2], [-1j/npy.sqrt(2), -1./2, 1./2]], z0=50) self.assertTrue(b.is_lossless(), 'This unmatched power divider is lossless.') return def test_noise(self): a = rf.Network(os.path.join(self.test_dir,'ntwk_noise.s2p')) nf = 10**(0.05) self.assertTrue(a.noisy) self.assertTrue(abs(a.nfmin[0] - nf) < 1.e-6, 'noise figure does not match original spec') self.assertTrue(abs(a.z_opt[0] - 50.) < 1.e-6, 'optimal resistance does not match original spec') self.assertTrue(abs(a.rn[0] - 0.1159*50.) < 1.e-6, 'equivalent resistance does not match original spec') self.assertTrue(npy.all(abs(a.g_opt) < 1.e-6), 'calculated optimal reflection coefficient does not match original coefficients') b = rf.Network(f=[1, 2], s=[[[0, 1], [1, 0]], [[0, 1], [1, 0]]], z0=50).interpolate(a.frequency) with self.assertRaises(ValueError) as context: b.n with self.assertRaises(ValueError) as context: b.f_noise self.assertEqual(str(context.exception), 'network does not have noise') c = a ** b self.assertTrue(a.noisy) self.assertTrue(abs(c.nfmin[0] - nf) < 1.e-6, 'noise figure does not match original spec') self.assertTrue(abs(c.z_opt[0] - 50.) < 1.e-6, 'optimal resistance does not match original spec') self.assertTrue(abs(c.rn[0] - 0.1159*50.) < 1.e-6, 'equivalent resistance does not match original spec') d = b ** a self.assertTrue(d.noisy) self.assertTrue(abs(d.nfmin[0] - nf) < 1.e-6, 'noise figure does not match original spec') self.assertTrue(abs(d.z_opt[0] - 50.) < 1.e-6, 'optimal resistance does not match original spec') self.assertTrue(abs(d.rn[0] - 0.1159*50.) < 1.e-6, 'equivalent resistance does not match original spec') e = a ** a self.assertTrue(abs(e.nfmin[0] - (nf + (nf-1)/(10**2))) < 1.e-6, 'noise figure does not match Friis formula') self.assertTrue(a.noisy) self.assertTrue(abs(a.nfmin[0] - nf) < 1.e-6, 'noise figure was altered') self.assertTrue(abs(a.z_opt[0] - 50.) 
< 1.e-6, 'optimal resistance was altered') self.assertTrue(abs(a.rn[0] - 0.1159*50.) < 1.e-6, 'equivalent resistance was altered') tem = DistributedCircuit(z0=50) inductor = tem.inductor(1e-9).interpolate(a.frequency) f = inductor ** a expected_zopt = 50 - 2j*npy.pi*1e+9*1e-9 self.assertTrue(abs(f.z_opt[0] - expected_zopt) < 1.e-6, 'optimal resistance was not 50 ohms - inductor') return def test_noise_deembed(self): f1_ =[75.5, 75.5] ; f2_=[75.5, 75.6] ; npt_ = [1,2] # single freq and multifreq for f1,f2,npt in zip (f1_,f2_,npt_) : freq=rf.Frequency(f1,f2,npt,'ghz') ntwk4_n = rf.Network(os.path.join(self.test_dir,'ntwk4_n.s2p'), f_unit='GHz').interpolate(freq) ntwk4 = rf.Network(os.path.join(self.test_dir,'ntwk4.s2p'),f_unit='GHz').interpolate(freq) thru = rf.Network(os.path.join(self.test_dir,'thru.s2p'),f_unit='GHz').interpolate(freq) ntwk4_thru = ntwk4 ** thru ;ntwk4_thru.name ='ntwk4_thru' retrieve_thru = ntwk4.inv ** ntwk4_thru ;retrieve_thru.name ='retrieve_thru' self.assertEqual(retrieve_thru, thru) self.assertTrue(ntwk4_thru.noisy) self.assertTrue(retrieve_thru.noisy) self.assertTrue((abs(thru.nfmin - retrieve_thru.nfmin) < 1.e-6).all(), 'nf not retrieved by noise deembed') self.assertTrue((abs(thru.rn - retrieve_thru.rn) < 1.e-6).all(), 'rn not retrieved by noise deembed') self.assertTrue((abs(thru.z_opt - retrieve_thru.z_opt) < 1.e-6).all(), 'noise figure does not match original spec') ntwk4_n_thru = ntwk4_n ** thru ;ntwk4_n_thru.name ='ntwk4_n_thru' retrieve_n_thru = ntwk4_n.inv ** ntwk4_n_thru ;retrieve_n_thru.name ='retrieve_n_thru' self.assertTrue(ntwk4_n_thru.noisy) self.assertEqual(retrieve_n_thru, thru) self.assertTrue(ntwk4_n_thru.noisy) self.assertTrue(retrieve_n_thru.noisy) self.assertTrue((abs(thru.nfmin - retrieve_n_thru.nfmin) < 1.e-6).all(), 'nf not retrieved by noise deembed') self.assertTrue((abs(thru.rn - retrieve_n_thru.rn) < 1.e-6).all(), 'rn not retrieved by noise deembed') self.assertTrue((abs(thru.z_opt - retrieve_n_thru.z_opt) < 1.e-6).all(), 'noise figure does not match original spec') tuner, x,y,g = tuner_constellation() newnetw = thru.copy() nfmin_set=4.5; gamma_opt_set=complex(.7,-0.2); rn_set=1 newnetw.set_noise_a(thru.noise_freq, nfmin_db=nfmin_set, gamma_opt=gamma_opt_set, rn=rn_set ) z = newnetw.nfdb_gs(g)[:,0] freq = thru.noise_freq.f[0] gamma_opt_rb, nfmin_rb = plot_contour(freq,x,y,z, min0max1=0, graph=False) self.assertTrue(abs(nfmin_set - nfmin_rb) < 1.e-2, 'nf not retrieved by noise deembed') self.assertTrue(abs(gamma_opt_rb.s[0,0,0] - gamma_opt_set) < 1.e-1, 'nf not retrieved by noise deembed') def test_se2gmm2se_mag(self): ntwk4 = rf.Network(os.path.join(self.test_dir, 'cst_example_4ports.s4p')) ntwk4t = deepcopy(ntwk4) ntwk4t.se2gmm(p=2) ntwk4t.gmm2se(p=2) self.assertTrue(npy.allclose(abs(ntwk4.s), abs(ntwk4t.s), rtol=1E-7, atol=0)) # phase testing does not pass - see #367 #self.assertTrue(npy.allclose(npy.angle(ntwk4.s), npy.angle(ntwk4t.s), rtol=1E-7, atol=1E-10)) def test_s_active(self): ''' Test the active s-parameters of a 2-ports network ''' s_ref = self.ntwk1.s # s_act should be equal to s11 if a = [1,0] npy.testing.assert_array_almost_equal(rf.s2s_active(s_ref, [1, 0])[:,0], s_ref[:,0,0]) # s_act should be equal to s22 if a = [0,1] npy.testing.assert_array_almost_equal(rf.s2s_active(s_ref, [0, 1])[:,1], s_ref[:,1,1]) # s_act should be equal to s11 if a = [1,0] npy.testing.assert_array_almost_equal(self.ntwk1.s_active([1, 0])[:,0], s_ref[:,0,0]) # s_act should be equal to s22 if a = [0,1] 
npy.testing.assert_array_almost_equal(self.ntwk1.s_active([0, 1])[:,1], s_ref[:,1,1]) def test_vswr_active(self): ''' Test the active vswr-parameters of a 2-ports network ''' s_ref = self.ntwk1.s vswr_ref = self.ntwk1.s_vswr # vswr_act should be equal to vswr11 if a = [1,0] npy.testing.assert_array_almost_equal(rf.s2vswr_active(s_ref, [1, 0])[:,0], vswr_ref[:,0,0]) # vswr_act should be equal to vswr22 if a = [0,1] npy.testing.assert_array_almost_equal(rf.s2vswr_active(s_ref, [0, 1])[:,1], vswr_ref[:,1,1]) # vswr_act should be equal to vswr11 if a = [1,0] npy.testing.assert_array_almost_equal(self.ntwk1.vswr_active([1, 0])[:,0], vswr_ref[:,0,0]) # vswr_act should be equal to vswr22 if a = [0,1] npy.testing.assert_array_almost_equal(self.ntwk1.vswr_active([0, 1])[:,1], vswr_ref[:,1,1]) suite = unittest.TestLoader().loadTestsFromTestCase(NetworkTestCase) unittest.TextTestRunner(verbosity=2).run(suite)
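The suite above leans heavily on the ** cascade operator. A minimal sketch of what test_cascade exercises, assuming scikit-rf (skrf) and numpy are installed and using illustrative network values:

import numpy as np
import skrf as rf

freq = rf.Frequency(1, 2, 3, unit='ghz')
s = np.zeros((3, 2, 2), dtype=complex)
s[:, 0, 1] = s[:, 1, 0] = 0.5              # matched ~6 dB attenuator
atten = rf.Network(frequency=freq, s=s, z0=50)
cascade = atten ** atten                   # the operator test_cascade checks
print(cascade.s_db[0, 1, 0])               # approximately -12 dB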
bsd-3-clause
-4,573,243,307,892,667,400
44.332847
136
0.54758
false
arunchandramouli/fanofpython
code/features/decorators/dec7.py
1
1428
import logging
import datetime

''' Set the logger '''
logging.basicConfig(level=logging.INFO)
core_logger = logging.getLogger("Python")

''' Aim :: Write a plain decorator to execute a function '''

''' A decorator to execute and time the methods of the class '''
def execute(func):
    def inner(*args, **kwargs):
        core_logger.info("Executing function - %s" % func.__name__)
        start = datetime.datetime.now()
        result = func(*args, **kwargs)
        end = datetime.datetime.now()
        core_logger.info("Result == %s" % result)
        core_logger.info("Total time taken %s" % (end - start))
        return result
    return inner

''' A class decorator that applies the decorator above to all plain methods of the class '''
def apply_class_decorator(klass):
    for key, value in list(vars(klass).items()):
        # only plain functions are wrapped; classmethod/staticmethod
        # objects and non-callable attributes are left untouched
        if callable(value):
            setattr(klass, key, execute(value))
    return klass

''' Define a Simple Class '''
@apply_class_decorator
class Alpha(object):
    def __init__(instance, *args):
        instance.x = args

    def instancemethod(instance):
        return "I am an instance method"

    @classmethod
    def classmethod(klass):
        return "I am a Class Method"

    @staticmethod
    def staticmethod():
        return "I am a Static Method"

if __name__ == "__main__":
    instance_x = Alpha()
    print instance_x, '\n', instance_x.classmethod()  #,'\n',instance_x.staticmethod(),'\n'
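For contrast, a hedged Python 3 rendering of the same timing decorator (my sketch, not part of the original file), using functools.wraps so the wrapped function keeps its name:

import datetime
import functools
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("Python")

def execute(func):
    @functools.wraps(func)
    def inner(*args, **kwargs):
        log.info("Executing function - %s", func.__name__)
        start = datetime.datetime.now()
        result = func(*args, **kwargs)
        log.info("Total time taken %s", datetime.datetime.now() - start)
        return result
    return inner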
gpl-3.0
-2,581,313,880,784,152,600
13.424242
89
0.654762
false
ianunruh/flask-api-skeleton
tests/routes/conftest.py
1
1233
import json import uuid import pytest from backend.app import app, db from backend.model import Session, User @pytest.fixture(scope='module') def client(): client = app.test_client() client.testing = True return client @pytest.fixture(scope='module') def test_user(): username = 'testuser-%s' % str(uuid.uuid4()) email = '[email protected]' password = 'password' user = User.query.filter_by(username=username).first() if not user: user = User(username=username, email=email) user.change_password(password) db.session.add(user) db.session.commit() yield user db.session.delete(user) db.session.commit() @pytest.fixture(scope='module') def session(client, test_user): params = { 'username': test_user.username, 'password': 'password', } resp = client.post('/sessions', data=json.dumps(params)) assert resp.status_code == 200 data = json.loads(resp.data.decode('utf-8')) assert data['token'] yield data # Clean up session Session.query.filter_by(token=data['token']).delete() @pytest.fixture(scope='module') def auth_headers(session): return { 'X-Auth-Token': session['token'], }
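A hedged sketch (not from the repo) of how a route test could consume these fixtures; the '/users/me' endpoint is hypothetical, while client and auth_headers resolve exactly as defined above:

def test_get_current_user(client, auth_headers):
    # pytest injects the fixtures; the X-Auth-Token header authenticates the call
    resp = client.get('/users/me', headers=auth_headers)
    assert resp.status_code == 200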
mit
8,456,106,094,660,362,000
21.418182
60
0.644769
false
DarthMaulware/EquationGroupLeaks
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/DataHandlers/Mcl_Cmd_Services_DataHandler.py
1
4885
# uncompyle6 version 2.9.10 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10) # [GCC 6.2.0 20161005] # Embedded file name: Mcl_Cmd_Services_DataHandler.py def DataHandlerMain(namespace, InputFilename, OutputFilename): import mcl.imports import mcl.data.Input import mcl.data.Output import mcl.status import mcl.target import mcl.object.Message mcl.imports.ImportNamesWithNamespace(namespace, 'mca.survey.cmd.services', globals()) input = mcl.data.Input.GetInput(InputFilename) output = mcl.data.Output.StartOutput(OutputFilename, input) output.Start('Services', 'services', []) msg = mcl.object.Message.DemarshalMessage(input.GetData()) if input.GetStatus() != mcl.status.MCL_SUCCESS: errorMsg = msg.FindMessage(mcl.object.Message.MSG_KEY_RESULT_ERROR) moduleError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_MODULE) osError = errorMsg.FindU32(mcl.object.Message.MSG_KEY_RESULT_ERROR_OS) output.RecordModuleError(moduleError, osError, errorStrings) output.EndWithStatus(input.GetStatus()) return True from mcl.object.XmlOutput import XmlOutput xml = XmlOutput() xml.Start('Services') while msg.GetNumRetrieved() < msg.GetCount(): if mcl.CheckForStop(): output.EndWithStatus(mcl.target.CALL_FAILED) return False results = Result() results.Demarshal(msg) sub = xml.AddSubElement('Service') sub.AddAttribute('serviceName', results.name) sub.AddAttribute('displayName', results.displayName) subsub = sub.AddSubElement('State') subsub.AddAttribute('value', '%u' % results.serviceState) subsub.SetText(_getServiceState(results.serviceState)) subsub = sub.AddSubElement('ServiceType') subsub.AddAttribute('value', '0x%08x' % results.serviceType) if results.serviceType & RESULT_SERVICE_TYPE_OWN_PROCESS: subsub.AddSubElement('SERVICE_WIN32_OWN_PROCESS') if results.serviceType & RESULT_SERVICE_TYPE_SHARE_PROCESS: subsub.AddSubElement('SERVICE_WIN32_SHARE_PROCESS') if results.serviceType & RESULT_SERVICE_TYPE_KERNEL_DRIVER: subsub.AddSubElement('SERVICE_KERNEL_DRIVER') if results.serviceType & RESULT_SERVICE_TYPE_FILE_SYSTEM_DRIVER: subsub.AddSubElement('SERVICE_FILE_SYSTEM_DRIVER') if results.serviceType & RESULT_SERVICE_TYPE_INTERACTIVE_PROCESS: subsub.AddSubElement('SERVICE_INTERACTIVE_PROCESS') subsub = sub.AddSubElement('AcceptedCodes') subsub.AddAttribute('value', '0x%08x' % results.serviceControls) if results.serviceControls & RESULT_CONTROL_ACCEPT_STOP: subsub.AddSubElement('SERVICE_ACCEPT_STOP') if results.serviceControls & RESULT_CONTROL_ACCEPT_PAUSE_CONTINUE: subsub.AddSubElement('SERVICE_ACCEPT_PAUSE_CONTINUE') if results.serviceControls & RESULT_CONTROL_ACCEPT_SHUTDOWN: subsub.AddSubElement('SERVICE_ACCEPT_SHUTDOWN') if results.serviceControls & RESULT_CONTROL_ACCEPT_PARAMCHANGE: subsub.AddSubElement('SERVICE_ACCEPT_PARAMCHANGE') if results.serviceControls & RESULT_CONTROL_ACCEPT_NETBINDCHANGE: subsub.AddSubElement('SERVICE_ACCEPT_NETBINDCHANGE') if results.serviceControls & RESULT_CONTROL_ACCEPT_HARDWAREPROFILECHANGE: subsub.AddSubElement('SERVICE_ACCEPT_HARDWAREPROFILECHANGE') if results.serviceControls & RESULT_CONTROL_ACCEPT_POWEREVENT: subsub.AddSubElement('SERVICE_ACCEPT_POWEREVENT') if results.serviceControls & RESULT_CONTROL_ACCEPT_SESSIONCHANGE: subsub.AddSubElement('SERVICE_ACCEPT_SESSIONCHANGE') output.RecordXml(xml) output.EndWithStatus(mcl.target.CALL_SUCCEEDED) return True def _getServiceState(state): if state == RESULT_SERVICE_STATE_STOPPED: return 'STOPPED' else: if state == RESULT_SERVICE_STATE_START_PENDING: return 'START PENDING' if 
state == RESULT_SERVICE_STATE_STOP_PENDING: return 'STOP PENDING' if state == RESULT_SERVICE_STATE_RUNNING: return 'RUNNING' if state == RESULT_SERVICE_STATE_CONTINUE_PENDING: return 'CONTINUE PENDING' if state == RESULT_SERVICE_STATE_PAUSE_PENDING: return 'PAUSE PENDING' if state == RESULT_SERVICE_STATE_PAUSED: return 'PAUSED' return 'UNKNOWN' if __name__ == '__main__': import sys try: namespace, InputFilename, OutputFilename = sys.argv[1:] except: print '%s <namespace> <input filename> <output filename>' % sys.argv[0] sys.exit(1) if DataHandlerMain(namespace, InputFilename, OutputFilename) != True: sys.exit(-1)
unlicense
-2,288,969,511,820,265,700
45.09434
89
0.683112
false
aburrell/pysat
pysat/ssnl/avg.py
1
8524
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import absolute_import """ Instrument independent seasonal averaging routine. Supports averaging 1D and 2D data. """ import pysat import numpy as np import pandas as pds import collections def median2D(inst, bin1, label1, bin2, label2, data_label, returnData=False): """Return a 2D average of data_label over a season and label1, label2. Parameters ---------- bin#: [min, max, number of bins] label#: string identifies data product for bin# data_label: list-like contains strings identifying data product(s) to be averaged Returns ------- median : dictionary 2D median accessed by data_label as a function of label1 and label2 over the season delineated by bounds of passed instrument objects. Also includes 'count' and 'avg_abs_dev' as well as the values of the bin edges in 'bin_x' and 'bin_y'. """ # create bins binx = np.linspace(bin1[0], bin1[1], bin1[2]+1) biny = np.linspace(bin2[0], bin2[1], bin2[2]+1) numx = len(binx)-1 numy = len(biny)-1 numz = len(data_label) # create array to store all values before taking median yarr = np.arange(numy) xarr = np.arange(numx) zarr = np.arange(numz) ans = [ [ [collections.deque() for i in xarr] for j in yarr] for k in zarr] # set up output arrays medianAns = [ [ [ None for i in xarr] for j in yarr] for k in zarr] countAns = [ [ [ None for i in xarr] for j in yarr] for k in zarr] devAns = [ [ [ None for i in xarr] for j in yarr] for k in zarr] # do loop to iterate over instrument season for inst in inst: # collect data in bins for averaging if len(inst.data) != 0: xind = np.digitize(inst.data[label1], binx)-1 for xi in xarr: xindex, = np.where(xind==xi) if len(xindex) > 0: yData = inst.data.iloc[xindex] yind = np.digitize(yData[label2], biny)-1 for yj in yarr: yindex, = np.where(yind==yj) if len(yindex) > 0: for zk in zarr: ans[zk][yj][xi].extend( yData.ix[yindex,data_label[zk]].tolist() ) # all of the loading and storing data is done # determine what kind of data is stored # if just numbers, then use numpy arrays to store data # if the data is a more generalized object, use lists to store data # need to find first bin with data dataType = [None for i in np.arange(numz)] for zk in zarr: breakNow=False for yj in yarr: for xi in xarr: if len(ans[zk][yj][xi]) > 0: dataType[zk] = type(ans[zk][yj][xi][0]) breakNow = True break if breakNow: break # determine if normal number objects are being used or if there # are more complicated objects objArray = [False]*len(zarr) for i,thing in enumerate(dataType): if thing == pds.core.series.Series: objArray[i] = 'S' elif thing == pds.core.frame.DataFrame: objArray[i] = 'F' else: # other, simple scalaRs objArray[i] = 'R' objArray = np.array(objArray) # if some pandas data series are returned in average, return a list objidx, = np.where(objArray == 'S') if len(objidx) > 0: for zk in zarr[objidx]: for yj in yarr: for xi in xarr: if len(ans[zk][yj][xi]) > 0: ans[zk][yj][xi] = list(ans[zk][yj][xi]) medianAns[zk][yj][xi] = pds.DataFrame(ans[zk][yj][xi] ).median(axis=0) countAns[zk][yj][xi] = len(ans[zk][yj][xi]) devAns[zk][yj][xi] = pds.DataFrame([abs(temp - medianAns[zk][yj][xi]) for temp in ans[zk][yj][xi] ] ).median(axis=0) # if some pandas DataFrames are returned in average, return a list objidx, = np.where(objArray == 'F') if len(objidx) > 0: for zk in zarr[objidx]: for yj in yarr: for xi in xarr: if len(ans[zk][yj][xi]) > 0: ans[zk][yj][xi] = list(ans[zk][yj][xi]) countAns[zk][yj][xi] = len(ans[zk][yj][xi]) test = pds.Panel.from_dict(dict([(i,temp) 
for i,temp in enumerate(ans[zk][yj][xi]) ]) ) medianAns[zk][yj][xi] = test.median(axis=0) devAns[zk][yj][xi] = (test.subtract(medianAns[zk][yj][xi], axis=0)).abs().median(axis=0, skipna=True) objidx, = np.where(objArray == 'R') if len(objidx) > 0: for zk in zarr[objidx]: medianAns[zk] = np.zeros((numy, numx))*np.nan countAns[zk] = np.zeros((numy, numx))*np.nan devAns[zk] = np.zeros((numy, numx))*np.nan for yj in yarr: for xi in xarr: # convert deque storing data into numpy array ans[zk][yj][xi] = np.array(ans[zk][yj][xi]) # filter out an NaNs in the arrays idx, = np.where(np.isfinite(ans[zk][yj][xi])) ans[zk][yj][xi] = (ans[zk][yj][xi])[idx] # perform median averaging if len(idx) > 0: medianAns[zk][yj,xi] = np.median(ans[zk][yj][xi]) countAns[zk][yj,xi] = len(ans[zk][yj][xi]) devAns[zk][yj,xi] = np.median(abs(ans[zk][yj][xi] - medianAns[zk][yj,xi])) # prepare output output = {} for i,label in enumerate(data_label): output[label] = {'median': medianAns[i], 'count':countAns[i], 'avg_abs_dev':devAns[i], 'bin_x': binx, 'bin_y': biny} if returnData: output[label]['data'] = ans[i] return output # simple averaging through multiple iterations def mean_by_day(inst, data_label): """Mean of data_label by day over Instrument.bounds Parameters ---------- data_label : string string identifying data product to be averaged Returns ------- mean : pandas Series simple mean of data_label indexed by day """ return _core_mean(inst, data_label, by_day=True) def mean_by_orbit(inst, data_label): """Mean of data_label by orbit over Instrument.bounds Parameters ---------- data_label : string string identifying data product to be averaged Returns ------- mean : pandas Series simple mean of data_label indexed by start of each orbit """ return _core_mean(inst, data_label, by_orbit=True) def mean_by_file(inst, data_label): """Mean of data_label by orbit over Instrument.bounds Parameters ---------- data_label : string string identifying data product to be averaged Returns ------- mean : pandas Series simple mean of data_label indexed by start of each file """ return _core_mean(inst, data_label, by_file=True) def _core_mean(inst, data_label, by_orbit=False, by_day=False, by_file=False): if by_orbit: iterator = inst.orbits elif by_day or by_file: iterator = inst else: raise ValueError('A choice must be made, by day, file, or orbit') # create empty series to hold result mean_val = pds.Series() # iterate over season, calculate the mean for inst in iterator: if not inst.data.empty: # compute mean absolute using pandas functions and store # data could be an image, or lower dimension, account for 2D and lower data = inst[data_label] data.dropna(inplace=True) if by_orbit or by_file: date = inst.data.index[0] else: date = inst.date # perform average mean_val[date] = pysat.utils.computational_form(data).mean(axis=0, skipna=True) del iterator return mean_val
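A minimal usage sketch of median2D above, assuming pysat's bundled test Instrument; the variable names ('mlt', 'latitude', 'dummy1') and the date range are illustrative:

import datetime as dt
import pysat
from pysat.ssnl import avg

inst = pysat.Instrument(platform='pysat', name='testing')
inst.bounds = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 3))
result = avg.median2D(inst, [0., 24., 24], 'mlt',
                      [-60., 60., 12], 'latitude', ['dummy1'])
medians = result['dummy1']['median']   # 12 x 24 grid of medians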
bsd-3-clause
3,378,148,420,981,645,000
34.966245
140
0.530033
false
Signbank/BSL-signbank
signbank/tools.py
1
1122
import signbank.settings import os import shutil from zipfile import ZipFile from datetime import datetime, date import json import re from django.utils.translation import override from signbank.dictionary.models import * from signbank.dictionary.update import gloss_from_identifier from django.utils.dateformat import format from django.core.exceptions import ObjectDoesNotExist from django.core.urlresolvers import reverse def get_gloss_data(): glosses = Gloss.objects.all() gloss_data = {} for gloss in glosses: gloss_data[gloss.pk] = gloss.get_fields_dict() return gloss_data def create_zip_with_json_files(data_per_file,output_path): """Creates a zip file filled with the output of the functions supplied. Data should either be a json string or a list, which will be transformed to json.""" INDENTATION_CHARS = 4 zip = ZipFile(output_path,'w') for filename, data in data_per_file.items(): if isinstance(data,list) or isinstance(data,dict): output = json.dumps(data,indent=INDENTATION_CHARS) zip.writestr(filename+'.json',output)
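A hedged sketch combining the two helpers above (requires a configured Django/signbank environment; the output path is illustrative):

gloss_export = {'glosses': get_gloss_data()}
create_zip_with_json_files(gloss_export, '/tmp/gloss_export.zip')
# produces a zip containing glosses.json, indented with 4 spaces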
bsd-3-clause
3,518,267,376,247,329,000
27.05
88
0.734403
false
jhmatthews/cobra
source/critical_density.py
1
2804
#! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
'''
critical_density.py

This program calculates critical densities for ions
'''
import pylab as pl
import matplotlib.pyplot as plt
import numpy as np
import os, sys
from constants import *
import classes as cls

#print ECS_CONSTANT

def q12(line, T):
    '''calculate q12 for a line at temperature T'''
    term_a = 1.0 * line.g_u / line.g_l
    term_b = q21(line, T)
    term_c = np.exp(H_OVER_K * line.freq / T)
    return term_a * term_b * term_c

def q21(line, T):
    '''calculate q21 for a line at temperature T'''
    term_a = 1.0 * line.g_l / line.g_u
    term_b = line.osc / (line.freq * np.sqrt(T))
    term_c = ECS_CONSTANT * 8.629e-9
    return term_a * term_b * term_c

def A21(line):
    '''calculate A21 for a given line'''
    term_a = (line.freq ** 2) * line.osc
    term_b = (1.0 * line.g_l) / (1.0 * line.g_u)
    term_c = A21_CONSTANT
    return term_a * term_b * term_c

def read_line_info(filename):
    line_array_read = np.loadtxt(filename, comments='#', unpack=True, dtype='string')
    line_array_read = np.transpose(line_array_read)
    print line_array_read[0]
    line = np.ndarray(len(line_array_read), dtype=np.object)
    for i in range(len(line)):
        z = float(line_array_read[i][1])
        ion = float(line_array_read[i][2])
        wave = ANGSTROM * float(line_array_read[i][3])
        freq = C / wave
        osc = float(line_array_read[i][4])
        gl = int(line_array_read[i][5])
        gu = int(line_array_read[i][6])
        ll = int(line_array_read[i][9])
        lu = int(line_array_read[i][10])
        line[i] = cls.line(z, ion, wave, freq, osc, gl, gu, ll, lu)
    #line = cls.line(0,0,0,0,0)
    return line

l = read_line_info("data/atomic_macro/h20_lines.py")
nentries = len(l)

# wavelengths we want without decimals.
# for the moment this is just Paschen alpha and Halpha
reference_array = [6562, 18750]
#reference_array = [2876109]

# now we want to put the values associated with the lines into store, starting at n = 0
n = 0
store = np.ndarray(len(l), dtype=np.object)
for i in range(nentries):
    for j in range(len(reference_array)):
        if int(l[i].wavelength) == reference_array[j]:
            store[n] = l[i]
            n += 1

T = 10000.0  # 10000K
#print q12(l[19], T)

# n is number of lines we want to look at
for i in range(n):
    i_lev = store[i].lu
    qsum = 0  # sum of q coefficients
    Asum = 0  # sum of A coefficients
    for j in range(len(l)):
        if l[j].lu == i_lev:
            print A21(l[j])
            print 'j < i: ', l[j].lu, l[j].ll, i_lev
            qsum += q21(l[j], T)
            Asum += A21(l[j])
        if l[j].ll == i_lev and l[j].ll < 5:
            print 'j != i: ', l[j].ll, l[j].lu, i_lev
            term = q12(l[j], T)
            qsum += q12(l[j], T)
    crit_density = Asum / qsum
    print "For wavelength %f critical density is %8.4e" % (store[i].wavelength, crit_density)
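The q12/q21 pair above encodes detailed balance. A standalone sketch of that relation, assuming scipy is available and that the repo's H_OVER_K constant is h/k:

import numpy as np
from scipy.constants import h, k

def q12_from_q21(q21, g_l, g_u, freq, T):
    # detailed balance: q12 = (g_u / g_l) * q21 * exp(h * nu / (k * T))
    return (1.0 * g_u / g_l) * q21 * np.exp(h * freq / (k * T))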
gpl-2.0
-6,040,408,780,806,114,000
17.447368
95
0.628031
false
peraktong/AnniesLasso
sandbox-scripts/mudbox_rgb.py
1
2766
""" Perform the 17-label training for a few Lambda parameters for the red giant branch sample that we normalized ourselves. """ import cPickle as pickle import numpy as np import os from astropy.table import Table import AnniesLasso as tc np.random.seed(123) # For reproducibility. #base_10_Lambda, scale_factor = sys.argv[1], sys.argv[2] #base_10_Lambda, scale_factor = 1, 0.5 print("USING BASE 10 {base_10_Lambda} AND SCALE FACTOR {scale_factor}".format( scale_factor=scale_factor, base_10_Lambda=base_10_Lambda)) # Data. PATH, CATALOG, FILE_FORMAT = ("/Users/arc/research/apogee/", "apogee-rg.fits", "apogee-rg-custom-normalization-{}.memmap") # Load the data. labelled_set = Table.read(os.path.join(PATH, CATALOG)) dispersion = np.memmap(os.path.join(PATH, FILE_FORMAT).format("dispersion"), mode="r", dtype=float) normalized_flux = np.memmap( os.path.join(PATH, FILE_FORMAT).format("flux"), mode="r", dtype=float).reshape((len(labelled_set), -1)) normalized_ivar = np.memmap( os.path.join(PATH, FILE_FORMAT).format("ivar"), mode="r", dtype=float).reshape(normalized_flux.shape) elements = [label_name for label_name in labelled_set.dtype.names \ if label_name not in ("PARAM_M_H", "SRC_H") and label_name.endswith("_H")] # Split up the data into ten random subsets. q = np.random.randint(0, 10, len(labelled_set)) % 10 validate_set = (q == 0) train_set = (~validate_set) # Create a vectorizer for all models. vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(labelled_set, tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2), scale_factor=scale_factor) # Create a model and train it on 9/10ths the subset. model = tc.L1RegularizedCannonModel(labelled_set[train_set], normalized_flux[train_set], normalized_ivar[train_set], dispersion=dispersion) model.vectorizer = vectorizer model.s2 = 0.0 model.regularization = 10**base_10_Lambda # Train it. model.train(fixed_scatter=True, use_neighbouring_pixel_theta=True) # Save it. filename = "apogee-rg-custom-{0:.2f}-{1:.2e}.model".format( base_10_Lambda, scale_factor) with open(filename, "wb") as fp: pickle.dump((model.theta, model.s2, model.regularization, model._metadata), fp, -1) # Fit the remaining set of normalized spectra (just as a check: we will need to # do this for the individual stuff too.) inferred_labels = model.fit(normalized_flux[validate_set], normalized_ivar[validate_set]) expected_labels = np.vstack([labelled_set[label_name][validate_set] \ for label_name in model.vectorizer.label_names]).T for i, label_name in enumerate(model.vectorizer.label_names): difference = expected_labels[:, i] - inferred_labels[:, i] print(i, label_name, np.median(difference), np.std(difference))
mit
1,824,675,092,095,901,400
33.575
89
0.718366
false
Sushant/cheeses
crdt/or_set.py
1
1932
from big_cheese import BigCheese from collections import defaultdict from .exceptions import ORSetException class ORSet(BigCheese): """ Set where an element may be added and removed any number of times. A unique tag is added for every add operation. On remove, all the tags in add set are copied to the remove set. An element is present iff all tags in add set are not in remove set. """ def __init__(self): self.e = defaultdict(lambda : defaultdict(set)) @classmethod def from_dict(cls, set_dict): obj = cls() if 'e' in set_dict: try: for item in set_dict['e']: obj.e[item[0]] = {'a': set(item[1]), 'r': set(item[2])} except IndexError: raise ORSetException('Failed to parse dict.') return obj def add(self, element): self.e[element]['a'].add(BigCheese.tag()) def remove(self, element): if element in self.e: self.e[element]['r'].update(self.e[element]['a']) def to_list(self): return [x for x in self.e.iterkeys() if self.e[x]['a'] - self.e[x]['r']] def lookup(self, element): if element in self.e: return len(self.e[element]['a'] - self.e[element]['r']) > 0 return False def merge(self, other): if not isinstance(other, self.__class__): raise ORSetException('Attempted to merge with different type.') all_keys = self.e.viewkeys() | other.e for k in all_keys: if k in other.e: # else defaultdict adds k to other.e self.e[k]['a'].update(other.e[k]['a']) self.e[k]['r'].update(other.e[k]['r']) def to_dict(self): return { 'type': self.crdt_type(), 'e': [[k, list(v['a']), list(v['r'])] for k, v in self.e.iteritems()] } def crdt_type(self): return 'or-set'
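A usage sketch of the add-wins behaviour the docstring describes, assuming the package's BigCheese.tag() helper (used by add()) is importable:

s = ORSet()
s.add('x')
s.remove('x')            # copies all of x's add tags into the remove set
assert not s.lookup('x')
s.add('x')               # a fresh tag that is not in the remove set
assert s.lookup('x')     # the element is visible again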
mit
-3,705,936,586,846,222,300
31.745763
81
0.556418
false
common-workflow-language/schema_salad
schema_salad/tests/test_java_codegen.py
1
2189
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Text, cast

from schema_salad import codegen, ref_resolver
from schema_salad.schema import load_schema

from .util import get_data


def test_cwl_gen(tmp_path: Path) -> None:
    topmed_example_path = get_data(
        "tests/test_real_cwl/topmed/topmed_variant_calling_pipeline.cwl"
    )
    assert topmed_example_path
    target_dir = tmp_path / "target"
    examples_dir = tmp_path / "examples"
    target_dir.mkdir()
    examples_dir.mkdir()
    shutil.copyfile(topmed_example_path, examples_dir / "valid_topmed.cwl")
    java_codegen(cwl_file_uri, target_dir, examples=examples_dir)
    pom_xml_path = target_dir / "pom.xml"
    assert pom_xml_path.exists()
    tests_dir = (
        target_dir / "src" / "test" / "java" / "org" / "w3id" / "cwl" / "cwl" / "utils"
    )
    assert tests_dir.exists()
    with open(tests_dir / "ExamplesTest.java") as f:
        assert "topmed" in f.read()


def test_meta_schema_gen(tmp_path: Path) -> None:
    target_dir = tmp_path / "target"
    target_dir.mkdir()
    java_codegen(metaschema_file_uri, target_dir)
    pom_xml_path = target_dir / "pom.xml"
    assert pom_xml_path.exists()
    src_dir = target_dir / "src" / "main" / "java" / "org" / "w3id" / "cwl" / "salad"
    assert src_dir.exists()


def get_data_uri(resource_path: str) -> str:
    path = get_data(resource_path)
    assert path
    return ref_resolver.file_uri(path)


cwl_file_uri = get_data_uri("tests/test_schema/CommonWorkflowLanguage.yml")
metaschema_file_uri = get_data_uri("metaschema/metaschema.yml")


def java_codegen(file_uri: str, target: Path, examples: Optional[Path] = None) -> None:
    document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
        file_uri
    )
    schema_raw_doc = metaschema_loader.fetch(file_uri)
    schema_doc, schema_metadata = metaschema_loader.resolve_all(
        schema_raw_doc, file_uri
    )
    codegen.codegen(
        "java",
        cast(List[Dict[Text, Any]], schema_doc),
        schema_metadata,
        document_loader,
        target=str(target),
        examples=str(examples) if examples else None,
    )
apache-2.0
-8,933,504,229,783,142,000
30.724638
87
0.654637
false
B3AU/micropython
MAX6675.py
1
2333
__author__ = 'beau'

import pyb
from FIR import FIR


class MAX6675():
    def __init__(self, CS_pin='Y8', SO_pin='Y7', SCK_pin='Y6'):
        # Thermocouple
        self.CS_pin = pyb.Pin(CS_pin, pyb.Pin.OUT_PP)
        self.CS_pin.high()
        self.SO_pin = pyb.Pin(SO_pin, pyb.Pin.IN)
        self.SO_pin.low()
        self.SCK_pin = pyb.Pin(SCK_pin, pyb.Pin.OUT_PP)
        self.SCK_pin.low()

        self.last_read_time = 0
        self.last_read_temp = 0
        self.last_error_tc = 0

        self.FIR = FIR(20)

    def read(self):
        # self.CS_pin.low()
        # pyb.delay(2)
        # self.CS_pin.high()
        # pyb.delay(220)

        # check if a new reading should be available
        #if True:
        if pyb.millis() - self.last_read_time > 220:
            #/*
            # Bring CS pin low to allow us to read the data from
            # the conversion process
            #*/
            self.CS_pin.low()

            #/* Cycle the clock for dummy bit 15 */
            self.SCK_pin.high()
            pyb.delay(1)
            self.SCK_pin.low()

            #/*
            # Read bits 14-3 from MAX6675 for the Temp. Loop for each bit reading
            # the value and storing the final value in 'temp'
            # */
            value = 0
            for i in range(12):
                self.SCK_pin.high()
                read = self.SO_pin.value()
                read = (read << 12 - i)
                value += read
                self.SCK_pin.low()

            #/* Read the TC Input pin to check for TC Errors */
            self.SCK_pin.high()
            error_tc = self.SO_pin.value()
            self.SCK_pin.low()

            # /*
            # Read the last two bits from the chip, failure to do so will result
            # in erratic readings from the chip.
            # */
            for i in range(2):
                self.SCK_pin.high()
                pyb.delay(1)
                self.SCK_pin.low()

            self.CS_pin.high()

            self.FIR.push(value)
            temp = (value * 0.25)
            self.last_read_time = pyb.millis()
            self.last_read_temp = temp
            self.last_error_tc = error_tc
            return temp, error_tc
        # too soon for a new reading
        else:
            return self.last_read_temp, self.last_error_tc
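A hedged usage sketch on a pyboard, with the constructor's default pin wiring: poll about once a second and skip samples that report a thermocouple error:

import pyb
from MAX6675 import MAX6675

tc = MAX6675(CS_pin='Y8', SO_pin='Y7', SCK_pin='Y6')
while True:
    temp, error_tc = tc.read()
    if not error_tc:
        print(temp)          # degrees Celsius, 0.25 C steps
    pyb.delay(1000)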
lgpl-3.0
-3,548,526,313,916,181,500
26.139535
82
0.477497
false
konchan/rccontrol
src/esc.py
1
4377
#/usr/bin/env python # -*- coding: utf-8 -*- # ESC.py # Author: KONNO Katsuyuki <[email protected]> # Copyright (c) 2015 KONNO Katsuyuki # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import mraa import time import logging import datetime from threading import Thread, Event class Esc(Thread): def __init__(self, pin): super(Esc, self).__init__() self.NEUTRAL = 1300 self.FORWARD_MAX = 2000 self.BACKWARD_MAX = 500 self.LIMIT_SPEED = 100 self.f_step = int((self.FORWARD_MAX-self.NEUTRAL)/100) self.b_step = int((self.NEUTRAL-self.BACKWARD_MAX)/100) self.x = mraa.Pwm(pin) self.x.period_ms(20) self.x.enable(True) self.current_pulsewidth = self.NEUTRAL self.step = 100 self.wait_time = 0.2 self.thread_wait = 0.1 self.stop_event = Event() # logger self.log = logging.getLogger('Esc') self.log.setLevel(logging.INFO) logHandler = logging.FileHandler(datetime.datetime.now().strftime('Esc_%Y%m%d_%H%M%S.log')) logHandler.setFormatter(logging.Formatter('%(asctime)s %(message)s')) self.log.addHandler(logHandler) self.logpath = "" self.log.info('Starting Esc...') def calibrate_forward(self): self.log.info('calibrating forward, %i' % self.FORWARD_MAX) self.x.pulsewidth_us(self.FORWARD_MAX) def calibrate_backward(self): self.log.info('calibrating backward, %i' % self.BACKWARD_MAX) self.x.pulsewidth_us(self.BACKWARD_MAX) def calibrate_neutral(self): self.log.info('calibrating neutral, %i' % self.NEUTRAL) self.x.pulsewidth_us(self.NEUTRAL) def set_limit(self, speed): self.LIMIT_SPEED = speed def up_range(self, start, stop): r = start while r < stop: yield r r += self.step def down_range(self, start, stop): r = start while r > stop: yield r r -= self.step def forward(self, speed): if speed > self.LIMIT_SPEED: speed = self.LIMIT_SPEED val = speed * self.f_step + self.NEUTRAL if val > self.current_pulsewidth: range_ = self.up_range(self.current_pulsewidth, val) else: range_ = self.down_range(self.current_pulsewidth, val) for v in range_: self.x.pulsewidth_us(v) time.sleep(self.wait_time) self.current_pulsewidth = val self.log.info('set forward speed to %i' % speed) def backward(self, speed): if speed > self.LIMIT_SPEED: speed = self.LIMIT_SPEED val = self.NEUTRAL - speed * self.b_step if val > self.current_pulsewidth: range_ = self.up_range(self.current_pulsewidth, val) else: range_ = self.down_range(self.current_pulsewidth, val) for v in range_: self.x.pulsewidth_us(v) time.sleep(self.wait_time) self.current_pulsewidth = val self.log.info('set backward speed to %i' % speed) def stop(self): self.stop_event.set() def 
run(self): while True: if self.stop_event.is_set(): self.forward(0) self.log.info('terminating Esc...') break self.stop_event.wait(self.thread_wait)
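A hedged usage sketch (module and pin names are illustrative): Esc is a Thread, so start() launches the watchdog loop in run(), and stop() asks it to ramp back to neutral and exit:

import time
from esc import Esc

esc = Esc(3)          # mraa PWM pin number, illustrative
esc.start()           # runs the run() loop on a background thread
esc.forward(30)       # ramps toward speed 30, capped by LIMIT_SPEED
time.sleep(5.0)
esc.stop()            # sets the stop event; run() calls forward(0) and exits
esc.join()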
mit
4,946,641,429,441,241,000
35.173554
99
0.632168
false
aoeftiger/tunespreadtool
tunespreadtool/parameter-table.py
1
1315
import numpy as np from os import system as ex def go_through_table(twiss_files, table, lshape): labels = ' --labels ' for actualsetting in table[1:]: flags = '' for i in xrange(len(table[0])): flags += ' --' + table[0][i] + ' ' + str(actualsetting[i]) ex('python tunespread.py ' + ' '.join(twiss_files) + labels + flags + ' --lshape ' + str(lshape)) labels = '' # === PSB === print "\nPSB:" # fixed parameters twiss_files = ['PSB-inj50MeV.tfs'] table = [ ['n_part', 'emit_norm_tr', 'Ekin', 'bunch_length', 'deltap'], [28.07e+11, 1.8e-6, 2, 180, 1.2e-3] ] go_through_table(twiss_files, table, np.sqrt(2*np.pi)*.35) # === PS === print "PS:" # fixed parameters twiss_files = ['PS-inj2GeV-s.tfs', 'PS-inj2GeV-data.tfs'] table = [ ['n_part', 'emit_norm_tr', 'Ekin', 'bunch_length', 'deltap'], [28.07e+11, 1.8e-6, 2, 180, 1.2e-3], [29.07e+11, 1.8e-6, 2, 180, 1.2e-3] ] go_through_table(twiss_files, table, 1.0) # === SPS === print "\nSPS:" # fixed parameters twiss_files = ['SPS-inj25GeV.tfs'] table = [ ['n_part', 'emit_norm_tr', 'Ekin', 'bunch_length', 'deltap'], [28.07e+11, 1.8e-6, 2, 180, 1.2e-3] ] go_through_table(twiss_files, table, 1.0)
apache-2.0
4,428,357,243,362,774,000
27.586957
82
0.532319
false
mrts/ask-jira
ask-jira.py
1
4516
#!/usr/bin/env python from __future__ import print_function import sys import pprint import argparse import inspect from jira.client import JIRA from lib import timetracking from lib import subissues from lib import export_import from lib import google_calendar from utils.smart_argparse_formatter import SmartFormatter import jiraconfig as conf # helpers def _make_jql_argument_parser(parser): parser.add_argument("jql", help="the JQL query used in the command") return parser # commands def projects(jira, args): """List available JIRA projects""" projects = jira.projects() print("Available JIRA projects:") pprint.pprint([project.name for project in projects]) def fields(jira, args): """List available JIRA field names and IDs""" print("Available JIRA fields (name, id):") pprint.pprint([(field['name'], field['id']) for field in jira.fields()]) def sum_timetracking_for_jql(jira, args): """Sum original estimate, time spent and time remaining for all issues that match the given JQL query""" results = timetracking.sum_timetracking_for_jql(jira, args.jql) pprint.pprint(results) sum_timetracking_for_jql.argparser = _make_jql_argument_parser def list_epics_stories_and_tasks_for_jql(jira, args): """Print a Markdown-compatible tree of epics, stories and subtasks that match the given JQL query""" results = subissues.list_epics_stories_and_tasks(jira, args.jql) print(results) list_epics_stories_and_tasks_for_jql.argparser = _make_jql_argument_parser def export_import_issues_for_jql(jira, args): """Export issues from one JIRA instance to another with comments and attachments""" import exportimportconfig exported_issues = export_import.export_import_issues(jira, exportimportconfig, args.jql) print('Successfully imported', exported_issues) export_import_issues_for_jql.argparser = _make_jql_argument_parser def import_worklogs_from_google_calendar(jira, args): """Import worklog entries from Google Calendar to corresponding JIRA tasks""" import worklogconfig hours = google_calendar.import_worklogs(jira, conf.JIRA['user'], worklogconfig, args.calendar, args.fromdate, args.todate) print('Logged', hours, 'hours') def _import_worklogs_argument_parser(parser): parser.add_argument("calendar", help="the calendar name to import " "worklogs from") parser.add_argument("fromdate", help="import date range start, " "in yyyy-mm-dd format") parser.add_argument("todate", help="import date range end, " "in yyyy-mm-dd format") return parser import_worklogs_from_google_calendar.argparser = _import_worklogs_argument_parser # main def _main(): command_name, command = _get_command() args = _parse_command_specific_arguments(command_name, command) jira = JIRA({'server': conf.JIRA['server']}, # add 'verify': False if HTTPS cert is untrusted basic_auth=(conf.JIRA['user'], conf.JIRA['password'])) command(jira, args) # helpers def _make_main_argument_parser(): parser = argparse.ArgumentParser(formatter_class=SmartFormatter) parser.add_argument("command", help="R|the command to run, available " + "commands:\n{0}".format(_list_local_commands())) return parser def _get_command(): argparser = _make_main_argument_parser() def print_help_and_exit(): argparser.print_help() sys.exit(1) if len(sys.argv) < 2: print_help_and_exit() command_name = sys.argv[1] if not command_name[0].isalpha(): print_help_and_exit() if command_name not in globals(): print("Invalid command: {0}\n".format(command_name), file=sys.stderr) print_help_and_exit() command = globals()[command_name] return command_name, command def _list_local_commands(): sorted_globals = 
list(globals().items()) sorted_globals.sort() commands = [(var, obj.__doc__) for var, obj in sorted_globals if not var.startswith('_') and inspect.isfunction(obj)] return "\n".join("'{0}': {1}".format(name, doc) for name, doc in commands) def _parse_command_specific_arguments(command_name, command): if not hasattr(command, 'argparser'): return None parser = argparse.ArgumentParser() parser.add_argument("command", help=command_name) command_argparser = command.argparser(parser) return command_argparser.parse_args() if __name__ == "__main__": _main()
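Typical invocations, assuming jiraconfig.py defines the JIRA server and credentials (the JQL string is illustrative):

./ask-jira.py projects
./ask-jira.py fields
./ask-jira.py sum_timetracking_for_jql "project = MYPROJ AND sprint in openSprints()"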
mit
-6,575,328,291,031,200,000
32.954887
97
0.68977
false
exepulveda/swfc
python/clustering_swfc_bm_no_target.py
1
4590
import sys import random import logging import collections import math import sys import argparse sys.path += ['..'] import clusteringlib as cl import numpy as np import scipy.stats import clustering_ga from scipy.spatial.distance import pdist from sklearn.cluster import KMeans from cluster_utils import fix_weights CHECK_VALID = False from case_study_bm import attributes,setup_case_study_ore,setup_case_study_all,setup_distances if __name__ == "__main__": locations,data,min_values,max_values,scale,var_types,categories = setup_case_study_ore(a=0.999) N,ND = data.shape print(N,ND) #print(min_values) #print(max_values) #print(scale) seed = 1634120 #targets = np.asfortranarray(np.percentile(data[:,-1], [15,50,85]),dtype=np.float32) #var_types[-1] = 2 #print('targets',targets) m = 2.0 force=None verbose=1 lambda_value = 0.25 filename_template = "../results/bm_{tag}_swfc_{nc}_no_target.csv" ngen=300 npop=200 cxpb=0.8 mutpb=0.4 stop_after=20 NC = 3 np.random.seed(seed) random.seed(seed) cl.utils.set_seed(seed) cl.distances.sk_setup(np.asfortranarray(np.float32(scale))) cl.distances.set_variables(np.asfortranarray(np.int32(var_types)),False) distances_cat = np.asfortranarray(np.ones((3,3)),dtype=np.float32) distances_cat[0,0] = 0.0 distances_cat[1,1] = 0.0 distances_cat[2,2] = 0.0 cl.distances.set_categorical(1, 3,distances_cat) #cl.distances.set_targeted(23,targets,False) #force = (22,0.15) force = None #initial centroids at random indices = np.random.choice(N,size=NC,replace=False) current_centroids = np.asfortranarray(np.empty((NC,ND))) current_centroids[:,:] = data[indices,:] #initial weights are uniform weights = np.asfortranarray(np.ones((NC,ND),dtype=np.float32)/ ND) for c in range(NC): weights[c,:] = fix_weights(weights[c,:],force=force) for k in range(100): best_centroids,best_u,best_energy_centroids,best_jm,current_temperature,evals = clustering_ga.optimize_centroids( data, current_centroids, weights, m, lambda_value, var_types, {}, ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after, min_values = min_values, max_values = max_values, verbose=verbose) #print("centroids",best_centroids,best_energy_centroids,"jm",best_jm) u = best_u N,NC = u.shape clusters = np.argmax(u,axis=1) centroids = best_centroids.copy() #print("centroids",centroids) #print("u",u) counter = collections.Counter(clusters) #print("number of clusters: ",counter.most_common()) best_weights,best_u,best_energy_weights,evals = clustering_ga.optimize_weights( data, centroids, weights, m, lambda_value, ngen=ngen,npop=npop,cxpb=cxpb,mutpb=mutpb,stop_after=stop_after, force=force, verbose=verbose) clusters = np.argmax(best_u,axis=1) weights = best_weights.copy() current_centroids = best_centroids.copy() #print(lambda_value,k,best_energy_centroids,best_energy_weights,"jm",best_jm) print('iteration',k,best_energy_centroids,best_energy_weights) #save data new_data = np.c_[locations,clusters] np.savetxt(filename_template.format(tag='clusters',nc=NC),new_data,delimiter=",",fmt="%.4f") np.savetxt(filename_template.format(tag='centroids',nc=NC),current_centroids,delimiter=",",fmt="%.4f") np.savetxt(filename_template.format(tag='u',nc=NC),best_u,delimiter=",",fmt="%.4f") np.savetxt(filename_template.format(tag='weights',nc=NC),best_weights,delimiter=",",fmt="%.4f") if abs(best_energy_centroids - best_energy_weights) < 1e-2: break centroid = np.asfortranarray(best_centroids,dtype=np.float32) weights = np.asfortranarray(best_weights,dtype=np.float32) clusters = np.asfortranarray(clusters,dtype=np.int8) ret_fc = 
cl.clustering.dbi_index(centroid,data,clusters,weights) ret_sill= cl.clustering.silhouette_index(data,clusters,weights) print("DB Index:",NC,ret_fc,ret_sill,sep=',') cl.distances.reset()
gpl-3.0
3,975,870,756,333,899,000
29.197368
121
0.612418
false
luoguizhou/gooderp_addons
account_cost/tests/test_cost_order.py
1
4021
# -*- coding: utf-8 -*-
from odoo.tests.common import TransactionCase
from odoo.exceptions import UserError


class TestCostOrder(TransactionCase):
    def setUp(self):
        super(TestCostOrder, self).setUp()
        self.cost_order_1 = self.env.ref('account_cost.cost_order_1')
        self.cost_order_1.partner_id = self.env.ref('core.zt')
        self.buy_order_1 = self.env.ref('buy.buy_order_1')
        self.buy_order_1.buy_order_done()
        self.receipt = self.env['buy.receipt'].search(
            [('order_id', '=', self.buy_order_1.id)])
        self.sell_order_1 = self.env.ref('sell.sell_order_1')
        self.env.ref('sell.sell_order_line_1').tax_rate = 0
        self.sell_order_1.sell_order_done()
        self.delivery = self.env['sell.delivery'].search(
            [('order_id', '=', self.sell_order_1.id)])

    def test_cost_order_confim(self):
        ''' Test confirming a service (cost) order '''
        # no name
        self.cost_order_1.name = ''
        self.cost_order_1.cost_order_confim()
        # confirming a second time should raise
        with self.assertRaises(UserError):
            self.cost_order_1.cost_order_confim()

    def test_cost_order_confim_cancel_then_confirm(self):
        ''' Test confirming a cancelled service order '''
        self.cost_order_1.state = 'cancel'
        # a cancelled order must not be confirmable
        with self.assertRaises(UserError):
            self.cost_order_1.cost_order_confim()

    def test_cost_order_confim_no_line(self):
        ''' Test confirming a service order without order lines '''
        # no line_ids
        self.cost_order_1.line_ids.unlink()
        with self.assertRaises(UserError):
            self.cost_order_1.cost_order_confim()

    def test_cost_order_confim_has_prepayment_no_bank(self):
        ''' Test confirming a service order with a prepayment but no settlement account '''
        # prepayment is set, but no settlement account
        self.cost_order_1.prepayment = 10
        with self.assertRaises(UserError):
            self.cost_order_1.cost_order_confim()

    def test_confim_generate_payment_order(self):
        ''' Test that confirming a service order generates a payment order '''
        self.cost_order_1.prepayment = 10
        self.cost_order_1.bank_account_id = self.env.ref('core.alipay')
        self.cost_order_1.cost_order_confim()

    def test_cost_order_draft(self):
        ''' Test setting a confirmed service order back to draft '''
        self.cost_order_1.cost_order_confim()
        self.cost_order_1.cost_order_draft()
        # setting back to draft a second time should raise
        with self.assertRaises(UserError):
            self.cost_order_1.cost_order_draft()

    def test_unlink(self):
        '''Delete a service order'''
        # a confirmed order cannot be deleted
        self.cost_order_1.cost_order_confim()
        with self.assertRaises(UserError):
            self.cost_order_1.unlink()
        # a draft order can be deleted
        self.cost_order_1.cost_order_draft()
        self.cost_order_1.unlink()

    def test_create_mv_cost(self):
        '''Create cost lines on the linked receipt/delivery orders'''
        # link a receipt order
        self.cost_order_1.wh_move_ids = [(4, self.receipt.buy_move_id.id)]
        self.cost_order_1.cost_order_confim()
        # link a delivery order
        self.cost_order_1.cost_order_draft()
        self.cost_order_1.wh_move_ids = [(4, self.delivery.sell_move_id.id)]
        self.cost_order_1.cost_order_confim()

    def test_cost_order_draft_has_prepayment(self):
        '''Set a confirmed service order with a prepayment back to draft'''
        self.cost_order_1.prepayment = 20
        self.cost_order_1.bank_account_id = self.env.ref('core.alipay')
        self.cost_order_1.cost_order_confim()
        self.cost_order_1.cost_order_draft()


class TestCostOrderLine(TransactionCase):
    def setUp(self):
        super(TestCostOrderLine, self).setUp()
        self.cost_order_1 = self.env.ref('account_cost.cost_order_1')

    def test_compute_all_amount(self):
        '''Compute the tax-included total'''
        self.cost_order_1.line_ids[0].amount = 100
        self.assertAlmostEqual(self.cost_order_1.line_ids[0].subtotal, 110.0)
agpl-3.0
5,454,974,457,871,182,000
33.877358
77
0.601839
false
urinieto/msaf
msaf/exceptions.py
1
1169
'''Exception classes for msaf'''


class MSAFError(Exception):
    '''The root msaf exception class'''
    pass


class NoReferencesError(MSAFError):
    '''Exception class for trying evaluations without references'''
    pass


class WrongFeaturesFormatError(MSAFError):
    '''Exception class for wrongly formatted features files'''
    pass


class NoFeaturesFileError(MSAFError):
    '''Exception class for missing features file'''
    pass


class FeaturesNotFound(MSAFError):
    '''Exception class for missing specific features in a file'''
    pass


class FeatureTypeNotFound(MSAFError):
    '''Exception class for feature type missing'''
    pass


class FeatureParamsError(MSAFError):
    '''Exception class for feature parameters missing'''
    pass


class NoAudioFileError(MSAFError):
    '''Exception class for audio file not found'''
    pass


class NoHierBoundaryError(MSAFError):
    '''Exception class for missing hierarchical boundary algorithm'''
    pass


class NoEstimationsError(MSAFError):
    '''Exception class for missing estimations'''
    pass


class WrongAlgorithmID(MSAFError):
    '''This algorithm was not found in msaf'''
    pass
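# A short usage sketch: every class above derives from MSAFError, so callers
# can catch one specific failure or the whole family with a single handler.
# run_analysis() is a hypothetical caller, not part of msaf.
from msaf.exceptions import MSAFError, NoAudioFileError

def run_analysis(path):
    raise NoAudioFileError(path)

try:
    run_analysis('song.mp3')
except NoAudioFileError as err:
    print('missing audio file:', err)
except MSAFError as err:
    print('other msaf failure:', err)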
mit
-7,676,108,470,494,181,000
19.875
69
0.72284
false
SpiderLabs/deblaze
pyamf/pyamf/tests/gateway/test_django.py
2
4736
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE for details.

"""
Django gateway tests.

@since: 0.1.0
"""

import unittest, sys, os

from django import http

from pyamf import remoting, util
from pyamf.remoting.gateway import django as _django


class HttpRequest(http.HttpRequest):
    """
    Custom C{HttpRequest} to support raw_post_data provided by
    C{django.core.handlers.*}
    """

    def __init__(self, *args, **kwargs):
        http.HttpRequest.__init__(self, *args, **kwargs)

        self.raw_post_data = ''


class DjangoGatewayTestCase(unittest.TestCase):
    def setUp(self):
        import new

        self.mod_name = '%s.%s' % (__name__, 'settings')
        sys.modules[self.mod_name] = new.module(self.mod_name)

        self.old_env = os.environ.get('DJANGO_SETTINGS_MODULE', None)
        os.environ['DJANGO_SETTINGS_MODULE'] = self.mod_name

    def tearDown(self):
        if self.old_env is not None:
            os.environ['DJANGO_SETTINGS_MODULE'] = self.old_env

        del sys.modules[self.mod_name]

    def test_request_method(self):
        gw = _django.DjangoGateway()

        http_request = HttpRequest()
        http_request.method = 'GET'

        http_response = gw(http_request)

        self.assertEquals(http_response.status_code, 405)

    def test_bad_request(self):
        gw = _django.DjangoGateway()

        request = util.BufferedByteStream()
        request.write('Bad request')
        request.seek(0, 0)

        http_request = HttpRequest()
        http_request.method = 'POST'
        http_request.raw_post_data = request.getvalue()

        http_response = gw(http_request)
        self.assertEquals(http_response.status_code, 400)

    def test_unknown_request(self):
        gw = _django.DjangoGateway()

        request = util.BufferedByteStream()
        request.write('\x00\x00\x00\x00\x00\x01\x00\x09test.test\x00'
            '\x02/1\x00\x00\x00\x14\x0a\x00\x00\x00\x01\x08\x00\x00\x00\x00'
            '\x00\x01\x61\x02\x00\x01\x61\x00\x00\x09')
        request.seek(0, 0)

        http_request = HttpRequest()
        http_request.method = 'POST'
        http_request.raw_post_data = request.getvalue()

        http_response = gw(http_request)
        envelope = remoting.decode(http_response.content)
        message = envelope['/1']

        self.assertEquals(message.status, remoting.STATUS_ERROR)

        body = message.body
        self.assertTrue(isinstance(body, remoting.ErrorFault))
        self.assertEquals(body.code, 'Service.ResourceNotFound')

    def test_expose_request(self):
        http_request = HttpRequest()

        def test(request):
            self.assertEquals(http_request, request)

        gw = _django.DjangoGateway({'test.test': test}, expose_request=True)

        request = util.BufferedByteStream()
        request.write('\x00\x00\x00\x00\x00\x01\x00\x09test.test\x00'
            '\x02/1\x00\x00\x00\x05\x0a\x00\x00\x00\x00')
        request.seek(0, 0)

        http_request.method = 'POST'
        http_request.raw_post_data = request.getvalue()

        gw(http_request)

    def _raiseException(self, e, *args, **kwargs):
        raise e()

    def test_really_bad_decode(self):
        self.old_method = remoting.decode
        remoting.decode = lambda *args, **kwargs: self._raiseException(Exception, *args, **kwargs)

        http_request = HttpRequest()
        http_request.method = 'POST'
        http_request.raw_post_data = ''

        gw = _django.DjangoGateway()

        try:
            http_response = gw(http_request)
        except:
            remoting.decode = self.old_method
            raise

        remoting.decode = self.old_method

        self.assertTrue(isinstance(http_response, http.HttpResponseServerError))
        self.assertEquals(http_response.status_code, 500)
        self.assertEquals(http_response.content,
            '500 Internal Server Error\n\nAn unexpected error occurred.')

    def test_expected_exceptions_decode(self):
        self.old_method = remoting.decode

        gw = _django.DjangoGateway()

        http_request = HttpRequest()
        http_request.method = 'POST'
        http_request.raw_post_data = ''

        try:
            for x in (KeyboardInterrupt, SystemExit):
                remoting.decode = lambda *args, **kwargs: self._raiseException(x, *args, **kwargs)
                self.assertRaises(x, gw, http_request)
        except:
            remoting.decode = self.old_method
            raise

        remoting.decode = self.old_method


def suite():
    suite = unittest.TestSuite()

    suite.addTest(unittest.makeSuite(DjangoGatewayTestCase))

    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
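# A minimal sketch of how such a gateway is typically exposed, based on the
# constructor usage in the tests above (DjangoGateway(services, expose_request=True)).
# The echo service and the URL wiring are illustrative assumptions, not part
# of this test module.
from pyamf.remoting.gateway.django import DjangoGateway

def echo(request, data):
    # with expose_request=True the Django HttpRequest arrives first
    return data

echo_gateway = DjangoGateway({'echo.echo': echo}, expose_request=True)

# urls.py (era-appropriate Django):
#   urlpatterns = patterns('', (r'^gateway/', 'yourapp.gateway.echo_gateway'))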
gpl-3.0
1,130,627,825,348,616,000
27.53012
110
0.620988
false
our-city-app/oca-backend
src/solutions/common/bizz/inbox.py
1
21833
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

import base64
import datetime
import logging
import os
import time
from StringIO import StringIO
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from types import NoneType

import jinja2
from babel.dates import format_datetime
from google.appengine.ext import deferred, db

import solutions
from mcfw.properties import long_property
from mcfw.rpc import returns, arguments
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.rtemail import EMAIL_REGEX
from rogerthat.consts import SCHEDULED_QUEUE, DAY, MC_DASHBOARD
from rogerthat.dal import parent_key_unsafe
from rogerthat.dal.app import get_app_by_id
from rogerthat.dal.profile import get_service_profile
from rogerthat.models import App
from rogerthat.rpc import users
from rogerthat.service.api import system
from rogerthat.settings import get_server_settings
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils import send_mail_via_mime, now, send_mail
from rogerthat.utils.cloud_tasks import create_task, schedule_tasks
from rogerthat.utils.models import reconstruct_key
from rogerthat.utils.transactions import run_in_transaction
from shop.constants import LOGO_LANGUAGES
from shop.exceptions import InvalidEmailFormatException
from solutions import translate as common_translate
from solutions.common import SOLUTION_COMMON
from solutions.common.bizz import SolutionModule, create_pdf
from solutions.common.bizz.loyalty import update_user_data_admins
from solutions.common.dal import get_solution_settings, get_solution_settings_or_identity_settings
from solutions.common.models import SolutionInboxMessage, SolutionSettings
from solutions.common.models.properties import SolutionUserTO
from solutions.common.utils import create_service_identity_user_wo_default
from solutions.jinja_extensions import TranslateExtension

JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader([os.path.join(os.path.dirname(__file__), '..', 'templates')]),
    extensions=[TranslateExtension, ])


@returns(NoneType)
@arguments(service_user=users.User, str_key=unicode, msg_params=dict)
def _send_styled_inbox_forwarders_email_reminder(service_user, str_key, msg_params=None):
    m = SolutionInboxMessage.get(str_key)
    if m.deleted == False and m.trashed == False and m.starred == False and m.reply_enabled == True \
            and not m.child_messages:
        send_styled_inbox_forwarders_email(service_user, str_key, msg_params, True)


@returns(NoneType)
@arguments(service_user=users.User, str_key=unicode, msg_params=dict, reminder=bool)
def send_styled_inbox_forwarders_email(service_user, str_key, msg_params, reminder=False):
    m = SolutionInboxMessage.get(str_key)
    service_identity = m.service_identity
    sln_settings = get_solution_settings(service_user)
    sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)

    def transl(key, **params):
        return common_translate(sln_settings.main_language, key, **params)

    chat_topic = transl(m.chat_topic_key)
    if m.category in (SolutionInboxMessage.CATEGORY_OCA_INFO, SolutionInboxMessage.CATEGORY_CITY_MESSAGE):
        subject = transl('there_is_a_new_message_in_your_inbox', name=m.get_sender().name)
    else:
        subject = transl('if-email-subject', function=chat_topic)
    if reminder:
        if not sln_i_settings.inbox_email_reminders_enabled:
            return
        subject = transl('inbox-forwarding-reminder-text', text=subject)

    settings = get_server_settings()
    service_profile = get_service_profile(service_user)
    community = get_community(service_profile.community_id)
    app = get_app_by_id(community.default_app)

    # multipart/related root: alternative (plain/html) body + inline image
    mimeRoot = MIMEMultipart('related')
    mimeRoot['Subject'] = subject
    mimeRoot['From'] = settings.senderEmail if app.type == App.APP_TYPE_ROGERTHAT else (
        "%s <%s>" % (community.name, app.dashboard_email_address))
    mimeRoot['To'] = ', '.join(sln_i_settings.inbox_mail_forwarders)

    mime = MIMEMultipart('alternative')
    mimeRoot.attach(mime)

    button_css = 'display: inline-block; margin-left: 0.5em; margin-right: 0.5em; -webkit-border-radius: 6px;' \
                 ' -moz-border-radius: 6px; border-radius: 6px; font-family: Arial; color: #ffffff; font-size: 14px;' \
                 ' background: #3abb9e; padding: 8px 16px 8px 16px; text-decoration: none;'

    if m.category in (SolutionInboxMessage.CATEGORY_OCA_INFO, SolutionInboxMessage.CATEGORY_CITY_MESSAGE):
        if_email_body_1 = u'%s %s' % (
            transl('there_is_a_new_message_in_your_inbox', name=m.get_sender().name),
            transl('login_to_dashboard_to_view_message', name=m.get_sender().name))
        if_email_body_2 = if_email_body_3_button = if_email_body_3_url = None
    else:
        if_email_body_1 = transl('if-email-body-1', if_name=msg_params['if_name'], function=chat_topic,
                                 app_name=community.name)
        if_email_body_2 = transl('if-email-body-2')
        dashboard_trans = transl('dashboard')
        service_email = sln_settings.login.email() if sln_settings.login else service_user.email()
        btn = u'<a href="https://dashboard.onzestadapp.be/customers/signin?email=%(service_email)s" style="%(button_css)s">%(dashboard)s</a>' % {
            'service_email': service_email,
            'button_css': button_css,
            'dashboard': dashboard_trans
        }
        if_email_body_3_button = transl('if-email-body-3-button', dashboard_button=btn)
        if_email_body_3_url = transl('if-email-body-3-url',
                                     dashboard_url='https://dashboard.onzestadapp.be/customers/signin?email=%s' % service_email)

    if_email_footer_1 = transl('if-email-footer-1', service_name=sln_settings.name, app_name=app.name)
    if_email_footer_2 = transl('if-email-footer-2')
    if_email_footer_3 = transl('if-email-footer-3')
    if_email_footer_4 = transl('if-email-footer-4')
    if_email_footer_5 = transl('if-email-footer-5')
    if_email_footer_6 = transl('if-email-footer-6')

    html_params = {
        'category': m.category,
        'if_email_body_1': if_email_body_1,
        'if_email_body_2': if_email_body_2,
        'if_email_body_3_button': if_email_body_3_button,
        'if_email_footer_1': if_email_footer_1,
        'if_email_footer_2': if_email_footer_2,
        'if_email_footer_3': if_email_footer_3,
        'if_email_footer_4': if_email_footer_4,
        'if_email_footer_5': if_email_footer_5,
        'if_email_footer_6': if_email_footer_6
    }
    text_params = {
        'category': m.category,
        'if_email_body_1': if_email_body_1,
        'if_email_body_2': if_email_body_2,
        'if_email_body_3_url': if_email_body_3_url,
        'if_email_footer_1': if_email_footer_1,
        'if_email_footer_2': if_email_footer_2,
        'if_email_footer_3': if_email_footer_3,
        'if_email_footer_4': if_email_footer_4,
        'if_email_footer_5': if_email_footer_5,
        'if_email_footer_6': if_email_footer_6
    }

    body_html = JINJA_ENVIRONMENT.get_template('emails/inbox_forwarded_message_html.tmpl').render(html_params)
    body = JINJA_ENVIRONMENT.get_template('emails/inbox_forwarded_message.tmpl').render(text_params)

    mime.attach(MIMEText(body.encode('utf-8'), 'plain', 'utf-8'))
    mime.attach(MIMEText(body_html.encode('utf-8'), 'html', 'utf-8'))

    with open(os.path.join(os.path.dirname(solutions.__file__), 'common', 'templates', 'emails',
                           'oca-email-header.png'), 'r') as f:
        img_data = f.read()

    img = MIMEImage(img_data, 'png')
    img.add_header('Content-Id', '<osa-footer>')
    img.add_header("Content-Disposition", "inline", filename="Onze Stad App footer")
    mimeRoot.attach(img)

    send_mail_via_mime(settings.senderEmail, sln_i_settings.inbox_mail_forwarders, mimeRoot)  # todo patch

    if not reminder:
        if str_key:
            deferred.defer(_send_styled_inbox_forwarders_email_reminder, service_user, str_key, msg_params,
                           _countdown=60 * 60 * 24, _queue=SCHEDULED_QUEUE)
        else:
            logging.debug("Ignoring reminder (no str_key given)")


@returns(SolutionInboxMessage)
@arguments(service_user=users.User, service_identity=unicode, category=unicode, category_key=unicode,
           sent_by_service=bool, user_details=[UserDetailsTO], timestamp=(int, long), message=unicode,
           reply_enabled=bool, picture_urls=[unicode], video_urls=[unicode], mark_as_read=bool)
def create_solution_inbox_message(service_user, service_identity, category, category_key, sent_by_service,
                                  user_details, timestamp, message, reply_enabled, picture_urls=None,
                                  video_urls=None, mark_as_read=False):
    sln_settings = get_solution_settings(service_user)
    service_identity_user = create_service_identity_user_wo_default(service_user, service_identity)

    sim_parent = SolutionInboxMessage(parent=parent_key_unsafe(service_identity_user, SOLUTION_COMMON))
    sim_parent.category = category
    sim_parent.category_key = category_key
    sim_parent.last_timestamp = timestamp
    sim_parent.last_message = message
    sim_parent.parent_message_key = None
    sim_parent.reply_enabled = reply_enabled
    sim_parent.child_messages = []
    sim_parent.sent_by_service = sent_by_service
    sim_parent.save_sender(SolutionUserTO.fromTO(user_details[0]) if user_details else None)
    sim_parent.message_key = None
    sim_parent.timestamp = timestamp
    sim_parent.message = message
    if picture_urls is None:
        picture_urls = []
    sim_parent.picture_urls = picture_urls
    if video_urls is None:
        video_urls = []
    sim_parent.video_urls = video_urls
    sim_parent.read = mark_as_read
    sim_parent.starred = False
    sim_parent.trashed = False
    sim_parent.deleted = False
    sim_parent.put()

    if not mark_as_read and SolutionModule.LOYALTY in sln_settings.modules:
        deferred.defer(update_user_data_admins, service_user, service_identity)
    return sim_parent


def send_inbox_info_messages_to_services(service_users, sender, message,
                                         category=SolutionInboxMessage.CATEGORY_OCA_INFO):
    # type: (list[users.User], users.User, unicode, unicode) -> None
    from solutions.common.bizz.service import new_inbox_message
    sender_settings = get_solution_settings(sender)
    service_profile = get_service_profile(sender)
    sender_user_details = UserDetailsTO(email=sender.email(), name=sender_settings.name,
                                        avatar_url=service_profile.avatarUrl,
                                        language=service_profile.defaultLanguage)
    sln_settings_cache = {model.service_user: model
                          for model in db.get([SolutionSettings.create_key(user) for user in service_users])}
    tasks = [create_task(new_inbox_message, sln_settings_cache[user], message, category=category,
                         reply_enabled=False, send_to_forwarders=True, user_details=sender_user_details)
             for user in service_users]
    schedule_tasks(tasks)


@returns(tuple)
@arguments(service_user=users.User, key=unicode, sent_by_service=bool, user_details=[UserDetailsTO],
           timestamp=(int, long), message=unicode, picture_urls=[unicode], video_urls=[unicode],
           mark_as_unread=bool, mark_as_read=bool, mark_as_trashed=bool)
def add_solution_inbox_message(service_user, key, sent_by_service, user_details, timestamp, message,
                               picture_urls=None, video_urls=None, mark_as_unread=True, mark_as_read=False,
                               mark_as_trashed=False):
    def trans(message, picture_urls, video_urls):
        sln_settings = get_solution_settings(service_user)
        sim_parent = SolutionInboxMessage.get(reconstruct_key(db.Key(key)))

        sim_reply = SolutionInboxMessage(parent=sim_parent)
        sim_reply.sent_by_service = sent_by_service
        sim_reply.save_sender(SolutionUserTO.fromTO(user_details[0]) if user_details else None)
        sim_reply.message_key = None
        sim_reply.parent_message_key = sim_parent.message_key
        sim_reply.timestamp = timestamp
        sim_reply.message = message
        if picture_urls is None:
            picture_urls = []
        sim_reply.picture_urls = picture_urls
        if video_urls is None:
            video_urls = []
        sim_reply.video_urls = video_urls
        sim_reply.put()

        sim_parent.child_messages.append(sim_reply.id)
        sim_parent.last_timestamp = timestamp
        if not message:
            if picture_urls:
                message = common_translate(sln_settings.main_language, '<Picture>')
            elif video_urls:
                message = common_translate(sln_settings.main_language, '<Video>')
        sim_parent.last_message = message
        if mark_as_unread:
            sim_parent.read = False
        if mark_as_read:
            sim_parent.read = True
        if mark_as_trashed:
            sim_parent.trashed = True
        else:
            sim_parent.trashed = False
        sim_parent.deleted = False
        sim_parent.put()

        if SolutionModule.LOYALTY in sln_settings.modules:
            if mark_as_read or mark_as_unread or mark_as_trashed:
                deferred.defer(update_user_data_admins, service_user, sim_parent.service_identity,
                               _transactional=True)
        return sim_parent, sim_reply

    return run_in_transaction(trans, True, message, picture_urls, video_urls)


class _MessagesExport(object):
    incomming_messages = long_property('1')
    child_messages = long_property('2')

    def __init__(self):
        self.incomming_messages = 0
        self.child_messages = 0

    @property
    def total_messages(self):
        return self.incomming_messages + self.child_messages


@returns(tuple)
@arguments(service_user=users.User, service_identity=unicode)
def export_inbox_messages(service_user, service_identity):
    tmpl_path = 'pdfs/inbox_export.html'
    sln_settings = get_solution_settings(service_user)
    sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)
    all_parent_messages = list(SolutionInboxMessage.get_all_by_service(
        service_user, service_identity, now() - DAY * 365))
    all_messages = list()
    to_get = list()
    for parent_msg in all_parent_messages:
        to_get.extend([SolutionInboxMessage.create_key(child, parent_msg.key())
                       for child in parent_msg.child_messages])
    inbox_messages_months_dict = dict()
    for i in range(1, 13):
        inbox_messages_months_dict[i] = _MessagesExport()
    parent_messages_children = dict()
    for child in SolutionInboxMessage.get(to_get):
        if child.parent_key() not in parent_messages_children:
            parent_messages_children[child.parent_key()] = list()
        parent_messages_children[child.parent_key()].append(child)
    for parent_msg in all_parent_messages:
        parent_msg.datetime = format_datetime(parent_msg.timestamp, format='EEEE d MMM y HH:mm',
                                              locale=sln_settings.main_language)
        parent_msg.message = parent_msg.message.replace('\n', '<br />').replace('\t', '&nbsp;&nbsp;')
        if parent_msg.trashed:
            parent_msg.inbox = SolutionInboxMessage.INBOX_NAME_TRASH
        elif parent_msg.starred:
            parent_msg.inbox = SolutionInboxMessage.INBOX_NAME_STARRED
        elif not parent_msg.read:
            parent_msg.inbox = SolutionInboxMessage.INBOX_NAME_UNREAD
        else:
            parent_msg.inbox = SolutionInboxMessage.INBOX_NAME_READ
        parent_msg.inbox = parent_msg.inbox.title()
        parent_msg.children = list()
        # xls file
        parent_msg_month = time.localtime(parent_msg.timestamp).tm_mon
        inbox_messages_months_dict[parent_msg_month].incomming_messages += 1
        if parent_msg.key() in parent_messages_children:
            for child in parent_messages_children[parent_msg.key()]:
                child.datetime = format_datetime(child.timestamp, format='EEEE d M y HH:mm',
                                                 locale=sln_settings.main_language)
                child.message = child.message.replace('\n', '<br />').replace('\t', '&nbsp;&nbsp;')
                if child.trashed:
                    child.inbox = SolutionInboxMessage.INBOX_NAME_TRASH
                elif child.starred:
                    child.inbox = SolutionInboxMessage.INBOX_NAME_STARRED
                elif not child.read:
                    child.inbox = SolutionInboxMessage.INBOX_NAME_UNREAD
                else:
                    child.inbox = SolutionInboxMessage.INBOX_NAME_READ
                child.inbox = child.inbox.title()
                parent_msg.children.append(child)
                child_msg_month = time.localtime(child.timestamp).tm_mon
                inbox_messages_months_dict[child_msg_month].child_messages += 1
        all_messages.append(parent_msg)
    if sln_settings.main_language in LOGO_LANGUAGES:
        logo_path = 'templates/img/osa_white_' + sln_settings.main_language + '_250.jpg'
    else:
        logo_path = 'templates/img/osa_white_en_250.jpg'
    tmpl_variables = {
        'sln_i_settings': sln_i_settings,
        'messages': all_messages,
        'logo_path': logo_path
    }
    source_html = JINJA_ENVIRONMENT.get_template(tmpl_path).render(tmpl_variables)
    html_dir = os.path.join(os.path.dirname(__file__), '..', 'templates')
    pdf = create_pdf(source_html, html_dir, "")
    message_statistics = create_message_statistics_excel(inbox_messages_months_dict, sln_settings.main_language)
    return pdf, message_statistics


def create_message_statistics_excel(messages_months_dict, language):
    import xlwt
    # amount of messages per month
    # month | incoming messages | replies | total messages

    def transl(key):
        return common_translate(language, key)

    column_month = 0
    column_messages = 1
    column_replies = 2
    column_total_messages = 3
    bold_style = xlwt.XFStyle()
    bold_style.font.bold = True
    book = xlwt.Workbook(encoding="utf-8")
    # Excel has a 31 character limit for sheet names
    messages_sheet = book.add_sheet(transl('inbox_messages')[0:31])
    messages_sheet.write(0, column_month, transl('month').title(), bold_style)
    messages_sheet.write(0, column_messages, transl('incoming_messages'), bold_style)
    messages_sheet.write(0, column_replies, transl('replies'), bold_style)
    messages_sheet.write(0, column_total_messages, transl('total_messages'), bold_style)
    messages_sheet.col(column_month).width = 5000
    messages_sheet.col(column_messages).width = 5000
    messages_sheet.col(column_replies).width = 5000
    messages_sheet.col(column_total_messages).width = 5000
    # start with last month, end with current month
    current_month = datetime.datetime.now().month
    i = 0
    for i in xrange(1, len(messages_months_dict) + 1):
        month_string = format_datetime(time.mktime((2015, current_month, 1, 12, 0, 0, 0, 0, 0)),
                                       format='MMMM', locale=language)
        messages_sheet.write(i, column_month, month_string)
        messages_sheet.write(i, column_messages, messages_months_dict[current_month].incomming_messages)
        messages_sheet.write(i, column_replies, messages_months_dict[current_month].child_messages)
        messages_sheet.write(i, column_total_messages, messages_months_dict[current_month].total_messages)
        current_month -= 1
        if current_month < 1:
            current_month = 12
    excel_file = StringIO()
    book.save(excel_file)
    return excel_file.getvalue()


def send_statistics_export_email(service_user, service_identity, email, sln_settings):
    if not EMAIL_REGEX.match(email):
        raise InvalidEmailFormatException(email)
    deferred.defer(_deferred_statistics_email_export, service_user, service_identity,
                   sln_settings.main_language, email)


def _deferred_statistics_email_export(service_user, service_identity, lang, email):
    users.set_user(service_user)
    try:
        messages_pdf, message_statistics_excel = export_inbox_messages(service_user, service_identity)
        flow_statistics_excel = base64.b64decode(system.export_flow_statistics(service_identity))
    finally:
        users.clear_user()
    cur_date = format_datetime(now(), format='d-M-yyyy', locale=lang)
    subject = common_translate(lang, 'inbox_messages_export_for_date', date=cur_date)
    body_text = common_translate(lang, 'see_attachment_for_detailed_statistics')
    attachment_name_pdf = 'Inbox ' + cur_date + '.pdf'
    attachment_name_inbox_excel = 'Inbox messages ' + cur_date + '.xls'
    attachment_name_flow_statistics_excel = 'Flow statistics ' + cur_date + '.xls'
    attachments = [
        (attachment_name_pdf, base64.b64encode(messages_pdf)),
        (attachment_name_inbox_excel, base64.b64encode(message_statistics_excel)),
        (attachment_name_flow_statistics_excel, base64.b64encode(flow_statistics_excel))
    ]
    send_mail(MC_DASHBOARD.email(), email, subject, body_text, attachments=attachments)
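# A condensed sketch of the MIME structure send_styled_inbox_forwarders_email
# builds above: a multipart/related root carrying a multipart/alternative
# (plain + HTML) body plus an inline image the HTML references by Content-Id.
# Addresses and the PNG path are placeholders.
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

root = MIMEMultipart('related')
root['Subject'] = 'New inbox message'
root['From'] = 'sender@example.com'
root['To'] = 'forwarder@example.com'

alt = MIMEMultipart('alternative')
root.attach(alt)
alt.attach(MIMEText('plain body', 'plain', 'utf-8'))
alt.attach(MIMEText('<img src="cid:osa-footer">', 'html', 'utf-8'))

with open('header.png', 'rb') as f:
    img = MIMEImage(f.read(), 'png')
img.add_header('Content-Id', '<osa-footer>')
img.add_header('Content-Disposition', 'inline', filename='header')
root.attach(img)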
apache-2.0
-9,131,324,451,917,731,000
46.670306
212
0.67856
false
chrislit/abydos
abydos/phonetic/_eudex.py
1
7225
# Copyright 2018-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.

"""abydos.phonetic._eudex.

Eudex phonetic hash
"""

from ._phonetic import _Phonetic

__all__ = ['Eudex']


class Eudex(_Phonetic):
    """Eudex hash.

    This implementation of eudex phonetic hashing is based on the
    specification (not the reference implementation) at :cite:`Ticki:2016`.

    Further details can be found at :cite:`Ticki:2016b`.

    .. versionadded:: 0.3.6
    """

    _trailing_phones = {
        'a': 0,  # a
        'b': 0b01001000,  # b
        'c': 0b00001100,  # c
        'd': 0b00011000,  # d
        'e': 0,  # e
        'f': 0b01000100,  # f
        'g': 0b00001000,  # g
        'h': 0b00000100,  # h
        'i': 1,  # i
        'j': 0b00000101,  # j
        'k': 0b00001001,  # k
        'l': 0b10100000,  # l
        'm': 0b00000010,  # m
        'n': 0b00010010,  # n
        'o': 0,  # o
        'p': 0b01001001,  # p
        'q': 0b10101000,  # q
        'r': 0b10100001,  # r
        's': 0b00010100,  # s
        't': 0b00011101,  # t
        'u': 1,  # u
        'v': 0b01000101,  # v
        'w': 0b00000000,  # w
        'x': 0b10000100,  # x
        'y': 1,  # y
        'z': 0b10010100,  # z
        'ß': 0b00010101,  # ß
        'à': 0,  # à
        'á': 0,  # á
        'â': 0,  # â
        'ã': 0,  # ã
        'ä': 0,  # ä[æ]
        'å': 1,  # å[oː]
        'æ': 0,  # æ[æ]
        'ç': 0b10010101,  # ç[t͡ʃ]
        'è': 1,  # è
        'é': 1,  # é
        'ê': 1,  # ê
        'ë': 1,  # ë
        'ì': 1,  # ì
        'í': 1,  # í
        'î': 1,  # î
        'ï': 1,  # ï
        'ð': 0b00010101,  # ð[ð̠](represented as a non-plosive T)
        'ñ': 0b00010111,  # ñ[nj](represented as a combination of n and j)
        'ò': 0,  # ò
        'ó': 0,  # ó
        'ô': 0,  # ô
        'õ': 0,  # õ
        'ö': 1,  # ö[ø]
        '÷': 0b11111111,  # ÷
        'ø': 1,  # ø[ø]
        'ù': 1,  # ù
        'ú': 1,  # ú
        'û': 1,  # û
        'ü': 1,  # ü
        'ý': 1,  # ý
        'þ': 0b00010101,  # þ[ð̠](represented as a non-plosive T)
        'ÿ': 1,  # ÿ
    }

    _initial_phones = {
        'a': 0b10000100,  # a*
        'b': 0b00100100,  # b
        'c': 0b00000110,  # c
        'd': 0b00001100,  # d
        'e': 0b11011000,  # e*
        'f': 0b00100010,  # f
        'g': 0b00000100,  # g
        'h': 0b00000010,  # h
        'i': 0b11111000,  # i*
        'j': 0b00000011,  # j
        'k': 0b00000101,  # k
        'l': 0b01010000,  # l
        'm': 0b00000001,  # m
        'n': 0b00001001,  # n
        'o': 0b10010100,  # o*
        'p': 0b00100101,  # p
        'q': 0b01010100,  # q
        'r': 0b01010001,  # r
        's': 0b00001010,  # s
        't': 0b00001110,  # t
        'u': 0b11100000,  # u*
        'v': 0b00100011,  # v
        'w': 0b00000000,  # w
        'x': 0b01000010,  # x
        'y': 0b11100100,  # y*
        'z': 0b01001010,  # z
        'ß': 0b00001011,  # ß
        'à': 0b10000101,  # à
        'á': 0b10000101,  # á
        'â': 0b10000000,  # â
        'ã': 0b10000110,  # ã
        'ä': 0b10100110,  # ä [æ]
        'å': 0b11000010,  # å [oː]
        'æ': 0b10100111,  # æ [æ]
        'ç': 0b01010100,  # ç [t͡ʃ]
        'è': 0b11011001,  # è
        'é': 0b11011001,  # é
        'ê': 0b11011001,  # ê
        'ë': 0b11000110,  # ë [ə] or [œ]
        'ì': 0b11111001,  # ì
        'í': 0b11111001,  # í
        'î': 0b11111001,  # î
        'ï': 0b11111001,  # ï
        'ð': 0b00001011,  # ð [ð̠] (represented as a non-plosive T)
        'ñ': 0b00001011,  # ñ [nj] (represented as a combination of n and j)
        'ò': 0b10010101,  # ò
        'ó': 0b10010101,  # ó
        'ô': 0b10010101,  # ô
        'õ': 0b10010101,  # õ
        'ö': 0b11011100,  # ö [œ] or [ø]
        '÷': 0b11111111,  # ÷
        'ø': 0b11011101,  # ø [œ] or [ø]
        'ù': 0b11100001,  # ù
        'ú': 0b11100001,  # ú
        'û': 0b11100001,  # û
        'ü': 0b11100101,  # ü
        'ý': 0b11100101,  # ý
        'þ': 0b00001011,  # þ [ð̠] (represented as a non-plosive T)
        'ÿ': 0b11100101,  # ÿ
    }

    def __init__(self, max_length: int = 8) -> None:
        """Initialize Eudex instance.

        Parameters
        ----------
        max_length : int
            The length in bits of the code returned (default 8)


        .. versionadded:: 0.4.0

        """
        self._max_length = max_length

    def encode(self, word: str) -> str:
        """Return the eudex phonetic hash of a word.

        Parameters
        ----------
        word : str
            The word to transform

        Returns
        -------
        str
            The eudex hash


        Examples
        --------
        >>> pe = Eudex()
        >>> pe.encode('Colin')
        '432345564238053650'
        >>> pe.encode('Christopher')
        '433648490138894409'
        >>> pe.encode('Niall')
        '648518346341351840'
        >>> pe.encode('Smith')
        '720575940412906756'
        >>> pe.encode('Schmidt')
        '720589151732307997'


        .. versionadded:: 0.3.0
        .. versionchanged:: 0.3.6
            Encapsulated in class
        .. versionchanged:: 0.6.0
            Made return a str instead of int

        """
        # Lowercase input & filter unknown characters
        word = ''.join(
            char for char in word.lower() if char in self._initial_phones
        )

        if not word:
            word = '÷'

        # Perform initial eudex coding of each character
        values = [self._initial_phones[word[0]]]
        values += [self._trailing_phones[char] for char in word[1:]]

        # Right-shift by one to determine if second instance should be skipped
        shifted_values = [_ >> 1 for _ in values]
        condensed_values = [values[0]]
        for n in range(1, len(shifted_values)):
            if shifted_values[n] != shifted_values[n - 1]:
                condensed_values.append(values[n])

        # Add padding after first character & trim beyond max_length
        values = (
            [condensed_values[0]]
            + [0] * max(0, self._max_length - len(condensed_values))
            + condensed_values[1 : self._max_length]
        )

        # Combine individual character values into eudex hash
        hash_value = 0
        for val in values:
            hash_value = (hash_value << 8) | val

        return str(hash_value)


if __name__ == '__main__':
    import doctest

    doctest.testmod()
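# The hashes above are built so that similar-sounding words differ in few bits.
# A simple way to compare two of them (an illustrative simplification of the
# eudex distance, not part of this module) is the Hamming weight of the XOR of
# the two integers:
from abydos.phonetic import Eudex

pe = Eudex()

def eudex_xor_distance(a, b):
    # encode() returns the hash as a decimal string, so convert back to int
    return bin(int(pe.encode(a)) ^ int(pe.encode(b))).count('1')

print(eudex_xor_distance('Smith', 'Schmidt'))  # small for similar names
print(eudex_xor_distance('Smith', 'Niall'))    # larger for dissimilar ones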
gpl-3.0
5,644,673,661,007,270,000
27.491935
79
0.474526
false
diofant/diofant
diofant/tests/polys/test_fields.py
1
9032
"""Test sparse rational functions.""" import pytest from diofant import (CC, QQ, ZZ, CoercionFailed, I, Rational, field, ring, sqrt, symbols) from diofant.polys.fields import FracElement __all__ = () def test_FractionField___init__(): F1 = ZZ.inject('x', 'y').field F2 = ZZ.inject('x', 'y').field F3 = ZZ.inject('x', 'y', 'z').field assert F1.x == F1.gens[0] assert F1.y == F1.gens[1] assert F1.x == F2.x assert F1.y == F2.y assert F1.x != F3.x assert F1.y != F3.y F4 = ZZ.inject('gens').field assert type(F4.gens) is tuple def test_FractionField___hash__(): F, x, y, z = field('x y z', QQ) assert hash(F) def test_FractionField___eq__(): assert field('x y z', QQ)[0] == field('x y z', QQ)[0] assert field('x y z', QQ)[0] is field('x y z', QQ)[0] assert field('x y z', QQ)[0] != field('x y z', ZZ)[0] assert field('x y z', QQ)[0] is not field('x y z', ZZ)[0] assert field('x y z', ZZ)[0] != field('x y z', QQ)[0] assert field('x y z', ZZ)[0] is not field('x y z', QQ)[0] assert field('x y z', QQ)[0] != field('x y', QQ)[0] assert field('x y z', QQ)[0] is not field('x y', QQ)[0] assert field('x y', QQ)[0] != field('x y z', QQ)[0] assert field('x y', QQ)[0] is not field('x y z', QQ)[0] def test_FractionField_methods(): F = ZZ.inject('x').field assert F.domain_new(2) == ZZ(2) x = symbols('x') assert F(x**2 + x) == F.x**2 + F.x def test_FracElement___hash__(): F, x, y, z = field('x y z', QQ) assert hash(x*y/z) def test_FracElement_copy(): F, x, y, z = field('x y z', ZZ) f = x*y/3*z g = f.copy() assert f == g g.numerator[(1, 1, 1)] = 7 assert f != g def test_FracElement_as_expr(): F, x, y, z = field('x y z', ZZ) f = (3*x**2*y - x*y*z)/(7*z**3 + 1) X, Y, Z = F.symbols g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1) assert f != g assert F.to_expr(f) == g X, Y, Z = symbols('x y z') g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1) assert f != g def test_FracElement_from_expr(): x, y, z = symbols('x y z') F, X, Y, Z = field((x, y, z), ZZ) f = F.convert(1) assert f == 1 and isinstance(f, F.dtype) f = F.convert(Rational(3, 7)) assert f == F(3)/7 and isinstance(f, F.dtype) f = F.convert(x) assert f == X and isinstance(f, F.dtype) f = F.convert(Rational(3, 7)*x) assert f == 3*X/7 and isinstance(f, F.dtype) f = F.convert(1/x) assert f == 1/X and isinstance(f, F.dtype) f = F.convert(x*y*z) assert f == X*Y*Z and isinstance(f, F.dtype) f = F.convert(x*y/z) assert f == X*Y/Z and isinstance(f, F.dtype) f = F.convert(x*y*z + x*y + x) assert f == X*Y*Z + X*Y + X and isinstance(f, F.dtype) f = F.convert((x*y*z + x*y + x)/(x*y + 7)) assert f == (X*Y*Z + X*Y + X)/(X*Y + 7) and isinstance(f, F.dtype) f = F.convert(x**3*y*z + x**2*y**7 + 1) assert f == X**3*Y*Z + X**2*Y**7 + 1 and isinstance(f, F.dtype) pytest.raises(CoercionFailed, lambda: F.convert(2**x)) pytest.raises(CoercionFailed, lambda: F.convert(7*x + sqrt(2))) F, X, Y = field((2**x, y), ZZ) f = F.convert(2**(2*x) + 1) assert f == X**2 + 1 # issue sympy/sympy#20985 F, X = field(x, CC) assert F.convert(I/x) == F.convert(CC(0, 1))/X def test_FracElement_to_poly(): F, x, y = field('x y', ZZ) pytest.raises(ValueError, lambda: (x/y).to_poly()) def test_FracElement__pos_neg__(): F, x, y = field('x y', QQ) f = (7*x - 9)/y g = (-7*x + 9)/y assert +f == f assert +g == g assert -f == g assert -g == f def test_FracElement___add__(): F, x, y = field('x y', QQ) f, g = 1/x, 1/y assert f + g == g + f == (x + y)/(x*y) z = symbols('z') pytest.raises(TypeError, lambda: x + z) assert x + F.ring.gens[0] == F.ring.gens[0] + x == 2*x F, x, y = field('x y', ZZ) assert x + 3 == 3 + x assert x + QQ(3, 7) == QQ(3, 7) + x 
== (7*x + 3)/7 Fuv, u, v = field('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Fuv) f = (u*v + x)/(y + u*v) assert dict(f.numerator) == {(1, 0, 0, 0): 1, (0, 0, 0, 0): u*v} assert dict(f.denominator) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v} Ruv, u, v = ring('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Ruv) f = (u*v + x)/(y + u*v) assert dict(f.numerator) == {(1, 0, 0, 0): 1, (0, 0, 0, 0): u*v} assert dict(f.denominator) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v} def test_FracElement___sub__(): F, x, y = field('x y', QQ) f, g = 1/x, 1/y assert f - g == (-x + y)/(x*y) assert x - F.ring.gens[0] == F.ring.gens[0] - x == 0 F, x, y = field('x y', ZZ) assert x - 3 == -(3 - x) assert x - QQ(3, 7) == -(QQ(3, 7) - x) == (7*x - 3)/7 Fuv, u, v = field('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Fuv) f = (u*v - x)/(y - u*v) assert dict(f.numerator) == {(1, 0, 0, 0): -1, (0, 0, 0, 0): u*v} assert dict(f.denominator) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): -u*v} Ruv, u, v = ring('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Ruv) f = (u*v - x)/(y - u*v) assert dict(f.numerator) == {(1, 0, 0, 0): -1, (0, 0, 0, 0): u*v} assert dict(f.denominator) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): -u*v} Fuv, u, v = field('u v', ZZ) Rxyz, x, y, z = ring('x y z', Fuv) f = u - x assert dict(f) == {(0, 0, 0): u, (1, 0, 0): -Fuv.one} def test_FracElement___mul__(): F, x, y = field('x y', QQ) f, g = 1/x, 1/y assert f*g == g*f == 1/(x*y) assert x*F.ring.gens[0] == F.ring.gens[0]*x == x**2 F, x, y = field('x y', ZZ) assert x*3 == 3*x assert x*QQ(3, 7) == QQ(3, 7)*x == 3*x/7 Fuv, u, v = field('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Fuv) f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1) assert dict(f.numerator) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1} assert dict(f.denominator) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1} Ruv, u, v = ring('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Ruv) f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1) assert dict(f.numerator) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1} assert dict(f.denominator) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1} def test_FracElement___truediv__(): F, x, y = field('x y', QQ) f, g = 1/x, 1/y assert f/g == y/x assert x/F.ring.gens[0] == F.ring.gens[0]/x == 1 F, x, y = field('x y', ZZ) assert x*3 == 3*x assert x/QQ(3, 7) == (QQ(3, 7)/x)**-1 == 7*x/3 pytest.raises(ZeroDivisionError, lambda: x/0) pytest.raises(ZeroDivisionError, lambda: 1/(x - x)) pytest.raises(ZeroDivisionError, lambda: x/(x - x)) Fuv, u, v = field('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Fuv) f = (u*v)/(x*y) assert dict(f.numerator) == {(0, 0, 0, 0): u*v} assert dict(f.denominator) == {(1, 1, 0, 0): 1} g = (x*y)/(u*v) assert dict(g.numerator) == {(1, 1, 0, 0): 1} assert dict(g.denominator) == {(0, 0, 0, 0): u*v} Ruv, u, v = ring('u v', ZZ) Fxyzt, x, y, z, t = field('x y z t', Ruv) f = (u*v)/(x*y) assert dict(f.numerator) == {(0, 0, 0, 0): u*v} assert dict(f.denominator) == {(1, 1, 0, 0): 1} g = (x*y)/(u*v) assert dict(g.numerator) == {(1, 1, 0, 0): 1} assert dict(g.denominator) == {(0, 0, 0, 0): u*v} Fuv, u, v = field('u v', ZZ) Rxyz, x, y, z = ring('x y z', Fuv) pytest.raises(TypeError, lambda: u/x) def test_FracElement___pow__(): F, x, y = field('x y', QQ) f, g = 1/x, 1/y assert f**3 == 1/x**3 assert g**3 == 1/y**3 assert (f*g)**3 == 1/(x**3*y**3) assert (f*g)**-3 == (x*y)**3 pytest.raises(ZeroDivisionError, lambda: (x - x)**-3) def test_FracElement_diff(): F, x, y, z = field('x y z', ZZ) assert ((x**2 + y)/(z + 1)).diff(x) == 2*x/(z + 1) F, x, y = field('x 
y', QQ.algebraic_field(I)) assert ((x - y)/x).diff(x) == y/x**2 def test_FracElement___call__(): F, x, y, z = field('x y z', ZZ) f = (x**2 + 3*y)/z pytest.raises(ValueError, lambda: f(1, 1, 1, 1)) r = f(1, 1, 1) assert r == 4 and not isinstance(r, FracElement) pytest.raises(ZeroDivisionError, lambda: f(1, 1, 0)) Fz = ZZ.inject('z').field assert f(1, 1) == 4/Fz.z def test_FracElement_eval(): F, x, y, z = field('x y z', ZZ) Fyz = field('y z', ZZ)[0] f = (x**2 + 3*y)/z assert f.eval(x, 0) == 3*Fyz.y/Fyz.z pytest.raises(ZeroDivisionError, lambda: f.eval(z, 0)) def test_FracElement_compose(): F, x, y, z = field('x y z', QQ) f = x**3 assert f.compose([(x, x/(y + z)), (y, z/x)]) == x**3/(y**3 + 3*y**2*z + 3*y*z**2 + z**3) # issue sympy/sympy#20484 assert f.compose(x, x/(y + z)) == x**3/(y**3 + 3*y**2*z + 3*y*z**2 + z**3) def test_cache(): F1 = QQ.frac_field(-sqrt(2)) F2 = QQ.frac_field(-2*sqrt(2)) assert F1 != F2
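# A quick usage sketch of the API exercised above: field() returns the
# fraction field together with its generators, and elements behave like
# rational functions (arithmetic, differentiation, numerator/denominator
# access).
from diofant import QQ, field

F, x, y = field('x y', QQ)

f = 1/x + 1/y
assert f == (x + y)/(x*y)      # mirrors test_FracElement___add__
assert f.diff(x) == -1/x**2    # term-by-term derivative
print(dict(f.numerator))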
bsd-3-clause
-4,978,628,535,541,162,000
24.586402
93
0.479849
false
rr-/szurubooru
server/szurubooru/func/image_hash.py
1
9886
import logging
import math
from datetime import datetime
from io import BytesIO
from typing import Any, Callable, List, Optional, Set, Tuple

import numpy as np
from PIL import Image

import pillow_avif
import pyheif
from pyheif_pillow_opener import register_heif_opener

register_heif_opener()

from szurubooru import config, errors

logger = logging.getLogger(__name__)

# Math based on paper from H. Chi Wong, Marshall Bern and David Goldberg
# Math code taken from https://github.com/ascribe/image-match
# (which is licensed under Apache 2 license)

LOWER_PERCENTILE = 5
UPPER_PERCENTILE = 95
IDENTICAL_TOLERANCE = 2 / 255.0
DISTANCE_CUTOFF = 0.45
N_LEVELS = 2
N = 9
P = None
SAMPLE_WORDS = 16
MAX_WORDS = 63

SIG_CHUNK_BITS = 32

SIG_NUMS = 8 * N * N
SIG_BASE = 2 * N_LEVELS + 2
SIG_CHUNK_WIDTH = int(SIG_CHUNK_BITS / math.log2(SIG_BASE))
SIG_CHUNK_NUMS = SIG_NUMS / SIG_CHUNK_WIDTH
assert SIG_NUMS % SIG_CHUNK_WIDTH == 0

Window = Tuple[Tuple[float, float], Tuple[float, float]]
NpMatrix = np.ndarray


def _preprocess_image(content: bytes) -> NpMatrix:
    try:
        img = Image.open(BytesIO(content))
        return np.asarray(img.convert("L"), dtype=np.uint8)
    except (IOError, ValueError):
        raise errors.ProcessingError(
            "Unable to generate a signature hash for this image."
        )


def _crop_image(
    image: NpMatrix, lower_percentile: float, upper_percentile: float
) -> Window:
    rw = np.cumsum(np.sum(np.abs(np.diff(image, axis=1)), axis=1))
    cw = np.cumsum(np.sum(np.abs(np.diff(image, axis=0)), axis=0))
    upper_column_limit = np.searchsorted(
        cw, np.percentile(cw, upper_percentile), side="left"
    )
    lower_column_limit = np.searchsorted(
        cw, np.percentile(cw, lower_percentile), side="right"
    )
    upper_row_limit = np.searchsorted(
        rw, np.percentile(rw, upper_percentile), side="left"
    )
    lower_row_limit = np.searchsorted(
        rw, np.percentile(rw, lower_percentile), side="right"
    )
    if lower_row_limit > upper_row_limit:
        lower_row_limit = int(lower_percentile / 100.0 * image.shape[0])
        upper_row_limit = int(upper_percentile / 100.0 * image.shape[0])
    if lower_column_limit > upper_column_limit:
        lower_column_limit = int(lower_percentile / 100.0 * image.shape[1])
        upper_column_limit = int(upper_percentile / 100.0 * image.shape[1])
    return (
        (lower_row_limit, upper_row_limit),
        (lower_column_limit, upper_column_limit),
    )


def _normalize_and_threshold(
    diff_array: NpMatrix, identical_tolerance: float, n_levels: int
) -> None:
    mask = np.abs(diff_array) < identical_tolerance
    diff_array[mask] = 0.0
    if np.all(mask):
        return
    positive_cutoffs = np.percentile(
        diff_array[diff_array > 0.0], np.linspace(0, 100, n_levels + 1)
    )
    negative_cutoffs = np.percentile(
        diff_array[diff_array < 0.0], np.linspace(100, 0, n_levels + 1)
    )
    for level, interval in enumerate(
        positive_cutoffs[i : i + 2]
        for i in range(positive_cutoffs.shape[0] - 1)
    ):
        diff_array[
            (diff_array >= interval[0]) & (diff_array <= interval[1])
        ] = (level + 1)
    for level, interval in enumerate(
        negative_cutoffs[i : i + 2]
        for i in range(negative_cutoffs.shape[0] - 1)
    ):
        diff_array[
            (diff_array <= interval[0]) & (diff_array >= interval[1])
        ] = -(level + 1)


def _compute_grid_points(
    image: NpMatrix, n: float, window: Window = None
) -> Tuple[NpMatrix, NpMatrix]:
    if window is None:
        window = ((0, image.shape[0]), (0, image.shape[1]))
    x_coords = np.linspace(window[0][0], window[0][1], n + 2, dtype=int)[1:-1]
    y_coords = np.linspace(window[1][0], window[1][1], n + 2, dtype=int)[1:-1]
    return x_coords, y_coords


def _compute_mean_level(
    image: NpMatrix, x_coords: NpMatrix, y_coords: NpMatrix, p: Optional[float]
) -> NpMatrix:
    if p is None:
        p = max([2.0, int(0.5 + min(image.shape) / 20.0)])
    avg_grey = np.zeros((x_coords.shape[0], y_coords.shape[0]))
    for i, x in enumerate(x_coords):
        lower_x_lim = int(max([x - p / 2, 0]))
        upper_x_lim = int(min([lower_x_lim + p, image.shape[0]]))
        for j, y in enumerate(y_coords):
            lower_y_lim = int(max([y - p / 2, 0]))
            upper_y_lim = int(min([lower_y_lim + p, image.shape[1]]))
            avg_grey[i, j] = np.mean(
                image[lower_x_lim:upper_x_lim, lower_y_lim:upper_y_lim]
            )
    return avg_grey


def _compute_differentials(grey_level_matrix: NpMatrix) -> NpMatrix:
    flipped = np.fliplr(grey_level_matrix)
    right_neighbors = -np.concatenate(
        (
            np.diff(grey_level_matrix),
            (
                np.zeros(grey_level_matrix.shape[0]).reshape(
                    (grey_level_matrix.shape[0], 1)
                )
            ),
        ),
        axis=1,
    )
    down_neighbors = -np.concatenate(
        (
            np.diff(grey_level_matrix, axis=0),
            (
                np.zeros(grey_level_matrix.shape[1]).reshape(
                    (1, grey_level_matrix.shape[1])
                )
            ),
        )
    )
    left_neighbors = -np.concatenate(
        (right_neighbors[:, -1:], right_neighbors[:, :-1]), axis=1
    )
    up_neighbors = -np.concatenate((down_neighbors[-1:], down_neighbors[:-1]))
    diagonals = np.arange(
        -grey_level_matrix.shape[0] + 1, grey_level_matrix.shape[0]
    )
    upper_left_neighbors = sum(
        [
            np.diagflat(
                np.insert(np.diff(np.diag(grey_level_matrix, i)), 0, 0), i
            )
            for i in diagonals
        ]
    )
    upper_right_neighbors = sum(
        [
            np.diagflat(np.insert(np.diff(np.diag(flipped, i)), 0, 0), i)
            for i in diagonals
        ]
    )
    lower_right_neighbors = -np.pad(
        upper_left_neighbors[1:, 1:], (0, 1), mode="constant"
    )
    lower_left_neighbors = -np.pad(
        upper_right_neighbors[1:, 1:], (0, 1), mode="constant"
    )
    return np.dstack(
        np.array(
            [
                upper_left_neighbors,
                up_neighbors,
                np.fliplr(upper_right_neighbors),
                left_neighbors,
                right_neighbors,
                np.fliplr(lower_left_neighbors),
                down_neighbors,
                lower_right_neighbors,
            ]
        )
    )


def _words_to_int(word_array: NpMatrix) -> List[int]:
    width = word_array.shape[1]
    coding_vector = 3 ** np.arange(width)
    return np.dot(word_array + 1, coding_vector).astype(int).tolist()


def _get_words(array: NpMatrix, k: int, n: int) -> NpMatrix:
    word_positions = np.linspace(0, array.shape[0], n, endpoint=False).astype(
        "int"
    )
    assert k <= array.shape[0]
    assert word_positions.shape[0] <= array.shape[0]
    words = np.zeros((n, k)).astype("int8")
    for i, pos in enumerate(word_positions):
        if pos + k <= array.shape[0]:
            words[i] = array[pos : pos + k]
        else:
            temp = array[pos:].copy()
            temp.resize(k, refcheck=False)
            words[i] = temp
    words[words > 0] = 1
    words[words < 0] = -1
    return words


def generate_signature(content: bytes) -> NpMatrix:
    im_array = _preprocess_image(content)
    image_limits = _crop_image(
        im_array,
        lower_percentile=LOWER_PERCENTILE,
        upper_percentile=UPPER_PERCENTILE,
    )
    x_coords, y_coords = _compute_grid_points(
        im_array, n=N, window=image_limits
    )
    avg_grey = _compute_mean_level(im_array, x_coords, y_coords, p=P)
    diff_matrix = _compute_differentials(avg_grey)
    _normalize_and_threshold(
        diff_matrix, identical_tolerance=IDENTICAL_TOLERANCE, n_levels=N_LEVELS
    )
    return np.ravel(diff_matrix).astype("int8")


def generate_words(signature: NpMatrix) -> List[int]:
    return _words_to_int(_get_words(signature, k=SAMPLE_WORDS, n=MAX_WORDS))


def normalized_distance(
    target_array: Any, vec: NpMatrix, nan_value: float = 1.0
) -> List[float]:
    target_array = np.array(target_array).astype(int)
    vec = vec.astype(int)
    topvec = np.linalg.norm(vec - target_array, axis=1)
    norm1 = np.linalg.norm(vec, axis=0)
    norm2 = np.linalg.norm(target_array, axis=1)
    finvec = topvec / (norm1 + norm2)
    finvec[np.isnan(finvec)] = nan_value
    return finvec


def pack_signature(signature: NpMatrix) -> bytes:
    """
    Serializes the signature vector for efficient storage in a database.

    Shifts the range of the signature vector from [-N_LEVELS, +N_LEVELS]
    to [0, base]. The vector can then be broken up into chunks, with each
    chunk consisting of SIG_CHUNK_WIDTH digits of radix `base`. This is
    then converted into a more packed array consisting of uint32 elements
    (for SIG_CHUNK_BITS = 32).
    """
    coding_vector = np.flipud(SIG_BASE ** np.arange(SIG_CHUNK_WIDTH))
    return (
        np.array(
            [
                np.dot(x, coding_vector)
                for x in np.reshape(
                    signature + N_LEVELS, (-1, SIG_CHUNK_WIDTH)
                )
            ]
        )
        .astype(f"uint{SIG_CHUNK_BITS}")
        .tobytes()
    )


def unpack_signature(packed: bytes) -> NpMatrix:
    """
    Deserializes the signature vector once received from the database.

    Functions as an inverse transformation of pack_signature().
    """
    return np.ravel(
        np.array(
            [
                [
                    int(digit) - N_LEVELS
                    for digit in np.base_repr(e, base=SIG_BASE).zfill(
                        SIG_CHUNK_WIDTH
                    )
                ]
                for e in np.frombuffer(packed, dtype=f"uint{SIG_CHUNK_BITS}")
            ]
        ).astype("int8")
    )
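# A round-trip sketch of the public helpers above: hash two images, pack one
# signature for storage, unpack it again, and score similarity. The file names
# are placeholders; distances under DISTANCE_CUTOFF (0.45) indicate likely
# duplicates.
from szurubooru.func.image_hash import (
    DISTANCE_CUTOFF, generate_signature, normalized_distance,
    pack_signature, unpack_signature)

with open('a.png', 'rb') as f:
    sig_a = generate_signature(f.read())
with open('b.png', 'rb') as f:
    sig_b = generate_signature(f.read())

packed = pack_signature(sig_a)            # compact uint32 blob for the DB
assert (unpack_signature(packed) == sig_a).all()

dist = normalized_distance([sig_b], sig_a)[0]
print('duplicate' if dist < DISTANCE_CUTOFF else 'distinct', dist)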
gpl-3.0
-1,512,972,429,940,477,200
30.787781
79
0.583552
false
lanhel/pyzombie
test/pyzombie/handlers/HandlerExecStartTestCase.py
1
3687
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
"""pyzombie HTTP RESTful handler test cases."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten ([email protected])"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"

import sys
import os
import io
import re
import unittest
from time import sleep
import http.client
from pyzombie.Executable import Executable
from pyzombie.Instance import DELTA_T
from pyzombie.handlers import HandlerExecStart
from MockRequest import MockRequest
from HTTPResponse import HTTPResponse
import TestSourceCLI


class HandlerExecStartGetTest(unittest.TestCase):
    def runTest(self):
        req = MockRequest()
        hndlr = HandlerExecStart(req, {'execname': self.__class__.__name__})
        hndlr.get()

        resp = HTTPResponse(req.wfile.getvalue())
        self.assertEqual(resp.protocol, "HTTP/1.1")
        self.assertEqual(resp.code, str(http.client.OK))
        self.assertEqual(resp.header["Content-Type"], "text/html;UTF-8")
        self.assertEqual(resp.md5, resp.header["ETag"])
        self.assertEqual(int(resp.header["Content-Length"]), len(resp.body))


class HandlerExecStartPostTest(unittest.TestCase):
    def setUp(self):
        self.ex = Executable.getcached(self.__class__.__name__, mediatype="text/x-python")
        self.ex.writeimage(open(TestSourceCLI.__file__, "r"))

        self.boundary = """NoBodyExpectsTheSpanishInquisition"""

        environ = TestSourceCLI.ENVIRON
        environ = ["{0} = {1}".format(k, environ[k]) for k in environ.keys()]
        environ = os.linesep.join(environ)

        argv = TestSourceCLI.ARGV
        argv = ' '.join(argv)

        self.form = """
--{0}
Content-Disposition: form-data; name="environ"

{1}
--{0}
Content-Disposition: form-data; name="arguments"

{2}
--{0}--
""".format(self.boundary, environ, argv)
        self.form = self.form.replace(os.linesep, '\r\n')
        self.form = self.form.encode("UTF-8")

    def runTest(self):
        req = MockRequest()
        req.readbuf = io.BytesIO(self.form)
        req.headers["Content-Type"] = "multipart/form-data; boundary={0}".format(self.boundary)
        req.headers["Content-Length"] = str(len(self.form))

        hndlr = HandlerExecStart(req, {'execname': self.ex.name})
        hndlr.post()

        resp = HTTPResponse(req.wfile.getvalue())
        self.assertEqual(resp.protocol, "HTTP/1.1")
        self.assertEqual(resp.code, str(http.client.CREATED))

        self.assertIsNotNone(hndlr.inst.process)
        self.assertIsNone(hndlr.inst.process.returncode)

        hndlr.inst.stdin.write(TestSourceCLI.STDIN.encode("UTF-8"))
        self.assertIsNone(hndlr.inst.process.returncode)
        hndlr.inst.stdin.close()

        self.assertTrue(os.path.isdir(hndlr.inst.datadir))
        while hndlr.inst.process.returncode is None:
            sleep(DELTA_T)

        TestSourceCLI.validateResults(self, self.__class__.__name__, 0,
                                      hndlr.inst.stdout, hndlr.inst.stderr)
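# For reference, the body assembled in setUp() follows the standard
# multipart/form-data layout: each part is introduced by --boundary, carries a
# Content-Disposition header naming the form field, and the final boundary
# ends with --. A tiny parser sketch using the standard library's email
# machinery (an illustration, not code from pyzombie):
from email import message_from_bytes

def parse_form(body: bytes, boundary: str) -> dict:
    prefix = 'Content-Type: multipart/form-data; boundary=%s\r\n\r\n' % boundary
    msg = message_from_bytes(prefix.encode('ascii') + body)
    return {part.get_param('name', header='content-disposition'):
            part.get_payload(decode=True)
            for part in msg.get_payload()}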
apache-2.0
4,394,666,807,182,305,000
34.451923
95
0.652834
false
madedotcom/ouroboros
test/test_user_update.py
1
3633
import json

from .fakes import with_fake_http
import httpretty
from expects import expect, equal
from .matchers import have_json


class when_adding_a_user_to_a_group(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/foo', file='user-foo.json')
        self.expect_call('/users/foo', httpretty.PUT)

    def because_we_add_the_user_to_a_group(self):
        self.client.users.addgroup("foo", "new-group")

    def it_should_put_the_correct_body(self):
        expect(httpretty.last_request()).to(
            have_json({
                "fullName": "bar",
                "groups": ["new-group"]
            }))


class when_adding_a_duplicate_group(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/admin', file='user-admin.json')
        self.expect_call('/users/admin', httpretty.PUT)

    def because_we_add_the_user_to_a_group(self):
        self.client.users.addgroup("admin", "$admins")

    def it_should_put_the_correct_body(self):
        expect(httpretty.last_request()).to(
            have_json({
                "fullName": "Event Store Administrator",
                "groups": ["$admins"]
            }))


class when_adding_multiple_groups(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/admin', file='user-admin.json')
        self.expect_call('/users/admin', httpretty.PUT)

    def because_we_add_the_user_to_a_group(self):
        self.client.users.addgroup("admin", "$admins", "devs", "people")

    def it_should_put_the_correct_body(self):
        body = httpretty.last_request().body.decode('Utf-8')
        posted_groups = json.loads(body)["groups"]
        expect(set(["$admins", "devs", "people"])).to(equal(
            set(posted_groups)))


class when_updating_the_name(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/admin', file='user-admin.json')
        self.expect_call('/users/admin', httpretty.PUT)

    def because_we_add_the_user_to_a_group(self):
        self.client.users.rename("admin", "bob the mighty")

    def it_should_put_the_correct_body(self):
        expect(httpretty.last_request()).to(
            have_json({
                "fullName": "bob the mighty",
                "groups": ["$admins"]
            }))


class when_removing_a_user_from_a_group(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/giddy', file='user-giddy.json')
        self.expect_call('/users/giddy', httpretty.PUT)

    def because_we_remove_a_group_from_the_user(self):
        self.client.users.removegroup('giddy', 'ops', 'cheeses')

    def it_should_put_the_correct_body(self):
        body = httpretty.last_request().body.decode('Utf-8')
        posted_groups = json.loads(body)["groups"]
        expect(set(["devs"])).to(equal(
            set(posted_groups)))


class when_changing_a_user_password(with_fake_http):
    def given_an_existing_user(self):
        self.start_mocking_http()
        self.fake_response('/users/giddy', file='user-giddy.json')
        self.expect_call('/users/giddy/command/reset-password', httpretty.POST)

    def because_we_change_the_password(self):
        self.client.users.setpassword('giddy', 'k1ng0fl1thuan14')

    def it_should_have_posted_the_correct_body(self):
        expect(httpretty.last_request()).to(have_json({
            'newPassword': 'k1ng0fl1thuan14'
        }))
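# A stripped-down sketch of the interception these fixtures rely on: httpretty
# registers a canned response, the code under test makes a real-looking HTTP
# call, and the test inspects httpretty.last_request(). The URL and the use of
# requests here are illustrative assumptions.
import httpretty
import requests

httpretty.enable()
httpretty.register_uri(httpretty.PUT, 'http://eventstore:2113/users/foo', body='{}')

requests.put('http://eventstore:2113/users/foo',
             json={'fullName': 'bar', 'groups': []})
print(httpretty.last_request().body)

httpretty.disable()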
mit
8,020,372,362,614,564,000
32.027273
79
0.619323
false
DayGitH/Python-Challenges
DailyProgrammer/DP20141003C.py
1
2303
""" [10/03/2014] Challenge #182 [Hard] Unique Digits https://www.reddit.com/r/dailyprogrammer/comments/2i7dlh/10032014_challenge_182_hard_unique_digits/ #Description: An interesting problem to solve: Looking at the Base 10 number system it has the digits 0 1 2 3 4 5 6 7 8 9 If I were given the digits 5 7 and 9 how many unique numbers could be formed that would use all these digits once? For example some easy ones would be: 579 975 795 And so on. but also these would work as well. 111579 1120759 These could go on forever as you just add digits. There would be many numbers just padding numbers to the unique numbers. Some might think that these next three might be valid but they are not because they do not contain all 3 digits: 57 75 95 So to cap off the range let us say numbers that do not go beyond 7 digits (so 7 places in your numbers) I am also interested in other base number systems. Like how many unique numbers using 5 6 could I find in base 8 (octal) or A E 0 1 in a base 16 (hexidecimal) ? Your challenge is to be able to take 2 sets of inputs and find out how many unique digits up to 7 places can be found given those 2 inputs. #Input: <Base system> <digits> * Base system is a base counting system. This number can be between 2 to 16. * Digits will be a list of digits that are ALL shown only once in the number #Output: All the unique numbers given up to 7 digits long only using the digits given once. followed by their base 10 value. At the bottom of the listing a "count" of how many numbers you found. So say I was looking for base 2 and my unique digits were 1 I would see this: 1 - 1 10 - 2 100 - 4 1000 - 8 10000 - 16 100000 - 32 1000000 - 64 Count: 7 #challenge inputs: These are several pairings to run. For the sake of size do not list your outputs - Maybe just the "counts" you found. If you wish to share the outputs use like a gist or link the output for people to go look at. 2 1 8 3 5 6 10 1 3 9 16 A E 1 0 #challenge input to try: For all base systems 2 to 16 find the numbers 0 1 in them. #challenge difficulty This is an unknown. Not sure if easy, intermediate or hard. Regardless lets give it a try. Could be very easy. Could be very hard. """ def main(): pass if __name__ == "__main__": main()
mit
2,093,067,371,623,661,000
34.430769
119
0.729917
false
Dimonyga/mysql_galera_checker
mysqlchk.py
1
2207
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from mysql.connector import MySQLConnection, Error
from iniparse import ConfigParser
import sys, time, os.path
from inc.daemon import Daemon

PORT_NUMBER = 8080
MYSQL_CONFIG = '/etc/my.cnf.d/mysql-clients.cnf'


def read_db_config(filename='/etc/my.cnf', section='mysql'):
    """Read connection options from the given section of a my.cnf style file."""
    parser = ConfigParser()
    parser.read(filename)
    db = {}
    if parser.has_section(section):
        items = parser.items(section)
        for item in items:
            db[item[0]] = item[1]
    else:
        raise Exception('{0} not found in the {1} file'.format(section, filename))
    return db


def connect():
    """Open a MySQL connection using the credentials from MYSQL_CONFIG."""
    db_config = read_db_config(MYSQL_CONFIG)
    try:
        conn = MySQLConnection(**db_config)
    except Error as error:
        print(error)
    return conn


class CheckHandler(BaseHTTPRequestHandler):
    """Report Galera node health: 200 when wsrep_local_state is 4 (Synced)."""
    db = None

    def do_GET(self):
        sql = "SHOW STATUS LIKE 'wsrep_local_state';"
        # (re)connect lazily; the connection is shared across requests
        if CheckHandler.db is None or not CheckHandler.db.is_connected():
            CheckHandler.db = connect()
            print('reconnect to mysql')
        mysql = CheckHandler.db.cursor()
        mysql.execute(sql)
        row = mysql.fetchone()
        if row[1] == '4':
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write("OK")
        else:
            self.send_response(503)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write("ERR")
        return


class mysqlchk(Daemon):
    def run(self):
        try:
            server = HTTPServer(('', PORT_NUMBER), CheckHandler)
            server.serve_forever()
        finally:
            print('shutting down the web server')


if __name__ == "__main__":
    daemon = mysqlchk('/tmp/daemon-example.pid', '/dev/null', '/dev/null', '/dev/stderr')
    if len(sys.argv) == 2:
        if 'start' == sys.argv[1]:
            daemon.start()
        elif 'stop' == sys.argv[1]:
            daemon.stop()
        elif 'restart' == sys.argv[1]:
            daemon.restart()
        else:
            print "Unknown command"
            sys.exit(2)
        sys.exit(0)
    else:
        print "usage: %s start|stop|restart" % sys.argv[0]
        sys.exit(2)
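# The whole health check boils down to one statement: a Galera node is usable
# when wsrep_local_state equals 4 ("Synced"). A standalone sketch of that
# probe; the credentials are placeholders.
from mysql.connector import MySQLConnection

conn = MySQLConnection(user='monitor', password='secret', host='127.0.0.1')
cursor = conn.cursor()
cursor.execute("SHOW STATUS LIKE 'wsrep_local_state';")
_, state = cursor.fetchone()
print('healthy' if state == '4' else 'not synced: %s' % state)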
gpl-2.0
-2,512,250,181,769,336,300
27.294872
93
0.595831
false
yoshrote/Columns
columns/lib/wordpress.py
1
9480
from __future__ import with_statement from BeautifulSoup import BeautifulSoup from xml.etree import ElementTree from columns.model import meta, Tag, Page, User, Article, Comment, Upload, init_model from columns.lib.atom import slugify from sqlalchemy import create_engine, or_, orm from ConfigParser import ConfigParser from paste.script.command import Command import re, datetime, os, traceback, urllib2, uuid WORDPRESS_DT_FORMAT = "%Y-%m-%d %H:%M:%S" def main(config, wp_file, static_path, base_wp_url, base_col_url): from columns.lib.app_globals import Globals db_url = config.get("app:main","sqlalchemy.url") engine = create_engine(db_url) init_model(engine) errors = [] with open(wp_file) as f: xmlstr = f.read() dom = ElementTree.fromstring(xmlstr) #import tags for x in dom.findall('channel/{http://wordpress.org/export/1.0/}tag'): tk = x.findtext('{http://wordpress.org/export/1.0/}tag_slug') tv = x.findtext('{http://wordpress.org/export/1.0/}tag_name') try: meta.Session.merge(Tag(id=unicode(tk), name=unicode(tv))) except: pass meta.Session.flush() #import users authors = set([]) for x in dom.findall('channel/item/{http://purl.org/dc/elements/1.1/}creator'): authors.add(x.text.lower()) for x in authors: if meta.Session.query(User).filter(User.name==unicode(x)).count() == 0: meta.Session.add(User(name=unicode(x), type=3)) meta.Session.flush() author_to_id = dict(meta.Session.query(User.name,User.id).all()) #create 'main' page if it doesn't exist try: main_page = meta.Session.query(Page).filter(Page.slug==u'main').one() except orm.exc.NoResultFound: main_page = meta.Session.merge( Page( title = u'Main', slug = u'main', stream_comment_style = u'summary', story_comment_style = u'list', visible = True, can_post = True, tweet = True, content = None ) ) meta.Session.flush() #import pages for x in dom.findall('channel/item'): if x.findtext('{http://wordpress.org/export/1.0/}post_type') != 'page': continue title = unicode(x.findtext('title')).strip() slug = unicode(slugify(title)) if slug == u'main': continue if meta.Session.query(Page).filter(Page.slug==slug).count() == 0: can_post = len(x.findall('{http://wordpress.org/export/1.0/}comment')) > 0 soup = BeautifulSoup(x.findtext('{http://purl.org/rss/1.0/modules/content/}encoded')) t_page = meta.Session.merge( Page( title = title, slug = slug, stream_comment_style = u'summary', story_comment_style = u'list', visible = x.findtext('{http://wordpress.org/export/1.0/}status') == "publish", can_post = can_post, tweet = False, content = unicode(soup), ) ) #add comments dummy_post = False t_post = None for comment in x.findall('{http://wordpress.org/export/1.0/}comment'): if dummy_post is False: t_post = Article( id=int(x.findtext('{http://wordpress.org/export/1.0/}post_id')), user_id=author_to_id.get(x.findtext('{http://purl.org/dc/elements/1.1/}creator').lower()), page_id=t_page.id, subject=unicode(t_page.title), date=datetime.datetime.strptime('2009-11-27 17:35:23',WORDPRESS_DT_FORMAT), published=True, permalink=None, can_comment=True, content=None, sticky=False ) dummy_post = True author_name = comment.findtext('{http://wordpress.org/export/1.0/}comment_author') author_email = comment.findtext('{http://wordpress.org/export/1.0/}comment_author_email') author_url = comment.findtext('{http://wordpress.org/export/1.0/}comment_author_url') if author_name is None and author_email is None and author_url is None: continue try: userid = author_to_id.get(author_name.lower(), None) if userid is not None: user_t = meta.Session.get(userid) author_name = user_t.name 
author_url = user_t.profile except: pass soup = BeautifulSoup(comment.findtext('{http://wordpress.org/export/1.0/}comment_content')) try: t_post.comments.append( Comment( author_name = unicode(author_name) if author_name is not None else None, author_email = unicode(author_email) if author_email is not None else None, author_url = unicode(author_url) if author_url is not None else None, parent_comment = None, subject = u'', date = datetime.datetime.strptime(comment.findtext('{http://wordpress.org/export/1.0/}comment_date'),WORDPRESS_DT_FORMAT), content = unicode(soup), ) ) except: pass if t_post is not None: t_page.posts.append(t_post) meta.Session.flush() static_file_path = os.path.join(static_path,'uploaded') #import uploads upload_old_to_new = {} for x in dom.findall('channel/item'): if x.findtext('{http://wordpress.org/export/1.0/}post_type') != 'attachment': continue src = x.findtext('{http://wordpress.org/export/1.0/}attachment_url') re_match = re.match(r'^(?P<basepath>.*\/uploads)\/(?P<year>\d+)\/(?P<month>\d+)\/(?P<file>.*)$',src) item = Upload() item.alt_text = unicode(x.findtext('{http://wordpress.org/export/1.0/}post_name')) item.description = None item.date = datetime.datetime(year=int(re_match.group('year')),month=int(re_match.group('month')),day=1) item.filepath = unicode(src.replace(re_match.group('basepath'),static_file_path)) meta.Session.add(item) meta.Session.flush() caption_regex = re.compile(ur'\[caption .*? caption=\"(.*?)\"\](.*)\[\/caption\]') replace_str = ur'<div class=\"img-block\">\2<span class=\"img-caption\">\1</span></div>' #import posts for x in dom.findall('channel/item'): if x.findtext('{http://wordpress.org/export/1.0/}post_type') != 'post': continue user_fk = author_to_id.get(x.findtext('{http://purl.org/dc/elements/1.1/}creator').lower()) page_fk = main_page.id post_pk = int(x.findtext('{http://wordpress.org/export/1.0/}post_id')) subject = x.findtext('title') published = x.findtext('{http://wordpress.org/export/1.0/}status') != "draft" date = None if not published else datetime.datetime.strptime(x.findtext('{http://wordpress.org/export/1.0/}post_date'),WORDPRESS_DT_FORMAT) permalink = None if not published else unicode(slugify('-'.join([date.date().strftime("%Y-%m-%d"),subject]))) can_comment = True content = x.findtext('{http://purl.org/rss/1.0/modules/content/}encoded') content = content.replace(u'%s/wp-content/uploads/'%base_wp_url,u'%s/uploaded/'%base_col_url) soup = BeautifulSoup(content) soup = caption_regex.sub(replace_str ,unicode(soup)) t_post = Article( id=post_pk, user_id=user_id, page_id=page_id, subject=unicode(subject), date=date, published=published, permalink=permalink, can_comment=can_comment, content=soup, sticky=False ) for tag in x.findall('category'): if tag.attrib.get('domain','') == 'tag' and tag.attrib.get('nicename',None) is not None: t_post.tags.append(meta.Session.query(Tag).get(unicode(tag.attrib['nicename']))) #add comments for comment in x.findall('{http://wordpress.org/export/1.0/}comment'): author_name = comment.findtext('{http://wordpress.org/export/1.0/}comment_author') author_email = comment.findtext('{http://wordpress.org/export/1.0/}comment_author_email') author_url = comment.findtext('{http://wordpress.org/export/1.0/}comment_author_url') if author_name is None and author_email is None and author_url is None: continue try: userid = author_to_id.get(author_name.lower(), None) if userid is not None: user_t = meta.Session.get(userid) author_name = user_t.name author_url = user_t.profile except: pass soup = 
BeautifulSoup(comment.findtext('{http://wordpress.org/export/1.0/}comment_content')) t_post.comments.append( Comment( author_name = unicode(author_name) if author_name is not None else None, author_email = unicode(author_email) if author_email is not None else None, author_url = unicode(author_url) if author_url is not None else None, parent_comment = None, subject = u'', date = datetime.datetime.strptime(comment.findtext('{http://wordpress.org/export/1.0/}comment_date'),WORDPRESS_DT_FORMAT), content = unicode(soup), ) ) meta.Session.add(t_post) meta.Session.flush() return '\n'.join(errors) class WordpressImporter(Command): # Parser configuration summary = "Import data from Wordpress XML" group_name = "columns" parser = Command.standard_parser(verbose=False) parser.set_defaults(config=os.path.join(os.path.dirname(__file__),'..','..',"production.ini"), here=os.path.join(os.path.dirname(__file__),'..','..')) parser.add_option("--config", metavar="CONFIG", action="store", dest="config", help="application config file [default: %default]") parser.add_option("--file", metavar="WPFILE", action="store", dest="wpfile", help="Wordpress xml file to import") parser.add_option("--here", metavar="HERE", action="store", dest="here", help="this directory") parser.add_option("--wpurl", metavar="WPURL", action="store", dest="wpurl", help="base url for the Wordpress instance") parser.add_option("--colurl", metavar="COLURL", action="store", dest="colurl", help="base url for the Columns instance") def command(self): try: config = ConfigParser({'here':self.options.here}) config.read(self.options.config) return main(config, self.options.wpfile, self.options.here, self.options.wpurl, self.options.colurl) except: return traceback.format_exc()
bsd-3-clause
879,172,627,925,431,900
38.831933
151
0.67616
false
pytorch/vision
references/video_classification/utils.py
1
7466
from collections import defaultdict, deque import datetime import time import torch import torch.distributed as dist import errno import os class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, attr)) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append( "{}: {}".format(name, str(meter)) ) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = '' start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt='{avg:.4f}') data_time = SmoothedValue(fmt='{avg:.4f}') space_fmt = ':' + str(len(str(len(iterable)))) + 'd' if torch.cuda.is_available(): log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}', 'max mem: {memory:.0f}' ]) else: log_msg = self.delimiter.join([ header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}' ]) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB)) else: print(log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time))) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('{} Total time: 
{}'.format(header, total_time_str)) def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target[None]) res = [] for k in topk: correct_k = correct[:k].flatten().sum(dtype=torch.float32) res.append(correct_k * (100.0 / batch_size)) return res def mkdir(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop('force', False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def init_distributed_mode(args): if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() elif hasattr(args, "rank"): pass else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}'.format( args.rank, args.dist_url), flush=True) torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank) setup_for_distributed(args.rank == 0)
bsd-3-clause
5,851,222,362,432,394,000
28.393701
94
0.534691
false
MatrixGamesHub/mtxPython
src/mtx/objects/Exit.py
1
1511
""" mtxPython - A framework to create matrix games. Copyright (C) 2016 Tobias Stampfl <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation in version 3 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from ..baseObjects import TriggerObject from .. import UpdateAct from .. import RegisterObjectClass class Exit(TriggerObject): def __init__(self, id, symbol): TriggerObject.__init__(self, id, symbol) self._locked = symbol == 'E' @staticmethod def GetSymbols(): return "eE" def IsLocked(self): return self._locked def IsUnlocked(self): return not self._locked def Lock(self): self._symbol = 'E' self._locked = True self._cell._field._level._game.AddAct(UpdateAct(self.GetId(), 'locked', True)) def Unlock(self): self._symbol = 'e' self._locked = False self._cell._field._level._game.AddAct(UpdateAct(self.GetId(), 'locked', False)) RegisterObjectClass(Exit)
gpl-3.0
4,400,528,616,661,663,000
29.22
87
0.673726
false
python-security/pyt
tests/cfg/cfg_base_test_case.py
1
2015
from ..base_test_case import BaseTestCase


class CFGBaseTestCase(BaseTestCase):
    def assertInCfg(self, connections):
        """Asserts that all connections in the connections list exist in the
        cfg, and that all connections not in the list do not exist.

        Args:
            connections(list[tuple]): the node at index 0 of the tuple has to
            be in the new_constraint set of the node at index 1 of the tuple.
        """
        for connection in connections:
            self.assertIn(
                self.cfg.nodes[connection[0]],
                self.cfg.nodes[connection[1]].outgoing,
                str(connection) + " expected to be connected"
            )
            self.assertIn(
                self.cfg.nodes[connection[1]],
                self.cfg.nodes[connection[0]].ingoing,
                str(connection) + " expected to be connected"
            )
        nodes = len(self.cfg.nodes)
        for element in range(nodes):
            for sets in range(nodes):
                if not (element, sets) in connections:
                    self.assertNotIn(
                        self.cfg.nodes[element],
                        self.cfg.nodes[sets].outgoing,
                        "(%s <- %s)" % (element, sets) +
                        " expected to be disconnected"
                    )
                    self.assertNotIn(
                        self.cfg.nodes[sets],
                        self.cfg.nodes[element].ingoing,
                        "(%s <- %s)" % (sets, element) +
                        " expected to be disconnected"
                    )

    def assertLineNumber(self, node, line_number):
        self.assertEqual(node.line_number, line_number)

    def cfg_list_to_dict(self, list):
        """This method converts the CFG list to a dict, making it easier to
        find nodes to test.

        This method assumes that no nodes in the code have the same label.
        """
        return {x.label: x for x in list}
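# Illustrative usage sketch (not part of the original file): the example
# filename and node indices are invented, and cfg_create_from_file is assumed
# to be a helper provided by BaseTestCase.
class CFGIfTestExample(CFGBaseTestCase):
    def test_single_if(self):
        self.cfg_create_from_file('examples/single_if.py')
        # Each pair (a, b) asserts self.cfg.nodes[a] is in nodes[b].outgoing,
        # i.e. an edge b -> a; every pair not listed must be disconnected.
        self.assertInCfg([(1, 0), (2, 1), (3, 1), (3, 2)])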
gpl-2.0
-136,281,146,157,829,660
39.3
95
0.522084
false
MostlyOpen/odoo_addons_jcafb
myo_lab_test_cst/wizard/__init__.py
1
1724
# -*- coding: utf-8 -*-
###############################################################################
#
#    Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

import lab_test_edit_wizard
import lab_test_request_direct_mail_wizard
import lab_test_person_import_wizard
import lab_test_person_check_wizard
import lab_test_anemia_dhc_import_wizard
import lab_test_anemia_dhc_check_wizard
import lab_test_anemia_dhc_validate_wizard
import lab_test_anemia_dhc_transcribe_wizard
import lab_test_parasito_swab_import_wizard
import lab_test_parasito_swab_check_wizard
import lab_test_parasito_swab_validate_wizard
import lab_test_parasito_swab_transcribe_wizard
import lab_test_urina_import_wizard
import lab_test_urina_check_wizard
import lab_test_urina_validate_wizard
import lab_test_urina_transcribe_wizard
import lab_test_result_validate_wizard
import lab_test_result_parasito_refresh_wizard
import lab_test_result_swab_refresh_wizard
import lab_test_result_urina_refresh_wizard
agpl-3.0
-5,231,536,710,703,189,000
42.1
79
0.722738
false
sdss/marvin
python/marvin/tools/rss.py
1
19928
#!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews # @Date: 2016-04-11 # @Filename: rss.py # @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause) # # @Last modified by: José Sánchez-Gallego ([email protected]) # @Last modified time: 2018-07-30 19:42:17 from __future__ import division, print_function import os import warnings import astropy.io.ascii import astropy.table import astropy.units import astropy.wcs import numpy from astropy.io import fits import marvin from marvin.core.exceptions import MarvinError, MarvinUserWarning from marvin.utils.datamodel.drp import datamodel_rss from marvin.utils.datamodel.drp.base import Spectrum as SpectrumDataModel from .core import MarvinToolsClass from .cube import Cube from .mixins import NSAMixIn from .quantities.spectrum import Spectrum class RSS(MarvinToolsClass, NSAMixIn, list): """A class to interface with a MaNGA DRP row-stacked spectra file. This class represents a fully reduced DRP row-stacked spectra object, initialised either from a file, a database, or remotely via the Marvin API. Instances of `.RSS` are a list of `.RSSFiber` objects, one for each fibre and exposure. `.RSSFiber` are initialised lazily, containing only basic information. They need to be initialised by calling `.RSSFiber.load` (unless `.RSS.autoload` is ``True``, in which case the instance is loaded when first accessed). In addition to the input arguments supported by `~.MarvinToolsClass` and `~.NSAMixIn`, this class accepts an ``autoload`` keyword argument that defines whether `.RSSFiber` objects should be automatically loaded when they are accessed. """ _qualflag = 'DRP3QUAL' def __init__(self, input=None, filename=None, mangaid=None, plateifu=None, mode=None, data=None, release=None, autoload=True, drpall=None, download=None, nsa_source='auto'): MarvinToolsClass.__init__(self, input=input, filename=filename, mangaid=mangaid, plateifu=plateifu, mode=mode, data=data, release=release, drpall=drpall, download=download) NSAMixIn.__init__(self, nsa_source=nsa_source) #: An `astropy.table.Table` with the observing information associated #: with this RSS object. self.obsinfo = None #: If True, unloaded `.RSSFiber` instances are automatically loaded #: when accessed. Otherwise, they need to be loaded via `.RSSFiber.load`. self.autoload = autoload if self.data_origin == 'file': self._load_rss_from_file(data=self.data) elif self.data_origin == 'db': self._load_rss_from_db(data=self.data) elif self.data_origin == 'api': self._load_rss_from_api() Cube._init_attributes(self) # Checks that the drpver set in MarvinToolsClass matches the header header_drpver = self.header['VERSDRP3'].strip() header_drpver = 'v1_5_1' if header_drpver == 'v1_5_0' else header_drpver assert header_drpver == self._drpver, ('mismatch between cube._drpver={0} ' 'and header drpver={1}'.format(self._drpver, header_drpver)) # EXPNUM in obsinfo is a string. Cast it to int self.obsinfo['EXPNUM'] = self.obsinfo['EXPNUM'].astype(numpy.int32) # Inits self as an empty list. 
list.__init__(self, []) self._populate_fibres() def _set_datamodel(self): """Sets the datamodel for DRP.""" self.datamodel = datamodel_rss[self.release.upper()] self._bitmasks = datamodel_rss[self.release.upper()].bitmasks def __repr__(self): """Representation for RSS.""" return ('<Marvin RSS (mangaid={self.mangaid!r}, plateifu={self.plateifu!r}, ' 'mode={self.mode!r}, data_origin={self.data_origin!r})>'.format(self=self)) def __getitem__(self, fiberid): """Returns the `.RSSFiber` whose fiberid matches the input.""" rssfiber = super(RSS, self).__getitem__(fiberid) if self.autoload and not rssfiber.loaded: rssfiber.load() return rssfiber def _getFullPath(self): """Returns the full path of the file in the tree.""" if not self.plateifu: return None plate, ifu = self.plateifu.split('-') return super(RSS, self)._getFullPath('mangarss', ifu=ifu, drpver=self._drpver, plate=plate, wave='LOG') def download(self): """Downloads the cube using sdss_access - Rsync""" if not self.plateifu: return None plate, ifu = self.plateifu.split('-') return super(RSS, self).download('mangarss', ifu=ifu, drpver=self._drpver, plate=plate, wave='LOG') def getCube(self): """Returns the `~marvin.tools.cube.Cube` associated with this RSS.""" return Cube(plateifu=self.plateifu, mode=self.mode, release=self.release) def load_all(self): """Loads all the `.RSSFiber` associated to this `.RSS` instance.""" for rssfiber in self: if not rssfiber.loaded: rssfiber.load() def select_fibers(self, exposure_no=None, set=None, mjd=None): """Selects fibres that match one or multiple of the input parameters. Parameters ---------- exposure_no : int The exposure number. Ignored if ``None``. set : int The set id of the exposure. Ignored if ``None``. mjd : int The MJD of the exposure. Ignored if ``None``. Returns ------- rssfibers : list A list of `.RSSFiber` instances whose obsinfo matches all the input parameters. The `.RSS.autoload` option is respected. Example ------- >>> rss = marvin.tools.RSS('8485-1901') >>> fibers = rss.select_fibers(set=2) >>> fibers [<RSSFiber [ 2.22306705, 11.84955406, 9.65761662, ..., 0. , 0. , 0. ] 1e-17 erg / (Angstrom cm2 fiber s)>, <RSSFiber [2.18669987, 1.4861778 , 2.55065155, ..., 0. , 0. , 0. ] 1e-17 erg / (Angstrom cm2 fiber s)>, <RSSFiber [2.75228763, 5.53485441, 2.31695175, ..., 0. , 0. , 0. ] 1e-17 erg / (Angstrom cm2 fiber s)>] """ mask_exp = (self.obsinfo['EXPNUM'].astype(int) == exposure_no) if exposure_no else True mask_set = (self.obsinfo['SET'].astype(int) == set) if set else True mask_mjd = (self.obsinfo['MJD'].astype(int) == mjd) if mjd else True mask = mask_exp & mask_set & mask_mjd valid_exposures = numpy.where(mask)[0] n_exposures = len(self.obsinfo) n_fibres_per_exposure = self._nfibers // n_exposures fibre_to_exposure = numpy.arange(self._nfibers) // n_fibres_per_exposure fibres_in_valid_exposures = numpy.where(numpy.in1d(fibre_to_exposure, valid_exposures))[0] return [self[ii] for ii in fibres_in_valid_exposures] def _load_rss_from_file(self, data=None): """Initialises the RSS object from a file.""" if data is not None: assert isinstance(data, fits.HDUList), 'data is not an HDUList object' else: try: self.data = fits.open(self.filename) except (IOError, OSError) as err: raise OSError('filename {0} cannot be found: {1}'.format(self.filename, err)) self.header = self.data[1].header self.wcs = astropy.wcs.WCS(self.header) self.wcs = self.wcs.dropaxis(1) # The header creates an empty axis for the exposures. 
# Confirm that this is a RSS file assert 'XPOS' in self.data and self.header['CTYPE1'] == 'WAVE-LOG', \ 'invalid file type. It does not appear to be a LOGRSS.' self._wavelength = self.data['WAVE'].data self._shape = None self._nfibers = self.data['FLUX'].shape[0] self.obsinfo = astropy.table.Table(self.data['OBSINFO'].data) Cube._do_file_checks(self) def _load_rss_from_db(self, data=None): """Initialises the RSS object from the DB. At this time the DB does not contain enough information to successfully instantiate a RSS object so we hack the data access mode to try to use files. For users this should be irrelevant since they rarely will have a Marvin DB. For the API, it means the access to RSS data will happen via files. """ warnings.warn('DB mode is not working for RSS. Trying file access mode.', MarvinUserWarning) fullpath = self._getFullPath() if fullpath and os.path.exists(fullpath): self.filename = fullpath self.data_origin = 'file' self._load_rss_from_file() else: raise MarvinError('cannot find a valid RSS file for ' 'plateifu={self.plateifu!r}, release={self.release!r}' .format(self=self)) def _load_rss_from_api(self): """Initialises the RSS object using the remote API.""" # Checks that the RSS exists. routeparams = {'name': self.plateifu} url = marvin.config.urlmap['api']['getRSS']['url'].format(**routeparams) try: response = self._toolInteraction(url.format(name=self.plateifu)) except Exception as ee: raise MarvinError('found a problem when checking if remote RSS ' 'exists: {0}'.format(str(ee))) data = response.getData() self.header = fits.Header.fromstring(data['header']) self.wcs = astropy.wcs.WCS(fits.Header.fromstring(data['wcs_header'])) self._wavelength = data['wavelength'] self._nfibers = data['nfibers'] self.obsinfo = astropy.io.ascii.read(data['obsinfo']) if self.plateifu != data['plateifu']: raise MarvinError('remote RSS has a different plateifu!') return def _populate_fibres(self): """Populates the internal list of fibres.""" n_exposures = len(self.obsinfo) n_fibres_per_exposure = self._nfibers // n_exposures for fiberid in range(self._nfibers): exp_index = fiberid // n_fibres_per_exposure exp_obsinfo = self.obsinfo[[exp_index]] self.append(RSSFiber(fiberid, self, self._wavelength, load=False, obsinfo=exp_obsinfo, pixmask_flag=self.header['MASKNAME'])) class RSSFiber(Spectrum): """A `~astropy.units.Quantity` representing a fibre observation. Represents the spectral flux observed though a fibre, and associated with an `.RSS` object. In addition to the flux, it contains information about the inverse variance, mask, and other associated spectra defined in the datamodel. Parameters ---------- fiberid : int The fiberid (0-indexed row in the parent `.RSS` object) for this fibre observation. rss : `.RSS` The parent `.RSS` object with which this fibre observation is associated. wavelength : numpy.ndarray The wavelength positions of each array element, in Angstrom. load : bool Whether the information in the `.RSSFiber` should be loaded during instantiation. Defaults to lazy loading (use `.RSSFiber.load` to load the fibre information). obsinfo : astropy.table.Table A `~astropy.table.Table` with the information for the exposure to which this fibre observation belongs. kwargs : dict Additional keyword arguments to be passed to `.Spectrum`. """ def __new__(cls, fiberid, rss, wavelength, pixmask_flag=None, load=False, obsinfo=None, **kwargs): # For now we instantiate a mostly empty Spectrum. Proper instantiation # will happen in load(). 
array_size = len(wavelength) obj = super(RSSFiber, cls).__new__( cls, numpy.zeros(array_size, dtype=numpy.float64), wavelength, scale=None, unit=None,) obj._extra_attributes = ['fiberid', 'rss', 'loaded', 'obsinfo'] obj._spectra = [] return obj def __init__(self, fiberid, rss, wavelength, pixmask_flag=None, load=False, obsinfo=None, **kwargs): self.fiberid = fiberid self.rss = rss self.obsinfo = obsinfo self.pixmask_flag = pixmask_flag self.loaded = False if load: self.load() def __repr__(self): if not self.loaded: return ('<RSSFiber (plateifu={self.rss.plateifu!r}, ' 'fiberid={self.fiberid!r}, loaded={self.loaded!r})>'.format(self=self)) else: return super(RSSFiber, self).__repr__() def __array_finalize__(self, obj): if obj is None: return super(RSSFiber, self).__array_finalize__(obj) # Adds _extra_attributes from the previous object. if hasattr(obj, '_extra_attributes'): for attr in obj._extra_attributes: setattr(self, attr, getattr(obj, attr, None)) self._extra_attributes = getattr(obj, '_extra_attributes', None) # Adds the additional spectra from the previous object. if hasattr(obj, '_spectra'): for spectrum in obj._spectra: setattr(self, spectrum, getattr(obj, spectrum, None)) self._spectra = getattr(obj, '_spectra', None) def __getitem__(self, sl): new_obj = super(RSSFiber, self).__getitem__(sl) for spectra_name in self._spectra: current_spectrum = getattr(self, spectra_name, None) new_spectrum = None if current_spectrum is None else current_spectrum.__getitem__(sl) setattr(new_obj, spectra_name, new_spectrum) return new_obj def load(self): """Loads the fibre information.""" assert self.loaded is False, 'object already loaded.' # Depending on whether the parent RSS is a file or API-populated, we # select the data to use. if self.rss.data_origin == 'file': # If the data origin is a file we use the HDUList in rss.data rss_data = self.rss.data elif self.rss.data_origin == 'api': # If data origin is the API, we make a request for the data # associated with this fiberid for all the extensions in the file. url = marvin.config.urlmap['api']['getRSSFiber']['url'] try: response = self.rss._toolInteraction(url.format(name=self.rss.plateifu, fiberid=self.fiberid)) except Exception as ee: raise MarvinError('found a problem retrieving RSS fibre data for ' 'plateifu={!r}, fiberid={!r}: {}'.format( self.rss.plateifu, self.fiberid, str(ee))) api_data = response.getData() # Create a quick and dirty HDUList from the API data so that we # can parse it in the same way as if the data origin is file. rss_data = astropy.io.fits.HDUList([astropy.io.fits.PrimaryHDU()]) for ext in api_data: rss_data.append(astropy.io.fits.ImageHDU(data=api_data[ext], name=ext.upper())) else: raise ValueError('invalid data_origin={!r}'.format(self.rss.data_origin)) # Compile a list of all RSS datamodel extensions, either RSS or spectra datamodel_extensions = self.rss.datamodel.rss + self.rss.datamodel.spectra for extension in datamodel_extensions: # Retrieve the value (and mask and ivar, if associated) for each extension. 
value, ivar, mask = self._get_extension_data(extension, rss_data, data_origin=self.rss.data_origin) if extension.name == 'flux': self.value[:] = value[:] self.ivar = ivar self.mask = mask self._set_unit(extension.unit) else: new_spectrum = Spectrum(value, self.wavelength, ivar=ivar, mask=mask, unit=extension.unit) setattr(self, extension.name, new_spectrum) self._spectra.append(extension.name) self.loaded = True def _get_extension_data(self, extension, data, data_origin='file'): """Returns the value of an extension for this fibre, either from file or API. Parameters ---------- extension : datamodel object The datamodel object containing the information for the extension we want to retrieve. data : ~astropy.io.fits.HDUList An `~astropy.io.fits.HDUList` object containing the RSS information. """ # Determine if this is an RSS datamodel object or an spectrum. # If the origin is the API, the extension data contains a single spectrum, # not a row-stacked array, so we consider it a 1D array. is_extension_data_1D = isinstance(extension, SpectrumDataModel) or data_origin == 'api' value = data[extension.fits_extension()].data if extension.has_mask(): mask = data[extension.fits_extension('mask')].data else: mask = None if hasattr(extension, 'has_ivar') and extension.has_ivar(): ivar = data[extension.fits_extension('ivar')].data elif hasattr(extension, 'has_std') and extension.has_std(): std = data[extension.fits_extension('std')].data ivar = 1. / (std**2) else: ivar = None # If this is an RSS, gets the right row in the stacked spectra. if not is_extension_data_1D: value = value[self.fiberid, :] mask = mask[self.fiberid, :] if mask is not None else None ivar = ivar[self.fiberid, :] if ivar is not None else None return value, ivar, mask @property def masked(self): """Return a masked array where the mask is greater than zero.""" assert self.mask is not None, 'mask is not set' return numpy.ma.array(self.value, mask=(self.mask > 0)) def descale(self): """Returns a copy of the object in which the scale is unity. Note that this only affects to the core value of this quantity. Associated array attributes will not be modified. Example: >>> fiber.unit Unit("1e-17 erg / (Angstrom cm2 fiber s)") >>> fiber[100] <RSSFiber 0.270078063011169 1e-17 erg / (Angstrom cm2 fiber s)> >>> fiber_descaled = fiber.descale() >>> fiber_descaled.unit Unit("Angstrom cm2 fiber s") >>> fiber[100] <RSSFiber 2.70078063011169e-18 erg / (Angstrom cm2 fiber s)> """ if self.unit.scale == 1: return self value_descaled = self.value * self.unit.scale value_unit = astropy.units.CompositeUnit(1, self.unit.bases, self.unit.powers) if self.ivar is not None: ivar_descaled = self.ivar / (self.unit.scale ** 2) else: ivar_descaled = None copy_of_self = self.copy() copy_of_self.value[:] = value_descaled copy_of_self.ivar = ivar_descaled copy_of_self._set_unit(value_unit) return copy_of_self
bsd-3-clause
-6,008,613,979,454,197,000
36.033457
111
0.592451
false
nodep/NovaBox
docs/calcVoltage.py
1
5074
# This calculates the best combination of voltage dividers for the given
# voltages. Sort the output of this program to get the least number of
# resistor values and the smallest voltage error sum.

# standard 5%
resistors_5p = [10, 11, 12, 13, 15, 16, 18, 20, 22, 24, 27, 30,
                33, 36, 39, 43, 47, 51, 56, 62, 68, 75, 82, 91]

# standard 2%
resistors_2p = [10.0, 10.5, 11.0, 11.5, 12.1, 12.7, 13.3, 14.0,
                14.7, 15.4, 16.2, 16.9, 17.8, 18.7, 19.6, 20.5,
                21.5, 22.6, 23.7, 24.9, 26.1, 27.4, 28.7, 30.1,
                31.6, 33.2, 34.8, 36.5, 38.3, 40.2, 42.2, 44.2,
                46.4, 48.7, 51.1, 53.6, 56.2, 59.0, 61.9, 64.9,
                68.1, 71.5, 75.0, 78.7, 82.5, 86.6, 90.9, 95.3]

# standard 1%
resistors_1p = [10.0, 10.2, 10.5, 10.7, 11.0, 11.3, 11.5, 11.8,
                12.1, 12.4, 12.7, 13.0, 13.3, 13.7, 14.0, 14.3,
                14.7, 15.0, 15.4, 15.8, 16.2, 16.5, 16.9, 17.4,
                17.8, 18.2, 18.7, 19.1, 19.6, 20.0, 20.5, 21.0,
                21.5, 22.1, 22.6, 23.2, 23.7, 24.3, 24.9, 25.5,
                26.1, 26.7, 27.4, 28.0, 28.7, 29.4, 30.1, 30.9,
                31.6, 32.4, 33.2, 34.0, 34.8, 35.7, 36.5, 37.4,
                38.3, 39.2, 40.2, 41.2, 42.2, 43.2, 44.2, 45.3,
                46.4, 47.5, 48.7, 49.9, 51.1, 52.3, 53.6, 54.9,
                56.2, 57.6, 59.0, 60.4, 61.9, 63.4, 64.9, 66.5,
                68.1, 69.8, 71.5, 73.2, 75.0, 76.8, 78.7, 80.6,
                82.5, 84.5, 86.6, 88.7, 90.9, 93.1, 95.3, 97.6]


def makeAllResistors(resIn):
    # Expand the base E-series decade into four decades (x1, x10, x100, x1000).
    resistors = resIn.copy()
    for r in resIn:
        resistors.append(r * 10)
    for r in resIn:
        resistors.append(r * 100)
    for r in resIn:
        resistors.append(r * 1000)
    return resistors


resistors = makeAllResistors(resistors_2p)

##################################################
##################################################
##################################################


def getSet(vin, vtol, vref):
    dataSet = []
    for r1 in resistors:
        for r2 in resistors:
            vout = r1 / (r1 + r2) * vin
            verr = abs(vout - vref)
            if vtol > verr:
                dataSet.append((r1, r2, verr))
    if len(dataSet) == 0:
        raise RuntimeError("vtol is too limiting; data set is empty for voltage: {}".format(vin))
    return (vin, dataSet)


def calcResistors():
    vtol = 0.05     # tolerance to abs(vout - vref)
    vref = 2.5      # reference voltage
    # input voltages for single LiIon 4.2V cell
    vins = (3.949, 3.777, 3.601, 3.477)
    maxResistors = 5
    maxTotalError = vins[0] * .01

    dsets = []
    for vin in vins:
        dsets.append(getSet(vin, vtol, vref))

    print('# of resistors\ttotal error V max={}\ttotal current (mA) at vin1'
          '\tVout\tR1\tR2\tVout\tR1\tR2\tVout\tR1\tR2\tVout\tR1\tR2'
          .format(round(maxTotalError, 3)))

    combinationCounters = [0] * len(vins)
    done = False
    while not done:
        # sum the total verr for this combination
        tverr = 0
        tcurr = 0
        resistorValues = {}
        desc = ''
        for cnt in range(len(dsets)):
            dataEntry = dsets[cnt][1][combinationCounters[cnt]]
            vin = dsets[cnt][0]
            r1 = dataEntry[0]
            r2 = dataEntry[1]
            resistorValues[r1] = 1
            resistorValues[r2] = 1
            tverr += dataEntry[2]
            tcurr += vin / (r1 + r2)
            desc += '\t{}\t{}\t{}'.format(vin, r1, r2)
        if len(resistorValues) <= maxResistors and tverr <= maxTotalError:
            print('{}\t{}\t{}{}'.format(len(resistorValues), round(tverr, 3),
                                        round(tcurr, 3), desc))

        # increment combination counters
        done = True
        for setcnt in range(len(combinationCounters)):
            combinationCounters[setcnt] += 1
            if combinationCounters[setcnt] < len(dsets[setcnt][1]):
                done = False
                break
            else:
                combinationCounters[setcnt] = 0


def calcMax6457():
    lipoCell = 3.3  # cutoff voltage of a single LiPo cell
    vTripGoals = (
        4,  # 4 cell LiPo
        6,  # 6 cell LiPo
    )
    vTh = (1.093 + 1.151) / 2  # average of min and max from the datasheet

    results = []
    for r1 in resistors:
        # add the upper resistor in the voltage divider - the one between
        # VDD and IN1 in MAX6457
        res = [r1]
        #            r2  vTrip  uA  vErr  vErr cell
        res.extend([ 0,  0,     0,  1000, 1000] * len(vTripGoals))
        for r2 in resistors:
            # calc the trip and current for r1/r2
            vTrip = (r1 / r2 + 1) * vTh
            current = vTrip / (r1 + r2)
            # compare with previous values
            for cnt in range(len(vTripGoals)):
                vDest = vTripGoals[cnt] * lipoCell
                vErr = vTrip - vDest
                vErrCell = vErr / vTripGoals[cnt]
                # is this better?
                ndx = 1 + cnt * 5
                if abs(vErrCell) < abs(res[ndx + 4]):
                    res[ndx] = r2
                    res[ndx + 1] = vTrip
                    res[ndx + 2] = current * 1000
                    res[ndx + 3] = vErr
                    res[ndx + 4] = vErrCell
        # find the min of vErrCell
        vErrCellMin = 0
        for cnt in range(len(vTripGoals)):
            vErrCell = res[5 + cnt * 5]
            vErrCellMin = min(vErrCell, vErrCellMin)
        res.append(vErrCellMin)
        results.append(res)

    # sort by the minimum per-cell error (the last column of each row)
    results.sort(key=lambda row: row[-1])

    header = 'r1 K'
    for vTripGoal in vTripGoals:
        header += '\tr2 K\tvTrip {}\tuA\tvErr\tvErr cell'.format(vTripGoal)
    print(header + '\tmin vErr cell')
    for res in results:
        if res[0] >= 470:
            row = ''
            for elem in res:
                row += '\t' if row else ''
                row += elem if isinstance(elem, str) else str(round(elem, 3))
            print(row)


calcMax6457()
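# Quick sanity check of the divider formula used above, with illustrative
# values (not taken from the tables): for vin = 3.949 V, a 22K upper leg (r2)
# and a 39K lower leg (r1), vout = r1 / (r1 + r2) * vin is about 2.525 V,
# inside the 0.05 V tolerance of the 2.5 V reference used in calcResistors().
def divider_vout(vin, r1, r2):
    """Tap voltage of a divider with r2 on top (to vin) and r1 to ground."""
    return r1 / (r1 + r2) * vin


assert abs(divider_vout(3.949, 39, 22) - 2.5) < 0.05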
gpl-3.0
8,456,453,649,515,112,000
27.858824
149
0.58376
false
saelo/willie-modules
asm.py
1
4715
# coding=utf8
"""
asm.py - (dis)assembly features.
(c) 2014 Samuel Groß
"""
from willie import web
from willie.module import commands, nickname_commands, example

from random import choice
from binascii import hexlify, unhexlify
import string
import re
import os
from subprocess import Popen, PIPE


@commands('disas', 'disas64', 'disassemble', 'disassemble64')
@example('.disas 66556689e590c9c3')
def disassemble(bot, trigger):
    """Disassemble x86 machine code."""
    if not trigger.group(2):
        return bot.reply('Nothing to disassemble')
    try:
        arg = trigger.group(2)
        # remove all 0x
        while "0x" in arg:
            arg = arg.replace("0x", "")
        # remove everything except hex
        arg = re.sub(r"[^a-fA-F0-9]", r"", arg)
        code = unhexlify(arg)
    except Exception:
        return bot.say('Invalid hex sequence')
    bits = 64 if '64' in trigger.group(1) else 32
    filename = '/tmp/' + ''.join(
        choice(string.ascii_lowercase) for i in range(10)) + '.bin'
    with open(filename, 'wb') as f:
        f.write(code)
    result = Popen(['ndisasm', '-b', str(bits), '-o', '0x1000', filename],
                   stdout=PIPE).stdout.read()
    os.remove(filename)
    for line in result.split('\n'):
        bot.say(line)


@commands('as', 'as64', 'assemble', 'assemble64')
@example('.as push ebp; mov ebp, esp; jmp 0x14')
def assemble(bot, trigger):
    """Assemble x86 instructions."""
    code = trigger.group(2)
    if not code:
        return bot.reply('Nothing to assemble')
    bits = 64 if '64' in trigger.group(1) else 32
    filename = '/tmp/' + ''.join(choice(string.ascii_lowercase)
                                 for i in range(10)) + '.asm'
    with open(filename, 'w') as f:
        f.write('BITS %i\n' % bits + re.sub(r';\s*', ';\n', code))
    p = Popen(['nasm', '-f', 'bin', '-o', filename[:-4], filename], stderr=PIPE)
    p.wait()
    os.remove(filename)
    for line in p.stderr.read().split('\n'):
        bot.say(line)
    if p.returncode == 0:
        with open(filename[:-4], 'rb') as f:
            raw = f.read()
        hex = hexlify(raw)
        if hex:
            bot.say(hex)
        os.remove(filename[:-4])


def x86jmp(bot, instr):
    """Display information about an x86 conditional jump."""
    if instr not in jxx:
        return bot.say('I can\'t find anything about that instruction, sorry')
    bot.say('%s : %s' % (instr, jxx[instr]))


def x86instr(bot, instr):
    """Display information about any x86 instruction that is not a conditional jump."""
    raw = web.get('http://www.felixcloutier.com/x86/')
    match = re.search('<tr><td><a href="./(?P<page>[A-Z:]*).html">%s</a></td>'
                      '<td>(?P<desc>[^<]*)</td></tr>' % instr, raw)
    if not match:
        return bot.say('I can\'t find anything about that instruction, sorry')
    bot.say('%s : %s -- %s' % (instr, match.group('desc'),
            'http://www.felixcloutier.com/x86/%s' % match.group('page')))


@commands('x86', 'instr', 'instruction')
def instruction(bot, trigger):
    """Display information about an x86 instruction."""
    instr = trigger.group(2)
    if not instr:
        return bot.reply('Give me an instruction')
    instr = instr.strip().upper()
    if 'J' == instr[0] and not instr == 'JMP':
        return x86jmp(bot, instr)
    x86instr(bot, instr)


jxx = {
    'JA'   : 'Jump if above (CF=0 and ZF=0)',
    'JAE'  : 'Jump if above or equal (CF=0)',
    'JB'   : 'Jump if below (CF=1)',
    'JBE'  : 'Jump if below or equal (CF=1 or ZF=1)',
    'JC'   : 'Jump if carry (CF=1)',
    'JCXZ' : 'Jump if CX register is 0',
    'JECXZ': 'Jump if ECX register is 0',
    'JRCXZ': 'Jump if RCX register is 0',
    'JE'   : 'Jump if equal (ZF=1)',
    'JG'   : 'Jump if greater (ZF=0 and SF=OF)',
    'JGE'  : 'Jump if greater or equal (SF=OF)',
    'JL'   : 'Jump if less (SF!=OF)',
    'JLE'  : 'Jump if less or equal (ZF=1 or SF!=OF)',
    'JNA'  : 'Jump if not above (CF=1 or ZF=1)',
    'JNAE' : 'Jump if not above or equal (CF=1)',
    'JNB'  : 'Jump if not below (CF=0)',
    'JNBE' : 'Jump if not below or equal (CF=0 and ZF=0)',
    'JNC'  : 'Jump if not carry (CF=0)',
    'JNE'  : 'Jump if not equal (ZF=0)',
    'JNG'  : 'Jump if not greater (ZF=1 or SF!=OF)',
    'JNGE' : 'Jump if not greater or equal (SF!=OF)',
    'JNL'  : 'Jump if not less (SF=OF)',
    'JNLE' : 'Jump if not less or equal (ZF=0 and SF=OF)',
    'JNO'  : 'Jump if not overflow (OF=0)',
    'JNP'  : 'Jump if not parity (PF=0)',
    'JNS'  : 'Jump if not sign (SF=0)',
    'JNZ'  : 'Jump if not zero (ZF=0)',
    'JO'   : 'Jump if overflow (OF=1)',
    'JP'   : 'Jump if parity (PF=1)',
    'JPE'  : 'Jump if parity even (PF=1)',
    'JPO'  : 'Jump if parity odd (PF=0)',
    'JS'   : 'Jump if sign (SF=1)'
}
mit
5,803,989,795,326,820,000
31.736111
121
0.572974
false
CTSNE/NodeDefender
NodeDefender/db/sql/message.py
1
2337
from NodeDefender.db.sql import SQL
from datetime import datetime
from NodeDefender.db.sql.node import LocationModel
import NodeDefender
from flask import url_for


class MessageModel(SQL.Model):
    '''
    Represents one message, optionally linked to a group, user, node,
    iCPE or sensor
    '''
    __tablename__ = 'message'
    id = SQL.Column(SQL.Integer, primary_key=True)
    date = SQL.Column(SQL.DateTime)
    subject = SQL.Column(SQL.String(50))
    body = SQL.Column(SQL.String(180))
    group_id = SQL.Column(SQL.Integer, SQL.ForeignKey('group.id'))
    user_id = SQL.Column(SQL.Integer, SQL.ForeignKey('user.id'))
    node_id = SQL.Column(SQL.Integer, SQL.ForeignKey('node.id'))
    icpe_id = SQL.Column(SQL.Integer, SQL.ForeignKey('icpe.id'))
    sensor_id = SQL.Column(SQL.Integer, SQL.ForeignKey('sensor.id'))

    def __init__(self, subject, body):
        self.subject = subject
        self.body = body
        self.date = datetime.now()

    def to_json(self):
        # Defaults so url/icon are always bound, even when no relation is
        # set (the original left them undefined in that case).
        url = "#"
        icon = ''
        if self.group:
            group = self.group.name
            url = url_for('admin_view.admin_group',
                          name=NodeDefender.serializer.dumps(group))
            icon = 'fa fa-users fa-3x'
        else:
            group = False
        if self.user:
            user = self.user.email
            url = url_for('admin_view.admin_user',
                          email=NodeDefender.serializer.dumps(user))
            icon = 'fa fa-user fa-3x'
        else:
            user = False
        if self.node:
            node = self.node.name
            url = url_for('node_view.nodes_node',
                          name=NodeDefender.serializer.dumps(node))
            icon = 'fa fa-map-marker fa-3x'
        else:
            node = False
        if self.icpe:
            icpe = self.icpe.name
            url = "#"
            icon = 'fa fa-bug fa-3x'
        else:
            icpe = False
        if self.sensor:
            sensor = self.sensor.name
            url = "#"
            icon = 'fa fa-bug fa-3x'
        else:
            sensor = False
        return {'group': group, 'user': user,
                'node': node, 'icpe': icpe, 'sensor': sensor,
                'subject': self.subject, 'body': self.body,
                'date': str(self.date), 'icon': icon, 'url': url}
mit
6,253,839,242,871,607,000
31.013699
68
0.531023
false
sbunatyan/tavrida
tavrida/dsfile.py
1
2950
import ConfigParser


class DSFileEntry(object):
    """Record containing information about a service's exchanges."""

    def __init__(self, service_name, service_exchange,
                 notifications_exchange=None):
        super(DSFileEntry, self).__init__()
        self._service_name = service_name
        self._service_exchange = service_exchange
        self._notifications_exchange = notifications_exchange

    @property
    def service_name(self):
        return self._service_name

    @property
    def service_exchange(self):
        return self._service_exchange

    @property
    def notifications_exchange(self):
        return self._notifications_exchange


class DSFile(object):
    """DSFile represents a configuration file for the Discovery Service.

    The configuration file has the following format:

        [service name]
        exchange=service exchange name
        notifications=service notifications exchange name (optional)

        [service name 2]
        ...

    You can use DSFile as a dict to get information about a service by
    service name:

        dsf = dsfile.DSFile('dsfile.ini')
        print dsf['myservice'].service_exchange
        print dsf['myservice'].notifications_exchange

    You can get the list of services using

        for service_name in dsf:
            ...

    or

        [x for x in dsf]

    or

        print list(dsf)

    or

        print dsf.services

    Raises a ConfigParser.Error exception (or child exception) if the input
    file is malformed.
    """

    def __init__(self, filepath):
        """
        :param filepath: file path
        :type filepath: string
        """
        super(DSFile, self).__init__()
        self._filepath = filepath
        # Map from service_name to DSFileEntry
        self._entries = self._load(self._filepath)

    def _load(self, filepath):
        """Parses the file and returns the entries map."""
        entries = {}
        cp = ConfigParser.ConfigParser()
        # A context manager is not used here because it is hard to test.
        f = open(filepath)
        try:
            cp.readfp(f)
        finally:
            f.close()
        for service_name in cp.sections():
            service_exchange = cp.get(service_name, "exchange")
            try:
                notifications_exchange = cp.get(service_name, "notifications")
            except ConfigParser.NoOptionError:
                notifications_exchange = None
            entry = DSFileEntry(service_name, service_exchange,
                                notifications_exchange)
            entries[entry.service_name] = entry
        return entries

    def __getitem__(self, service_name):
        return self._entries[service_name]

    @property
    def services(self):
        """Returns list of service names."""
        return self._entries.keys()

    def __iter__(self):
        return iter(self.services)
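# Self-contained usage sketch (not part of the original module); the file
# path and contents are invented for illustration.
if __name__ == "__main__":
    with open('/tmp/dsfile.ini', 'w') as f:
        f.write("[myservice]\n"
                "exchange=myservice_exchange\n"
                "notifications=myservice_notifications\n")
    dsf = DSFile('/tmp/dsfile.ini')
    print(dsf['myservice'].service_exchange)        # myservice_exchange
    print(dsf['myservice'].notifications_exchange)  # myservice_notifications
    print(list(dsf))                                # ['myservice']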
apache-2.0
3,520,143,772,545,682,000
27.365385
78
0.592203
false
mpirnat/adventofcode
day02/test.py
1
1072
#!/usr/bin/env python

import unittest

from day02 import paper_area_from_dimensions
from day02 import ribbon_length_from_dimensions


class TestFindWrappingPaperSquareFootage(unittest.TestCase):

    cases = (
        ('2x3x4', 58),
        ('1x1x10', 43),
    )

    def test_gets_square_footage(self):
        for (dimensions, expected) in self.cases:
            result = paper_area_from_dimensions(dimensions)
            self.assertEqual(result, expected,
                    "Expected {dimensions} to yield {expected}, "
                    "but got {result}".format(**locals()))


class FindRibbonLength(unittest.TestCase):

    cases = (
        ('2x3x4', 34),
        ('1x1x10', 14),
    )

    def test_gets_ribbon_length(self):
        for (dimensions, expected) in self.cases:
            result = ribbon_length_from_dimensions(dimensions)
            self.assertEqual(result, expected,
                    "Expected {dimensions} to yield {expected}, "
                    "but got {result}".format(**locals()))


if __name__ == '__main__':
    unittest.main()
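# The functions under test live in day02.py, which is not part of this
# listing. A hedged sketch consistent with the cases above (this is the
# well-known Advent of Code 2015 day 2 puzzle): the paper is the box's
# surface area plus the area of its smallest side, and the ribbon is the
# smallest face's perimeter plus the box's volume for the bow.
def paper_area_from_dimensions(dimensions):
    l, w, h = (int(x) for x in dimensions.split('x'))
    sides = (l * w, w * h, h * l)
    return 2 * sum(sides) + min(sides)


def ribbon_length_from_dimensions(dimensions):
    l, w, h = sorted(int(x) for x in dimensions.split('x'))
    return 2 * (l + w) + l * w * h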
mit
3,091,189,973,084,539,400
25.8
79
0.591418
false
stephanie-wang/ray
python/ray/tune/tests/test_api.py
1
27755
import shutil import copy import os import time import unittest from unittest.mock import patch import ray from ray.rllib import _register_all from ray import tune from ray.tune import DurableTrainable, Trainable, TuneError, Stopper from ray.tune import register_env, register_trainable, run_experiments from ray.tune.schedulers import TrialScheduler, FIFOScheduler from ray.tune.trial import Trial from ray.tune.result import (TIMESTEPS_TOTAL, DONE, HOSTNAME, NODE_IP, PID, EPISODES_TOTAL, TRAINING_ITERATION, TIMESTEPS_THIS_ITER, TIME_THIS_ITER_S, TIME_TOTAL_S, TRIAL_ID, EXPERIMENT_TAG) from ray.tune.logger import Logger from ray.tune.experiment import Experiment from ray.tune.resources import Resources from ray.tune.suggest import grid_search from ray.tune.suggest.suggestion import _MockSuggestionAlgorithm from ray.tune.utils import (flatten_dict, get_pinned_object, pin_in_object_store) from ray.tune.utils.mock import mock_storage_client, MOCK_REMOTE_DIR class TrainableFunctionApiTest(unittest.TestCase): def setUp(self): ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024) def tearDown(self): ray.shutdown() _register_all() # re-register the evicted objects def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None): """Checks logging is the same between APIs. Ignore "DONE" for logging but checks that the scheduler is notified properly with the last result. """ class_results = copy.deepcopy(results) function_results = copy.deepcopy(results) class_output = [] function_output = [] scheduler_notif = [] class MockScheduler(FIFOScheduler): def on_trial_complete(self, runner, trial, result): scheduler_notif.append(result) class ClassAPILogger(Logger): def on_result(self, result): class_output.append(result) class FunctionAPILogger(Logger): def on_result(self, result): function_output.append(result) class _WrappedTrainable(Trainable): def _setup(self, config): del config self._result_iter = copy.deepcopy(class_results) def _train(self): if sleep_per_iter: time.sleep(sleep_per_iter) res = self._result_iter.pop(0) # This should not fail if not self._result_iter: # Mark "Done" for last result res[DONE] = True return res def _function_trainable(config, reporter): for result in function_results: if sleep_per_iter: time.sleep(sleep_per_iter) reporter(**result) class_trainable_name = "class_trainable" register_trainable(class_trainable_name, _WrappedTrainable) trials = run_experiments( { "function_api": { "run": _function_trainable, "loggers": [FunctionAPILogger], }, "class_api": { "run": class_trainable_name, "loggers": [ClassAPILogger], }, }, raise_on_failed_trial=False, scheduler=MockScheduler()) # Ignore these fields NO_COMPARE_FIELDS = { HOSTNAME, NODE_IP, TRIAL_ID, EXPERIMENT_TAG, PID, TIME_THIS_ITER_S, TIME_TOTAL_S, DONE, # This is ignored because FunctionAPI has different handling "timestamp", "time_since_restore", "experiment_id", "date", } self.assertEqual(len(class_output), len(results)) self.assertEqual(len(function_output), len(results)) def as_comparable_result(result): return { k: v for k, v in result.items() if k not in NO_COMPARE_FIELDS } function_comparable = [ as_comparable_result(result) for result in function_output ] class_comparable = [ as_comparable_result(result) for result in class_output ] self.assertEqual(function_comparable, class_comparable) self.assertEqual(sum(t.get(DONE) for t in scheduler_notif), 2) self.assertEqual( as_comparable_result(scheduler_notif[0]), as_comparable_result(scheduler_notif[1])) # Make sure the last result is the same. 
self.assertEqual( as_comparable_result(trials[0].last_result), as_comparable_result(trials[1].last_result)) return function_output, trials def testPinObject(self): X = pin_in_object_store("hello") @ray.remote def f(): return get_pinned_object(X) self.assertEqual(ray.get(f.remote()), "hello") def testFetchPinned(self): X = pin_in_object_store("hello") def train(config, reporter): get_pinned_object(X) reporter(timesteps_total=100, done=True) register_trainable("f1", train) [trial] = run_experiments({ "foo": { "run": "f1", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100) def testRegisterEnv(self): register_env("foo", lambda: None) self.assertRaises(TypeError, lambda: register_env("foo", 2)) def testRegisterEnvOverwrite(self): def train(config, reporter): reporter(timesteps_total=100, done=True) def train2(config, reporter): reporter(timesteps_total=200, done=True) register_trainable("f1", train) register_trainable("f1", train2) [trial] = run_experiments({ "foo": { "run": "f1", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 200) def testRegisterTrainable(self): def train(config, reporter): pass class A: pass class B(Trainable): pass register_trainable("foo", train) Experiment("test", train) register_trainable("foo", B) Experiment("test", B) self.assertRaises(TypeError, lambda: register_trainable("foo", B())) self.assertRaises(TuneError, lambda: Experiment("foo", B())) self.assertRaises(TypeError, lambda: register_trainable("foo", A)) self.assertRaises(TypeError, lambda: Experiment("foo", A)) def testTrainableCallable(self): def dummy_fn(config, reporter, steps): reporter(timesteps_total=steps, done=True) from functools import partial steps = 500 register_trainable("test", partial(dummy_fn, steps=steps)) [trial] = run_experiments({ "foo": { "run": "test", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps) [trial] = tune.run(partial(dummy_fn, steps=steps)).trials self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps) def testBuiltInTrainableResources(self): class B(Trainable): @classmethod def default_resource_request(cls, config): return Resources(cpu=config["cpu"], gpu=config["gpu"]) def _train(self): return {"timesteps_this_iter": 1, "done": True} register_trainable("B", B) def f(cpus, gpus, queue_trials): return run_experiments( { "foo": { "run": "B", "config": { "cpu": cpus, "gpu": gpus, }, } }, queue_trials=queue_trials)[0] # Should all succeed self.assertEqual(f(0, 0, False).status, Trial.TERMINATED) self.assertEqual(f(1, 0, True).status, Trial.TERMINATED) self.assertEqual(f(1, 0, True).status, Trial.TERMINATED) # Too large resource request self.assertRaises(TuneError, lambda: f(100, 100, False)) self.assertRaises(TuneError, lambda: f(0, 100, False)) self.assertRaises(TuneError, lambda: f(100, 0, False)) # TODO(ekl) how can we test this is queued (hangs)? 
# f(100, 0, True) def testRewriteEnv(self): def train(config, reporter): reporter(timesteps_total=1) register_trainable("f1", train) [trial] = run_experiments({ "foo": { "run": "f1", "env": "CartPole-v0", } }) self.assertEqual(trial.config["env"], "CartPole-v0") def testConfigPurity(self): def train(config, reporter): assert config == {"a": "b"}, config reporter(timesteps_total=1) register_trainable("f1", train) run_experiments({ "foo": { "run": "f1", "config": { "a": "b" }, } }) def testLogdir(self): def train(config, reporter): assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd() reporter(timesteps_total=1) register_trainable("f1", train) run_experiments({ "foo": { "run": "f1", "local_dir": "/tmp/logdir", "config": { "a": "b" }, } }) def testLogdirStartingWithTilde(self): local_dir = "~/ray_results/local_dir" def train(config, reporter): cwd = os.getcwd() assert cwd.startswith(os.path.expanduser(local_dir)), cwd assert not cwd.startswith("~"), cwd reporter(timesteps_total=1) register_trainable("f1", train) run_experiments({ "foo": { "run": "f1", "local_dir": local_dir, "config": { "a": "b" }, } }) def testLongFilename(self): def train(config, reporter): assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd() reporter(timesteps_total=1) register_trainable("f1", train) run_experiments({ "foo": { "run": "f1", "local_dir": "/tmp/logdir", "config": { "a" * 50: tune.sample_from(lambda spec: 5.0 / 7), "b" * 50: tune.sample_from(lambda spec: "long" * 40), }, } }) def testBadParams(self): def f(): run_experiments({"foo": {}}) self.assertRaises(TuneError, f) def testBadParams2(self): def f(): run_experiments({ "foo": { "run": "asdf", "bah": "this param is not allowed", } }) self.assertRaises(TuneError, f) def testBadParams3(self): def f(): run_experiments({ "foo": { "run": grid_search("invalid grid search"), } }) self.assertRaises(TuneError, f) def testBadParams4(self): def f(): run_experiments({ "foo": { "run": "asdf", } }) self.assertRaises(TuneError, f) def testBadParams5(self): def f(): run_experiments({"foo": {"run": "PPO", "stop": {"asdf": 1}}}) self.assertRaises(TuneError, f) def testBadParams6(self): def f(): run_experiments({ "foo": { "run": "PPO", "resources_per_trial": { "asdf": 1 } } }) self.assertRaises(TuneError, f) def testBadStoppingReturn(self): def train(config, reporter): reporter() register_trainable("f1", train) def f(): run_experiments({ "foo": { "run": "f1", "stop": { "time": 10 }, } }) self.assertRaises(TuneError, f) def testNestedStoppingReturn(self): def train(config, reporter): for i in range(10): reporter(test={"test1": {"test2": i}}) with self.assertRaises(TuneError): [trial] = tune.run( train, stop={ "test": { "test1": { "test2": 6 } } }).trials [trial] = tune.run(train, stop={"test/test1/test2": 6}).trials self.assertEqual(trial.last_result["training_iteration"], 7) def testStoppingFunction(self): def train(config, reporter): for i in range(10): reporter(test=i) def stop(trial_id, result): return result["test"] > 6 [trial] = tune.run(train, stop=stop).trials self.assertEqual(trial.last_result["training_iteration"], 8) def testStoppingMemberFunction(self): def train(config, reporter): for i in range(10): reporter(test=i) class Stopclass: def stop(self, trial_id, result): return result["test"] > 6 [trial] = tune.run(train, stop=Stopclass().stop).trials self.assertEqual(trial.last_result["training_iteration"], 8) def testStopper(self): def train(config, reporter): for i in range(10): reporter(test=i) class CustomStopper(Stopper): def __init__(self): self._count = 0 def 
__call__(self, trial_id, result): print("called") self._count += 1 return result["test"] > 6 def stop_all(self): return self._count > 5 trials = tune.run(train, num_samples=5, stop=CustomStopper()).trials self.assertTrue(all(t.status == Trial.TERMINATED for t in trials)) self.assertTrue( any( t.last_result.get("training_iteration") is None for t in trials)) def testBadStoppingFunction(self): def train(config, reporter): for i in range(10): reporter(test=i) class CustomStopper: def stop(self, result): return result["test"] > 6 def stop(result): return result["test"] > 6 with self.assertRaises(TuneError): tune.run(train, stop=CustomStopper().stop) with self.assertRaises(TuneError): tune.run(train, stop=stop) def testEarlyReturn(self): def train(config, reporter): reporter(timesteps_total=100, done=True) time.sleep(99999) register_trainable("f1", train) [trial] = run_experiments({ "foo": { "run": "f1", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100) def testReporterNoUsage(self): def run_task(config, reporter): print("hello") experiment = Experiment(run=run_task, name="ray_crash_repro") [trial] = ray.tune.run(experiment).trials print(trial.last_result) self.assertEqual(trial.last_result[DONE], True) def testErrorReturn(self): def train(config, reporter): raise Exception("uh oh") register_trainable("f1", train) def f(): run_experiments({ "foo": { "run": "f1", } }) self.assertRaises(TuneError, f) def testSuccess(self): def train(config, reporter): for i in range(100): reporter(timesteps_total=i) register_trainable("f1", train) [trial] = run_experiments({ "foo": { "run": "f1", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99) def testNoRaiseFlag(self): def train(config, reporter): raise Exception() register_trainable("f1", train) [trial] = run_experiments( { "foo": { "run": "f1", } }, raise_on_failed_trial=False) self.assertEqual(trial.status, Trial.ERROR) def testReportInfinity(self): def train(config, reporter): for i in range(100): reporter(mean_accuracy=float("inf")) register_trainable("f1", train) [trial] = run_experiments({ "foo": { "run": "f1", } }) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result["mean_accuracy"], float("inf")) def testNestedResults(self): def create_result(i): return {"test": {"1": {"2": {"3": i, "4": False}}}} flattened_keys = list(flatten_dict(create_result(0))) class _MockScheduler(FIFOScheduler): results = [] def on_trial_result(self, trial_runner, trial, result): self.results += [result] return TrialScheduler.CONTINUE def on_trial_complete(self, trial_runner, trial, result): self.complete_result = result def train(config, reporter): for i in range(100): reporter(**create_result(i)) algo = _MockSuggestionAlgorithm() scheduler = _MockScheduler() [trial] = tune.run( train, scheduler=scheduler, search_alg=algo, stop={ "test/1/2/3": 20 }).trials self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result["test"]["1"]["2"]["3"], 20) self.assertEqual(trial.last_result["test"]["1"]["2"]["4"], False) self.assertEqual(trial.last_result[TRAINING_ITERATION], 21) self.assertEqual(len(scheduler.results), 20) self.assertTrue( all( set(result) >= set(flattened_keys) for result in scheduler.results)) self.assertTrue(set(scheduler.complete_result) >= set(flattened_keys)) self.assertEqual(len(algo.results), 20) self.assertTrue( all(set(result) >= set(flattened_keys) for result in algo.results)) with 
self.assertRaises(TuneError): [trial] = tune.run(train, stop={"1/2/3": 20}) with self.assertRaises(TuneError): [trial] = tune.run(train, stop={"test": 1}).trials def testReportTimeStep(self): # Test that no timestep counts are logged if the Trainable never # returns any. results1 = [dict(mean_accuracy=5, done=i == 99) for i in range(100)] logs1, _ = self.checkAndReturnConsistentLogs(results1) self.assertTrue(all(log[TIMESTEPS_TOTAL] is None for log in logs1)) # Test that no timesteps_this_iter are logged if only timesteps_total # are returned. results2 = [dict(timesteps_total=5, done=i == 9) for i in range(10)] logs2, _ = self.checkAndReturnConsistentLogs(results2) # Re-run the same trials but with added delay. This is to catch some # inconsistent timestep counting that was present in the multi-threaded # FunctionRunner. This part of the test can be removed once the # multi-threaded FunctionRunner is removed from ray/tune. # TODO: remove once the multi-threaded function runner is gone. logs2, _ = self.checkAndReturnConsistentLogs(results2, 0.5) # check all timesteps_total report the same value self.assertTrue(all(log[TIMESTEPS_TOTAL] == 5 for log in logs2)) # check that none of the logs report timesteps_this_iter self.assertFalse( any(TIMESTEPS_THIS_ITER in log for log in logs2)) # Test that timesteps_total and episodes_total are reported even when # timesteps_this_iter and episodes_this_iter only return zeros. results3 = [ dict(timesteps_this_iter=0, episodes_this_iter=0) for i in range(10) ] logs3, _ = self.checkAndReturnConsistentLogs(results3) self.assertTrue(all(log[TIMESTEPS_TOTAL] == 0 for log in logs3)) self.assertTrue(all(log[EPISODES_TOTAL] == 0 for log in logs3)) # Test that timesteps_total and episodes_total are properly counted # when timesteps_this_iter and episodes_this_iter report non-zero # values. results4 = [ dict(timesteps_this_iter=3, episodes_this_iter=i) for i in range(10) ] logs4, _ = self.checkAndReturnConsistentLogs(results4) # The last reported result should not be double-logged. self.assertEqual(logs4[-1][TIMESTEPS_TOTAL], 30) self.assertNotEqual(logs4[-2][TIMESTEPS_TOTAL], logs4[-1][TIMESTEPS_TOTAL]) self.assertEqual(logs4[-1][EPISODES_TOTAL], 45) self.assertNotEqual(logs4[-2][EPISODES_TOTAL], logs4[-1][EPISODES_TOTAL]) def testAllValuesReceived(self): results1 = [ dict(timesteps_total=(i + 1), my_score=i**2, done=i == 4) for i in range(5) ] logs1, _ = self.checkAndReturnConsistentLogs(results1) # check if the correct number of results were reported self.assertEqual(len(logs1), len(results1)) def check_no_missing(reported_result, result): common_results = [reported_result[k] == result[k] for k in result] return all(common_results) # check that no result was dropped or modified complete_results = [ check_no_missing(log, result) for log, result in zip(logs1, results1) ] self.assertTrue(all(complete_results)) # check if done was logged exactly once self.assertEqual(len([r for r in logs1 if r.get("done")]), 1) def testNoDoneReceived(self): # repeat same test but without explicitly reporting done=True results1 = [ dict(timesteps_total=(i + 1), my_score=i**2) for i in range(5) ] logs1, trials = self.checkAndReturnConsistentLogs(results1) # check if the correct number of results were reported.
self.assertEqual(len(logs1), len(results1)) def check_no_missing(reported_result, result): common_results = [reported_result[k] == result[k] for k in result] return all(common_results) # check that no result was dropped or modified complete_results1 = [ check_no_missing(log, result) for log, result in zip(logs1, results1) ] self.assertTrue(all(complete_results1)) def testDurableTrainable(self): class TestTrain(DurableTrainable): def _setup(self, config): self.state = {"hi": 1, "iter": 0} def _train(self): self.state["iter"] += 1 return {"timesteps_this_iter": 1, "done": True} def _save(self, path): return self.state def _restore(self, state): self.state = state sync_client = mock_storage_client() mock_get_client = "ray.tune.durable_trainable.get_cloud_sync_client" with patch(mock_get_client) as mock_get_cloud_sync_client: mock_get_cloud_sync_client.return_value = sync_client test_trainable = TestTrain(remote_checkpoint_dir=MOCK_REMOTE_DIR) checkpoint_path = test_trainable.save() test_trainable.train() test_trainable.state["hi"] = 2 test_trainable.restore(checkpoint_path) self.assertEqual(test_trainable.state["hi"], 1) self.addCleanup(shutil.rmtree, MOCK_REMOTE_DIR) def testCheckpointDict(self): class TestTrain(Trainable): def _setup(self, config): self.state = {"hi": 1} def _train(self): return {"timesteps_this_iter": 1, "done": True} def _save(self, path): return self.state def _restore(self, state): self.state = state test_trainable = TestTrain() result = test_trainable.save() test_trainable.state["hi"] = 2 test_trainable.restore(result) self.assertEqual(test_trainable.state["hi"], 1) trials = run_experiments({ "foo": { "run": TestTrain, "checkpoint_at_end": True } }) for trial in trials: self.assertEqual(trial.status, Trial.TERMINATED) self.assertTrue(trial.has_checkpoint()) def testMultipleCheckpoints(self): class TestTrain(Trainable): def _setup(self, config): self.state = {"hi": 1, "iter": 0} def _train(self): self.state["iter"] += 1 return {"timesteps_this_iter": 1, "done": True} def _save(self, path): return self.state def _restore(self, state): self.state = state test_trainable = TestTrain() checkpoint_1 = test_trainable.save() test_trainable.train() checkpoint_2 = test_trainable.save() self.assertNotEqual(checkpoint_1, checkpoint_2) test_trainable.restore(checkpoint_2) self.assertEqual(test_trainable.state["iter"], 1) test_trainable.restore(checkpoint_1) self.assertEqual(test_trainable.state["iter"], 0) trials = run_experiments({ "foo": { "run": TestTrain, "checkpoint_at_end": True } }) for trial in trials: self.assertEqual(trial.status, Trial.TERMINATED) self.assertTrue(trial.has_checkpoint()) def testIterationCounter(self): def train(config, reporter): for i in range(100): reporter(itr=i, timesteps_this_iter=1) register_trainable("exp", train) config = { "my_exp": { "run": "exp", "config": { "iterations": 100, }, "stop": { "timesteps_total": 100 }, } } [trial] = run_experiments(config) self.assertEqual(trial.status, Trial.TERMINATED) self.assertEqual(trial.last_result[TRAINING_ITERATION], 100) self.assertEqual(trial.last_result["itr"], 99) if __name__ == "__main__": import pytest import sys sys.exit(pytest.main(["-v", __file__]))
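The suite above exercises Tune's pluggable stop conditions end to end. As a standalone illustration, here is a minimal sketch of the same Stopper interface outside the test harness; it assumes the same ray/tune version as the suite, the metric name "score" is made up, and the tune.run call is left commented because it needs a running Ray session.

from ray import tune
from ray.tune import Stopper


class BoundStopper(Stopper):
    """Illustrative stopper: ends a trial once "score" exceeds a bound,
    and ends the whole run after enough trials have been stopped."""

    def __init__(self, bound=6, max_stopped=3):
        self._bound = bound
        self._max_stopped = max_stopped
        self._stopped = 0

    def __call__(self, trial_id, result):
        # Invoked for every reported result; returning True stops this trial.
        if result["score"] > self._bound:
            self._stopped += 1
            return True
        return False

    def stop_all(self):
        # Returning True terminates all remaining trials in the run.
        return self._stopped >= self._max_stopped


def trainable(config, reporter):
    for i in range(10):
        reporter(score=i)

# trials = tune.run(trainable, num_samples=5, stop=BoundStopper()).trials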
apache-2.0
4,064,561,105,439,785,000
31.652941
79
0.537741
false
bkaganyildiz/StreamBasedNotification
StreamBasedNotifs/StreamBasedNotifs/settings.py
1
3723
""" Django settings for StreamBasedNotifs project. Generated by 'django-admin startproject' using Django 1.10.4. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os import os.path Temp_Path = os.path.realpath('.') # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '3tsbkof$522cnus5qvv7gkj)%+ly7j8z%r61n0t3$s&%yyu(2u' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'capture', 'channels', 'celery', 'background_task', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'StreamBasedNotifs.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [Temp_Path +"/templates"], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'StreamBasedNotifs.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ] STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR),"static_cdn") MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' CHANNEL_LAYERS = { 'default': { 'BACKEND': 'asgi_redis.RedisChannelLayer', 'CONFIG': { 'hosts': [('demo.scorebeyond.com', 8007)], }, 'ROUTING': 'routing.channel_routing', } } BACKGROUND_TASK_RUN_ASYNC=True
gpl-3.0
-4,743,300,722,820,291,000
24.861111
91
0.676873
false
brandonsturgeon/auction_automation
lib/sites/framework.py
1
2207
class BasicSite(): """ Module for the http://www.BasicSite.com site """ def __init__(self, auction_helper): self.base_url = "" self.url = "" self.AuctionHelper = auction_helper self.require_js = False def determine_auction_type(self, text): return self.AuctionHelper.determine_auction_type(text) def get_direct_link_soup(self, link): """ Takes a link and returns a processed soup """ soup = self.AuctionHelper.get_direct_link_soup(link) return soup def clean_up(self, text): """ Takes a string and encodes it to utf-8, gets rid of excess spaces/tabs """ return " ".join(text.encode("utf-8").strip().split()) def process_page(self, soup): print "Processing {}".format(self.url) auctions = [] for auction in enumerable: print "----------------------------------------" # Find the type of auction #auction_type = self.determine_auction_type(auction_data.text) #print "" #print "Title: {}".format(auction_title) #print "Type: {}".format(auction_type) #print "More info link: {}".format(more_info_link) #print "Description: {}".format(description) #print "Auction Location: {}".format(auction_location) #print "Auction Begin Time: {}".format(auction_begin_time) #print "Auction End Time: {}".format(auction_end_time) #print "" #struct = { # "title": auction_title, # "type": auction_type, # "more_info_link": more_info_link, # "description": description, # "auction_location": auction_location, # "auction_begin_time": auction_begin_time, # "auction_end_time": auction_end_time, #} ## Convert all the values away from unicode #struct = {k: str(v) for k,v in struct.iteritems()} #auctions.append(struct) print "----------------------------------------" print "Finished processing auctions for {}".format(self.base_url) return auctions
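process_page above iterates over `enumerable`, a name the template never defines, so a concrete site module is evidently expected to supply the real element lookup. Below is a minimal hedged sketch of such a subclass in the repo's Python 2 idiom; the site URL and CSS class are invented for illustration.

class ExampleSite(BasicSite):
    """ Hypothetical subclass: the URL and the 'auction' CSS class are
    illustrative only, not part of the original framework """
    def __init__(self, auction_helper):
        BasicSite.__init__(self, auction_helper)
        self.base_url = "http://www.example.com"
        self.url = self.base_url + "/auctions"

    def process_page(self, soup):
        auctions = []
        # Supplies the concrete iterable that the template's undefined
        # `enumerable` placeholder stands in for
        for auction in soup.find_all("div", class_="auction"):
            struct = {
                "title": self.clean_up(auction.find("h2").text),
                "type": self.determine_auction_type(auction.text),
            }
            auctions.append(struct)
        return auctions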
mit
-4,928,986,184,203,300,000
35.783333
86
0.534209
false
Python-Yarn/Yarn
setup.py
1
1435
#!/usr/bin/env python from setuptools import setup # TODO: Handle automatic versioning from build system VERSION = '0.0.0' setup( name='yarn', version=VERSION, description='Yarn is a tool for remote command execution and product deployment.', author='Jason L McFarland', author_email='[email protected]', packages=['yarn',], # test_suite='nose.collector', # tests_require=['nose', 'paramiko'], install_requires=['paramiko>=1.13'], entry_points = {'console_scripts': ['yarn = yarn.yarn:main',]}, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: Unix', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Python Modules', 'Topic :: System', 'Topic :: Software Distribution', 'Topic :: System :: Clustering', 'Topic :: System :: Systems Administration', ], )
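The entry_points stanza above makes setuptools generate a `yarn` executable on install. Roughly, the generated wrapper is equivalent to the sketch below; that main() returns an exit status is an assumption about yarn.yarn.

# Approximate body of the console script that `pip install .` generates
# from the entry_points declaration above.
import sys

from yarn.yarn import main

if __name__ == "__main__":
    sys.exit(main())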
mit
6,095,233,603,633,385,000
34.875
86
0.609756
false
onyb/dune
core/exceptions/__init__.py
1
1241
class UnsupportedPlatformException(Exception): """ Exception raised for platforms unsupported by Dune """ pass class OPAMConfigurationError(Exception): """ Exception raised if OPAM configuration is not found """ pass class InsufficientPrivilegeError(Exception): """ Exception raised if API server is launched with insufficient privileges """ pass class ExcessivePrivilegeError(Exception): """ Exception raised if API server is launched as root """ pass class UnikernelLibraryNotFound(Exception): """ Exception raised if the unikernel build tools are not found by the API server """ pass class MessageBrokerException(Exception): """ Exception raised if the message broker is not found by the API server """ pass class RedisServerNotFound(Exception): """ Exception raised if the Redis Server is not running """ pass class MongoDBServerNotFound(Exception): """ Exception raised if the MongoDB server is not running """ pass class RedisQueueException(MessageBrokerException): """ Exception raised if the Python Redis Queue is not found by the API server, or is not running any workers """
apache-2.0
6,269,148,778,540,242,000
19.683333
108
0.69299
false
crash-g/BoardGameBot
boardgamebot/request_manager.py
1
12721
"""This module is the core of the bot. It communicates with all the other parts of the application. """ import sys import logging import exceptions import constants from tools import input_parser from tools import history_manager from tools import http from tools import output_formatter from objects import chat_history from objects import answer logger = logging.getLogger("request_manager") # reraises BggUnreachable, NoResultFound and InvalidXmlStructure def _searchByName(name, chatId): """Searches for a boardgame using part of the name. Args: name (str): Part of the name of the game. chatId (int): The ID of the chat where the request came from. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. """ return _searchList(name, http.searchByName, chatId) # reraises BggUnreachable, NoResultFound and InvalidXmlStructure def _searchByNameExact(name, chatId): """Searches for a boardgame by name, trying to match the name exactly. Args: name (str): The name of the game. chatId (int): The ID of the chat where the request came from. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. """ return _searchList(name, http.searchByNameExact, chatId) # reraises BggUnreachable, NoResultFound and InvalidXmlStructure all def _searchList(searchString, httpSearch, chatId): """Called by all functions that expect a list of games as result. If the match is unique, the result of :func:`_searchById` is returned instead. Args: searchString (str): The string to pass to the search function. httpSearch (Callable[[str],game.gameList]): The function to use to search. chatId (int): The ID of the chat where the request came from. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. """ gameList = httpSearch(searchString) gameList.setOriginalSearch(searchString) if (1 == gameList.length()): id_ = gameList.get(0).id_ return _searchById(id_, chatId) history_manager.updateLastGameList(gameList, chatId) return output_formatter.formatGameList(gameList) # reraises BggUnreachable, NoResultFound and InvalidXmlStructure def _searchById(id_, chatId, more=False): """Searches for a boardgame by ID. Args: id_ (int): The ID of the game to search. chatId (int): The ID of the chat where the request came from. more (bool): True if the answer should show additional info. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. Raises: .exceptions.NoResultFound: If no game corresponds to the ID. """ game = http.searchById(id_) formattedGame = output_formatter.formatGame(game, more) history_manager.updateLastGame(game, formattedGame.formattedAnswer, chatId) return formattedGame # reraises BggUnreachable, NoResultFound and InvalidXmlStructure def _searchByIdInline(id_): """Searches for a game by ID and returns an inline answer. Args: id_ (int): The ID of the game to search. Returns: answer.TelegramInlineAnswer: An object containing all the information about a single entry in the list of results which is to be returned. """ game = http.searchById(id_) return output_formatter.formatInlineGame(game) # reraises BggUnreachable, NoResultFound and InvalidXmlStructure def _searchInlineList(searchString, httpSearch, offset): """Searches for a list of games by name (exact or partial). Args: searchString (str): The (partial) name of the game. httpSearch (Callable[[str],game.gameList]): The function to use to search. offset (int): The offset to apply to the result list before starting to parse the results. 
Returns: answer.TelegramInlineAnswerList: An object containing all the information which is to be returned. """ inlineList = answer.TelegramInlineAnswerList(36000, False) gameList = httpSearch(searchString) lastIndex = min(offset + constants.INLINE_LIST_PAGE_SIZE, gameList.length()) for index in range(offset, lastIndex): inlineList.addInlineAnswer(_searchByIdInline(gameList.get(index).id_)) if lastIndex < gameList.length(): inlineList.setNextOffset(str(lastIndex)) return inlineList def _gameFromList(pos, chatId): """Returns a game from the most recent search list of the chat. Args: pos (int): The position of the game in the list. chatId (int): The ID of the chat where the request came from. Returns: .game.Game: The game at the given position in the list. """ id_ = history_manager.getGameIdFromRecentList(pos, chatId) return _searchById(id_, chatId) # CALLBACK METHODS def _processGameCallback(data, chatId, msgId): """Processes the press of a callback button associated to a game. Args: data (str): The callback data associated to the button. chatId (int): The ID of the chat that originated the query. msgId (int): The ID of the message associated to the callback button. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. """ firstChar, id_ = input_parser.parseCallbackGameData(data) more = "m" == firstChar if msgId != history_manager.getLastGameMsgId(chatId): answer = _searchById(id_, chatId, more) history_manager.setMsgId(chatId, msgId) else: game = history_manager.getLastGame(chatId) answer = output_formatter.formatGame(game, more) answer.setType("e") return answer def _processListCallback(data, chatId, msgId): """Processes the press of a callback button associated to a list of games. Args: data (str): The callback data associated to the button. chatId (int): The ID of the chat that originated the query. msgId (int): The ID of the message associated to the callback button. Returns: .answer.TelegramAnswer: An object containing all the information to be sent. """ firstChar, searchString, offset = input_parser.parseCallbackListData(data) if msgId != history_manager.getLastGameListMsgId(chatId): gameList = http.searchByName(searchString) gameList.setOriginalSearch(searchString) gameList.setOffset(int(offset)) history_manager.updateLastGameList(gameList, chatId) history_manager.setMsgId(chatId, msgId) else: gameList = history_manager.getLastGameList(chatId) if "n" == firstChar: newOffset = gameList.offset + constants.LIST_PAGE_SIZE else: newOffset = gameList.offset - constants.LIST_PAGE_SIZE return _changePage(gameList, newOffset) def _changePage(gameList, offset): """Change the page in a list of results. Args: gameList (.game.GameList): the list of games to display. offset (int): The new offset of the list. Returns: .answer.TelegramAnswer: an object containing all the information to be sent. Raises: .exceptions.ListNavigationOutOfBound: If for some reason the position is out of bound. """ if offset < 0 or offset >= gameList.length(): logger.error("New offset is out of bound, this should not happen.") raise exceptions.ListNavigationOutOfBound() gameList.setOffset(offset) answer = output_formatter.formatGameList(gameList) answer.setType("e") return answer # PUBLIC def processCommand(command, msg, chatId=None): """Entry point of this module for normal queries. This is used to process user input in the form of a command string and a message body. Args: command (str): The command to process. msg (str): An optional argument to the command. May be None. 
chatId (int): The ID of the chat that sent the message. It is used to update the chat history. Returns: .answer.TelegramAnswer: An answer to the message, containing the required info or an error message. """ try: # start and help are default telegram commands if "start" == command: logger.debug("start") return output_formatter.formatHelp() elif "help" == command: logger.debug("help") return output_formatter.formatHelp() elif "i" == command or "id" == command: logger.debug("id") return _searchById(msg, chatId) elif "b" == command or "boardgame" == command: logger.debug("boardgame") return _searchByName(msg, chatId) elif "e" == command or "exact" == command: logger.debug("exact") return _searchByNameExact(msg, chatId) elif "L" == command: logger.debug("gameFromList") return _gameFromList(msg, chatId) else: return output_formatter.formatCommandNotSupported(command) except exceptions.NoResultFound: return output_formatter.formatNoResultFound() except (exceptions.BggUnreachable, exceptions.InvalidXmlStructure): # TODO differentiate? return output_formatter.formatBggUnreachable() except (exceptions.ChatHistoryNotFound, exceptions.MissingFromChatHistory): return output_formatter.formatHistoryNotFound() except exceptions.GameListIndexOutOfBound as err: return output_formatter.formatGameListIndexNotValid(err.index) def processCallback(data, chatId, msgId): """Entry point of this module for callback queries. This is used to process user input in the form of a data string associated to the callback button. Args: data (str): The data associated to the callback button. chatId (int): The ID of the chat where the query originated. It is used to update the chat history. msgId (int): The ID of the message associated to the callback buttons. Returns: .answer.TelegramAnswer: An answer to the message, containing the required info or an error message. """ try: firstChar = data[:1] if 'g' == firstChar: return _processGameCallback(data[1:], chatId, msgId) elif 'l' == firstChar: return _processListCallback(data[1:], chatId, msgId) else: return output_formatter.formatBadCallbackData() except (exceptions.ChatHistoryNotFound, exceptions.MissingFromChatHistory): return output_formatter.formatHistoryNotFoundCallback() except exceptions.StaleListCallback: return output_formatter.formatStaleList() except (exceptions.ListNavigationOutOfBound, exceptions.BadCallbackData): return output_formatter.formatBadCallbackData() def processInline(command, msg, userId, listOffset=0): """Entry point of this module for inline queries. This is used to process user input in the form of a command string and a message body. Args: command (str): An optional command, used to recognize internal queries (like queries by ID). msg (str): The message to process. userId (int): The ID of the user that sent the message. It is used to retrieve the user history. listOffset (int): an optional offset used for pagination. Returns: .answer.TelegramAnswer: An answer to the message, containing the required info or an error message. 
""" try: if listOffset is None: listOffset = 0 if command: if "i" == command: logger.debug("Inline query by ID") inlineList = answer.TelegramInlineAnswerList(36000, False) game = _searchByIdInline(msg) inlineList.addInlineAnswer(game) return inlineList else: logger.error("Inline command " + command + " is not supported.") elif "r" == msg: logger.debug("Inline recent games") return history_manager.getRecentGames(userId) elif len(msg) < constants.INLINE_EXACT_QUERY_THRESHOLD: logger.debug("Inline exact search") return _searchInlineList(msg, http.searchByNameExact, listOffset) else: logger.debug("Inline non-exact search") return _searchInlineList(msg, http.searchByName, listOffset) except exceptions.NoResultFound: pass # do nothing if nothing is found except: # in case of any problem, send default result logger.exception("Error in inline query.") return constants.INLINE_DEFAULT
mit
8,809,473,913,156,834,000
38.141538
100
0.678013
false
LittleBuster/CheckYourMemory
startWnd.py
1
8305
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'startWnd.ui' # # Created: Tue Aug 19 01:43:54 2014 # by: PyQt5 UI code generator 5.2.1 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_StartForm(object): def setupUi(self, StartForm): StartForm.setObjectName("StartForm") StartForm.resize(604, 347) StartForm.setMinimumSize(QtCore.QSize(604, 347)) StartForm.setMaximumSize(QtCore.QSize(604, 347)) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("images/app.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) StartForm.setWindowIcon(icon) self.label = QtWidgets.QLabel(StartForm) self.label.setGeometry(QtCore.QRect(-30, -90, 641, 521)) self.label.setText("") self.label.setPixmap(QtGui.QPixmap("images/desktopwallpapers.org.ua-2725.jpg")) self.label.setObjectName("label") self.label_2 = QtWidgets.QLabel(StartForm) self.label_2.setGeometry(QtCore.QRect(0, -10, 601, 81)) font = QtGui.QFont() font.setFamily("Arial Black") self.label_2.setFont(font) self.label_2.setAlignment(QtCore.Qt.AlignCenter) self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(StartForm) self.label_3.setGeometry(QtCore.QRect(0, 50, 601, 91)) font = QtGui.QFont() font.setFamily("Arial Black") self.label_3.setFont(font) self.label_3.setAlignment(QtCore.Qt.AlignCenter) self.label_3.setObjectName("label_3") self.label_5 = QtWidgets.QLabel(StartForm) self.label_5.setGeometry(QtCore.QRect(110, 150, 291, 51)) font = QtGui.QFont() font.setFamily("Arial Black") self.label_5.setFont(font) self.label_5.setAlignment(QtCore.Qt.AlignCenter) self.label_5.setObjectName("label_5") self.edDiff = QtWidgets.QLineEdit(StartForm) self.edDiff.setGeometry(QtCore.QRect(410, 160, 61, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(15) font.setBold(True) font.setItalic(False) font.setWeight(75) self.edDiff.setFont(font) self.edDiff.setStyleSheet("QLineEdit {\n" " background-color: rgba(23, 115, 255, 137);\n" " border-width: 1px;\n" "color:rgb(255, 255, 255);\n" " border-color: rgb(255, 255, 255);\n" " border-style: solid;\n" " border-radius: 5px;\n" "}") self.edDiff.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.edDiff.setObjectName("edDiff") self.edCount = QtWidgets.QLineEdit(StartForm) self.edCount.setGeometry(QtCore.QRect(410, 230, 61, 31)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(15) font.setBold(True) font.setItalic(False) font.setWeight(75) self.edCount.setFont(font) self.edCount.setStyleSheet("QLineEdit {\n" " background-color: rgba(23, 115, 255, 137);\n" " border-width: 1px;\n" "color:rgb(255, 255, 255);\n" " border-color: rgb(255, 255, 255);\n" " border-style: solid;\n" " border-radius: 5px;\n" "}") self.edCount.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.edCount.setObjectName("edCount") self.label_6 = QtWidgets.QLabel(StartForm) self.label_6.setGeometry(QtCore.QRect(110, 220, 261, 51)) font = QtGui.QFont() font.setFamily("Arial Black") self.label_6.setFont(font) self.label_6.setAlignment(QtCore.Qt.AlignCenter) self.label_6.setObjectName("label_6") self.pbStart = QtWidgets.QPushButton(StartForm) self.pbStart.setGeometry(QtCore.QRect(310, 290, 211, 41)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(12) font.setBold(True) font.setWeight(75) self.pbStart.setFont(font) self.pbStart.setStyleSheet("QPushButton {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 
250, 255), stop:0.655502 rgba(0, 29, 203, 255), stop:1 rgba(190, 255, 255, 255));\n" " color:rgb(255, 255, 255);\n" " border-width: 1px;\n" " border-style: solid;\n" " border-radius: 10px;\n" " min-width: 80px;\n" "}\n" "\n" "QPushButton:hover {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 250, 255), stop:0.655502 rgba(82, 100, 203, 255), stop:1 rgba(190, 255, 255, 255));\n" "}\n" "\n" "QPushButton:pressed {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 250, 255), stop:0.596154 rgba(85, 250, 255, 255), stop:1 rgba(190, 255, 255, 255));\n" "}") icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap("images/apply_5183.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.pbStart.setIcon(icon1) self.pbStart.setIconSize(QtCore.QSize(24, 24)) self.pbStart.setObjectName("pbStart") self.pbExit = QtWidgets.QPushButton(StartForm) self.pbExit.setGeometry(QtCore.QRect(90, 290, 211, 41)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(12) font.setBold(True) font.setWeight(75) self.pbExit.setFont(font) self.pbExit.setStyleSheet("QPushButton {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 250, 255), stop:0.655502 rgba(0, 29, 203, 255), stop:1 rgba(190, 255, 255, 255));\n" " color:rgb(255, 255, 255);\n" " border-width: 1px;\n" " border-style: solid;\n" " border-radius: 10px;\n" " min-width: 80px;\n" "}\n" "\n" "QPushButton:hover {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 250, 255), stop:0.655502 rgba(82, 100, 203, 255), stop:1 rgba(190, 255, 255, 255));\n" "}\n" "\n" "QPushButton:pressed {\n" " background-color:qlineargradient(spread:reflect, x1:0.515, y1:1, x2:0.528, y2:0, stop:0 rgba(0, 143, 250, 255), stop:0.596154 rgba(85, 250, 255, 255), stop:1 rgba(190, 255, 255, 255));\n" "}") icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap("images/exit.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.pbExit.setIcon(icon2) self.pbExit.setIconSize(QtCore.QSize(24, 24)) self.pbExit.setObjectName("pbExit") self.label_7 = QtWidgets.QLabel(StartForm) self.label_7.setGeometry(QtCore.QRect(170, 314, 261, 51)) font = QtGui.QFont() font.setFamily("Arial") font.setPointSize(8) self.label_7.setFont(font) self.label_7.setAlignment(QtCore.Qt.AlignCenter) self.label_7.setObjectName("label_7") self.retranslateUi(StartForm) QtCore.QMetaObject.connectSlotsByName(StartForm) def retranslateUi(self, StartForm): _translate = QtCore.QCoreApplication.translate StartForm.setWindowTitle(_translate("StartForm", "Check Your Memory")) self.label_2.setText(_translate("StartForm", "<html><head/><body><p><span style=\" font-size:48pt; color:#00ffc8;\">Check</span></p></body></html>")) self.label_3.setText(_translate("StartForm", "<html><head/><body><p><span style=\" font-size:48pt; color:#00ffc8;\">Your Memory</span></p></body></html>")) self.label_5.setText(_translate("StartForm", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffffff;\">Уровень сложности:</span></p></body></html>")) self.edDiff.setText(_translate("StartForm", "1")) self.edCount.setText(_translate("StartForm", "1")) self.label_6.setText(_translate("StartForm", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffffff;\">Количество ходов:</span></p></body></html>")) self.pbStart.setText(_translate("StartForm", "Поехали!")) self.pbExit.setText(_translate("StartForm", "Выход")) 
self.label_7.setText(_translate("StartForm", "<html><head/><body><p><span style=\" font-size:10pt; color:#ffffff;\">Denisov Foundation (c) 2014</span></p></body></html>"))
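As with any pyuic5-generated class, Ui_StartForm is used by instantiating a plain widget and calling setupUi on it. A minimal harness follows; nothing here is project-specific beyond the class and button names defined above.

import sys

from PyQt5 import QtWidgets


if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    ui = Ui_StartForm()
    ui.setupUi(window)  # builds all the widgets declared above onto `window`
    ui.pbExit.clicked.connect(window.close)  # wire the generated exit button
    window.show()
    sys.exit(app.exec_())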
gpl-3.0
-7,055,602,198,253,922,000
46.482759
192
0.640644
false
alexheretic/apart-gtk
src/gtktools.py
1
1183
from gi.repository import Gtk def rows(grid: Gtk.Grid) -> int: return max(map(lambda child: grid.child_get_property(child, 'top-attach'), grid.get_children()), default=-1) + 1 class GridRowTenant: """Tool for managing one-time adding and later removing of exclusive owners of rows of a shared grid""" def __init__(self, grid: Gtk.Grid): self.grid = grid self.base_row = rows(grid) self.attached = [] def attach(self, widget, left=0, top=0, height=1, width=1): self.grid.attach(widget, left=left, top=self.base_row + top, height=height, width=width) self.attached.append(widget) if hasattr(self.grid, 'on_row_change'): self.grid.on_row_change() def all_row_numbers(self): return map(lambda c: self.grid.child_get_property(c, 'top-attach'), self.attached) def evict(self): for row in reversed(sorted(set(self.all_row_numbers()))): self.grid.remove_row(row) if hasattr(self.grid, 'on_row_change'): self.grid.on_row_change() top = self.grid.get_child_at(top=0, left=0) if top and type(top) is Gtk.Separator: top.hide()
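A short usage sketch of GridRowTenant (the label texts are made up): a tenant appends its widgets below whatever rows the grid already holds, and evict() later removes exactly those rows.

# Each tenant owns the rows it attaches and can remove them later
# without disturbing other tenants' rows.
grid = Gtk.Grid()
tenant = GridRowTenant(grid)
tenant.attach(Gtk.Label(label="name"), left=0, top=0)
tenant.attach(Gtk.Label(label="value"), left=1, top=0)
tenant.attach(Gtk.Separator(), top=1, width=2)
# ... later, when this block of rows is no longer needed:
tenant.evict()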
gpl-3.0
-3,770,943,297,761,088,500
35.96875
116
0.623838
false
3dfxsoftware/cbss-addons
lct_hr/report/payslip_report_pdf.py
2
6517
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from report import report_sxw from datetime import datetime class payslip_report_pdf(report_sxw.rml_parse): _name = 'payslip_report_pdf' _description = "Employee Payslips" def __init__(self, cr, uid, name, context): super(payslip_report_pdf, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'payslips': self.get_payslip_data(cr, uid, context=context), }) # Not sure how well this will perform on big data sets. The yearly stuff is # duplicating a ton of lookups. If it turns out this performs badly, rewrite # to use queries instead of ORM. def get_payslip_data(self, cr, uid, context=None): retval = {} payslip_obj = self.pool.get('hr.payslip') payslip_ids = context.get('active_ids') payslips = payslip_obj.browse(cr, uid, payslip_ids, context=context) for payslip in payslips: sen_yr, sen_mon, sen_day = self.pool.get('hr.employee')\ .get_seniority_ymd(cr, uid, payslip.employee_id.id, context=context) seniority = '%dA, %dM, %dJ' % (sen_yr, sen_mon, sen_day) # Leaves leave_obj = self.pool.get('hr.holidays') leave_ids = leave_obj.search(cr, uid, [('employee_id', '=', payslip.employee_id.id)], context=context) leaves = leave_obj.browse(cr, uid, leave_ids, context=context) leaves_acquired = sum([x.number_of_days for x in leaves \ if x.state == 'validate' \ and x.type == 'add'\ and x.holiday_status_id.limit == False]) or 0.0 holidays = [x for x in leaves \ if x.state == 'validate' \ and x.type == 'remove' \ and x.date_from.split()[0] >= payslip.date_from.split()[0] \ and x.date_to.split()[0] <= payslip.date_to.split()[0]] # leaves_taken = sum([x.number_of_days for x in leaves \ # if x.state == 'validate' \ # and x.type == 'remove'\ # and x.holiday_status_id.limit == False]) leaves_remaining = sum([x.number_of_days for x in leaves\ if x.state == 'validate' \ and x.holiday_status_id.limit == False]) or 0.0 retval[payslip] = { # 'lines': lines, 'seniority': seniority, 'leaves_acquired': leaves_acquired, # 'leaves_taken': leaves_taken, 'leaves_remaining': leaves_remaining, 'holidays': holidays, } retval[payslip].update(self.get_salarial_data(cr, uid, payslip, yearly=False, context=context)) # Yearly stuff jan_1 = payslip.date_from.split('-')[0] + '-01-01' slip_end = payslip.date_to.split()[0] yr_slip_ids = payslip_obj.search(cr, uid, [('employee_id', '=', payslip.employee_id.id), ('date_from', '>=', jan_1), ('date_to', '<=', slip_end)], context=context) yearly_data = dict.fromkeys(['gross_year', 'salarial_costs_year', 'patronal_costs_year', 'net_salary_year', 'benefits_in_kind_year', 'worked_hours_year', 'worked_days_year'], 0) for yr_slip in 
payslip_obj.browse(cr, uid, yr_slip_ids, context=context): data = self.get_salarial_data(cr, uid, yr_slip, yearly=True, context=context) for key in data.keys(): yearly_data[key] += data.get(key, 0) retval[payslip].update(yearly_data) return retval def get_salarial_data(self, cr, uid, payslip, yearly=False, context=None): retval = {} keys = ['gross', 'salarial_costs', 'patronal_costs', 'net_salary', 'benefits_in_kind', 'worked_hours', 'worked_days'] lines = payslip.get_visible_lines(context=context) gross = sum(x.total for x in lines if x.sequence in [1999]) salarial_costs = sum(x.total for x in lines if x.sequence in [2040]) patronal_costs = sum(x.total for x in lines if x.sequence in [2041]) net_salary = sum(x.total for x in lines if x.sequence in [5000]) benefits_in_kind = sum(x.total for x in lines if x.sequence in [1009]) # For now, it's 160, except the 1st month, when it's prorata. days_in_service = (datetime.strptime(payslip.date_to, '%Y-%m-%d') \ - datetime.strptime(payslip.employee_id.start_date, '%Y-%m-%d')).days days_in_month = (datetime.strptime(payslip.date_to, '%Y-%m-%d') \ - datetime.strptime(payslip.date_from, '%Y-%m-%d')).days worked_hours = int(160 * min(1, float(days_in_service) / days_in_month)) # worked_hours = sum([x.number_of_hours for x in payslip.worked_days_line_ids]) worked_days = sum([x.number_of_days for x in payslip.worked_days_line_ids]) if not yearly: retval['lines'] = lines for key in keys: retval[key] = locals().get(key) else: for key in keys: retval[key + '_year'] = locals().get(key) return retval report_sxw.report_sxw('report.webkit.payslip_report_pdf', 'hr.payslip', 'lct_hr/report/payslip_report.html.mako', parser=payslip_report_pdf)
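The prorata rule in get_salarial_data ("160 hours, except a prorated first month") condensed into a worked example; the dates are invented.

from datetime import datetime

# An employee who started on 2014-03-16, on a payslip covering March 2014:
days_in_service = (datetime(2014, 3, 31) - datetime(2014, 3, 16)).days  # 15
days_in_month = (datetime(2014, 3, 31) - datetime(2014, 3, 1)).days     # 30
worked_hours = int(160 * min(1, float(days_in_service) / days_in_month))
assert worked_hours == 80  # half the month in service -> half of 160 hours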
gpl-2.0
-818,315,447,750,691,600
46.919118
87
0.553629
false
CodeYellowBV/django-binder
binder/management/commands/define_groups.py
1
2410
from django.db import transaction from django.core.management.base import BaseCommand from django.conf import settings from django.utils.translation import gettext as _ from django.contrib.auth.models import Group, Permission, ContentType class Command(BaseCommand): help = _('Define user groups/roles to their required specifications') @transaction.atomic def handle(self, *args, **options): # Delete any stale groups Group.objects.exclude(name__in=settings.GROUP_PERMISSIONS).delete() for group_name in settings.GROUP_PERMISSIONS: group, _ = Group.objects.get_or_create(name=group_name) # Get all groups that are contained by this group groups_to_expand = [group_name] groups = set() while groups_to_expand: group_name = groups_to_expand.pop() if group_name not in groups: groups.add(group_name) groups_to_expand.extend(settings.GROUP_CONTAINS.get(group_name, [])) # Collect all permissions for these groups perms = set() for group_name in groups: for perm_name in settings.GROUP_PERMISSIONS[group_name]: try: app, other = perm_name.split('.') if ':' in other: action_and_model, scope = other.split(':') else: action_and_model = other action, model = action_and_model.split('_') content_type = ContentType.objects.get( app_label=app, model=model, ) perm = Permission.objects.get( content_type=content_type, codename=other, ) perms.add(perm) except ContentType.DoesNotExist: raise RuntimeError( 'Model for ' + perm_name + ' does not exist' ) except Permission.DoesNotExist: raise RuntimeError( 'Permission ' + perm_name + ' does not exist' ) group.permissions.set(perms)
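The command above expects two settings: GROUP_PERMISSIONS (role -> permission codenames) and GROUP_CONTAINS (role -> roles it inherits from). Here is a hypothetical sketch of such a config; the app and model names are invented. With it, the while-loop expands 'admin' to {'admin', 'manager', 'viewer'}, so admins receive the union of all three permission lists.

# settings.py (hypothetical) -- input for the define_groups command above.
GROUP_PERMISSIONS = {
    'viewer': ['myapp.view_invoice'],
    'manager': ['myapp.add_invoice', 'myapp.change_invoice'],
    'admin': ['myapp.delete_invoice'],
}
GROUP_CONTAINS = {
    'admin': ['manager'],   # admin inherits everything manager has...
    'manager': ['viewer'],  # ...which in turn inherits viewer
}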
mit
-2,166,528,551,319,578,400
39.166667
88
0.499585
false
chantera/biaffineparser
src/utils/training/callbacks.py
1
4715
# fmt: off __all__ = ["ProgressCallback", "PrintCallback", "MonitorCallback", "EarlyStopCallback", "SaveCallback"] # noqa # fmt: on import operator import os import torch from tqdm import tqdm from utils.training.trainer import Callback # isort: skip class ProgressCallback(Callback): def __init__(self): self.training_pbar = None self.evaluation_pbar = None def on_train_begin(self, context): self._ensure_close(train=True) self.training_pbar = tqdm() def on_train_end(self, context, metrics): self._ensure_close(train=True) def on_evaluate_begin(self, context): self._ensure_close(eval=True) self.evaluation_pbar = tqdm(leave=self.training_pbar is None) def on_evaluate_end(self, context, metrics): self._ensure_close(eval=True) def on_loop_begin(self, context): pbar = self.training_pbar if context.train else self.evaluation_pbar pbar.reset(context.num_batches) if context.train: pbar.set_postfix({"epoch": context.epoch}) def on_step_end(self, context, output): pbar = self.training_pbar if context.train else self.evaluation_pbar pbar.update(1) def _ensure_close(self, train=False, eval=False): if train: if self.training_pbar is not None: self.training_pbar.close() self.training_pbar = None if eval: if self.evaluation_pbar is not None: self.evaluation_pbar.close() self.evaluation_pbar = None def __del__(self): self._ensure_close(train=True, eval=True) class PrintCallback(Callback): def __init__(self, printer=None): self.printer = printer or tqdm.write def on_loop_end(self, context, metrics): label = "train" if context.train else "eval" loss = metrics[f"{label}/loss"] message = f"[{label}] epoch {context.epoch} - loss: {loss:.4f}" prefix = label + "/" for key, val in metrics.items(): if not isinstance(val, float) or not key.startswith(prefix): continue key = key.split("/", 1)[1] if key == "loss": continue message += f", {key}: {val:.4f}" self.printer(message) class MonitorCallback(Callback): def __init__(self, monitor="eval/loss", mode="min"): self.monitor = monitor self.count = 0 self.mode = mode if self.mode == "min": self.monitor_op = operator.lt self.best = float("inf") elif self.mode == "max": self.monitor_op = operator.gt self.best = float("-inf") else: raise ValueError(f"invalid mode: {self.mode}") def on_evaluate_end(self, context, metrics): current_val = metrics[self.monitor] if self.monitor_op(current_val, self.best): self.best = current_val self.count = 0 else: self.count += 1 class EarlyStopCallback(MonitorCallback): def __init__(self, monitor="eval/loss", patience=3, mode="min"): super().__init__(monitor, mode) self.patience = patience def on_evaluate_end(self, context, metrics): super().on_evaluate_end(context, metrics) if self.count >= self.patience: context.trainer.terminate() class SaveCallback(Callback): def __init__(self, output_dir, prefix="", mode="latest", monitor=None): if mode not in {"latest", "min", "max"}: raise ValueError(f"invalid mode: {mode}") self.output_dir = output_dir self.prefix = prefix self.monitor = MonitorCallback(monitor, mode) if monitor else None self._checkpoints = [] def on_evaluate_end(self, context, metrics): if self.monitor: self.monitor.on_evaluate_end(context, metrics) if self.monitor.count > 0: return trainer = context.trainer # TODO: add other configuration checkpoint = { "model": trainer.model.state_dict(), "optimizer": trainer.optimizer.state_dict(), "scheduler": trainer.scheduler.state_dict() if trainer.scheduler else None, "trainer_config": trainer.config, "trainer_state": trainer._state, } file = os.path.join(self.output_dir,
f"{self.prefix}step-{context.global_step}.ckpt") torch.save(checkpoint, file) checkpoints = [] for ckpt_path in self._checkpoints: if os.path.exists(ckpt_path): os.remove(ckpt_path) checkpoints.append(file) self._checkpoints = checkpoints
apache-2.0
-3,761,235,620,844,864,000
31.517241
111
0.59088
false
uclapi/uclapi
backend/uclapi/oauth/scoping.py
1
4409
# Storage of the scope map # The purpose of this setup is that the OAuth scope of any app can be stored # in a single field. This way, we can easily add more scopes later. # We have a BigIntegerField to work with, which means 64 bits of storage. # This translates into 64 types of scope, each of which can be checked with a # bit mask. # We do not have any OAuth scopes needed yet, but the current plan is: # roombookings": (0, "Private room bookings data"), # "timetable": (1, "Private timetable data"), # "uclu": (2, "Private UCLU data"), # "moodle": (3, "Private Moodle data") # E.g. roombookings has scope 0, which is # 0000000000000000000000000000000000000000000000000000000000000001b. # This is because the 0th bit (LSB) is set to 1. # roombookings + uclu = 101b, or a scope number of 2^2 + 2^0 = 4 + 1 = 5 class Scopes: SCOPE_MAP = { "timetable": (1, "Personal Timetable"), "student_number": (2, "Student Number"), } def __init__(self, scope_map=None): if scope_map: self.SCOPE_MAP = scope_map # Add a scope to the scope number given and return the new number def add_scope(self, current, scope_name): try: scope_shift = self.SCOPE_MAP[scope_name][0] except KeyError: return current return (current | (1 << scope_shift)) # Check whether a scope is present in the current scope number given def check_scope(self, current, scope_name): try: scope_shift = self.SCOPE_MAP[scope_name][0] except KeyError: return False return ((1 << scope_shift) & current) > 0 # Remove a scope from the current scope number def remove_scope(self, current, scope_name): try: scope_shift = self.SCOPE_MAP[scope_name][0] except KeyError: return current if current & 1 << scope_shift > 0: return ~(~current + (1 << scope_shift)) else: return current # Produce a dictionary with the scope information. Example: # { # "roombookings": True, # "timetable": False, # ... # } def scope_dict(self, current, pretty_print=True): scopes = [] for x in self.SCOPE_MAP.keys(): if self.check_scope(current, x): if pretty_print: scope = { "name": x, "description": self.SCOPE_MAP[x][1] } else: scope = { "id": self.SCOPE_MAP[x][0], "name": x } scopes.append(scope) return scopes # Same as above, but list all possible scopes along with whether they are # included in the current state given. # This is used by the dashboard. def scope_dict_all(self, current, pretty_print=True): scopes = [] for x in self.SCOPE_MAP.keys(): if pretty_print: scope = { "name": x, "description": self.SCOPE_MAP[x][1], "enabled": self.check_scope(current, x) } else: scope = { "id": self.SCOPE_MAP[x][0], "name": x, "enabled": self.check_scope(current, x) } scopes.append(scope) return scopes # Get available scopes for showing to the user def get_all_scopes(self, pretty_print=True): scopes = [] for x in self.SCOPE_MAP.keys(): if pretty_print: scope = { "name": x, "description": self.SCOPE_MAP[x][1] } else: scope = { "id": self.SCOPE_MAP[x][0], "name": x } scopes.append(scope) return scopes # Dump the scope map so that developers can track scopes with it def get_scope_map(self): scopes = [] for x in self.SCOPE_MAP.keys(): scope = { "name": x, "id": self.SCOPE_MAP[x][0], "description": self.SCOPE_MAP[x][1] } scopes.append(scope) scopes = sorted(scopes, key=lambda k: k["id"]) return scopes
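A worked example of the bitmask scheme described in the header comment, using the SCOPE_MAP actually defined in this module (timetable = bit 1, student_number = bit 2):

s = Scopes()
current = 0
current = s.add_scope(current, "timetable")       # sets bit 1: 0b010 == 2
current = s.add_scope(current, "student_number")  # sets bit 2: 0b110 == 6
assert s.check_scope(current, "timetable")
current = s.remove_scope(current, "timetable")    # clears bit 1: 0b100 == 4
assert current == 4
assert not s.check_scope(current, "timetable")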
mit
5,179,115,751,130,359,000
32.150376
77
0.518031
false
dan2082/KSPData
main.py
1
4220
import logging import os import sys from tkinter import Frame, PanedWindow, Toplevel, Menu from tkinter.filedialog import askopenfilename, asksaveasfilename from ksp_data import KSPData from models.ksp_object import KSPObject from views.frame_kspobject import FrameKSPObject from views.treeview_kspelements import TreeviewKSPElements from views.treeview_kspobjects import TreeviewKSPObjects class Main(Frame): def __init__(self, master=None): Frame.__init__(self, master) self.pack(expand=1, fill='both') self.master.geometry('1440x900') paned_window = PanedWindow(self) self.treeview_kspelements = TreeviewKSPElements(self) self.treeview_kspobjects = TreeviewKSPObjects(self) paned_window.pack(expand=1, fill='both') paned_window.add(self.treeview_kspelements) paned_window.add(self.treeview_kspobjects) menubar = Menu(self) filemenu = Menu(self) filemenu.add_command(label='Open', command=self._open) filemenu.add_command(label='Save', command=self._save) filemenu.add_command(label='Save As', command=self._save_as) filemenu.add_separator() filemenu.add_command(label='Exit', command=self.master.destroy) menubar.add_cascade(menu=filemenu, label='File') insertmenu = Menu(self) insertmenu.add_command(label='KSP Element', command=self._insert_element, state='disabled') insertmenu.add_command(label='KSP Object', command=self._insert_object) menubar.add_cascade(menu=insertmenu, label='Insert') self.master.config(menu=menubar) def populate(self, kspelement): self._game = kspelement self._populate() def _populate(self): self.treeview_kspelements.populate(self._game) self.treeview_kspobjects.populate(self._game) def kspelement_selected(self, id_, kspelement=None): if kspelement is None: kspelement = self._game kspelement_by_id = KSPData.get_kspelement_by_id(kspelement, id_) self.treeview_kspobjects.populate(kspelement_by_id) def kspobject_double_1(self, id_, kspelement=None): if kspelement is None: kspelement = self._game kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_) self.create_frame_kspobject(kspobject_by_id) def create_frame_kspobject(self, kspobject): logging.info(kspobject.id_) toplevel = Toplevel(self) FrameKSPObject(toplevel).populate(kspobject) def update_kspobject(self, name, value, id_, kspelement=None): if kspelement is None: kspelement = self._game kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_) kspobject_by_id.name = name kspobject_by_id.value = value self.treeview_kspobjects.populate(kspelement) def delete_kspobject(self, id_, kspelement=None): if kspelement is None: kspelement = self._game kspobject_by_id = KSPData.get_kspobject_by_id(kspelement, id_) kspelement.kspobjects.remove(kspobject_by_id) self.treeview_kspobjects.populate(kspelement) def _open(self): self._filename = askopenfilename() if os.path.isfile(self._filename): self._game = KSPData.parse(self._filename) self._populate() def _save(self): KSPData.save(self._game, self._filename) def _save_as(self): filename = asksaveasfilename() KSPData.save(self._game, filename) def _insert_element(self): pass def _insert_object(self): id_ = int(self.treeview_kspelements.selection()[0]) kspelement_by_id = KSPData.get_kspelement_by_id(self._game, id_) kspobject = KSPObject('name', 'value') kspelement_by_id.kspobjects.append(kspobject) self.create_frame_kspobject(kspobject) def test(): Main().mainloop() if __name__ == '__main__': level = logging.NOTSET stream = sys.stdout logging.basicConfig(level=level, stream=stream) test()
gpl-3.0
-1,382,434,338,623,917,300
32.768
99
0.641469
false
jbalogh/zamboni
apps/addons/tests/test_models.py
1
68227
# -*- coding: utf-8 -*- from contextlib import nested import itertools import json import os from datetime import datetime, timedelta import tempfile from urlparse import urlparse from django import forms from django.contrib.auth.models import AnonymousUser from django.conf import settings from django.core import mail from django.core.cache import cache from django.db import IntegrityError from django.utils import translation from mock import patch, Mock from nose.tools import eq_, assert_not_equal import amo import amo.tests import addons.search from amo import set_user from amo.helpers import absolutify from amo.signals import _connect, _disconnect from addons.models import (Addon, AddonCategory, AddonDependency, AddonRecommendation, AddonType, AddonUpsell, BlacklistedGuid, Category, Charity, CompatOverride, CompatOverrideRange, FrozenAddon, Persona, Preview) from applications.models import Application, AppVersion from devhub.models import ActivityLog from files.models import File, Platform from files.tests.test_models import TestLanguagePack, UploadTest from market.models import Price, AddonPremium from reviews.models import Review from translations.models import TranslationSequence, Translation from users.models import UserProfile from versions.models import ApplicationsVersions, Version from versions.compare import version_int from webapps.models import Webapp class TestAddonManager(amo.tests.TestCase): fixtures = ['addons/featured', 'addons/test_manager', 'base/collections', 'base/featured', 'bandwagon/featured_collections', 'base/addon_5299_gcal'] def setUp(self): set_user(None) @patch.object(settings, 'NEW_FEATURES', False) def test_featured(self): eq_(Addon.objects.featured(amo.FIREFOX).count(), Addon.objects.listed(amo.FIREFOX) .filter(feature__application=amo.FIREFOX.id).count()) @patch.object(settings, 'NEW_FEATURES', True) def test_new_featured(self): # TODO: remove this when NEW_FEATURES goes away. It's here because # build() was already called in setUp(). from addons.cron import reset_featured_addons reset_featured_addons() eq_(Addon.objects.featured(amo.FIREFOX).count(), 3) def test_listed(self): Addon.objects.filter(id=5299).update(disabled_by_user=True) q = Addon.objects.listed(amo.FIREFOX, amo.STATUS_PUBLIC) eq_(len(q.all()), 4) addon = q[0] eq_(addon.id, 2464) # Disabling hides it. addon.disabled_by_user = True addon.save() # Should be 3 now, since the one is now disabled. eq_(q.count(), 3) # If we search for public or unreviewed we find it. addon.disabled_by_user = False addon.status = amo.STATUS_UNREVIEWED addon.save() eq_(q.count(), 3) eq_(Addon.objects.listed(amo.FIREFOX, amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED).count(), 4) # Can't find it without a file. addon.versions.get().files.get().delete() eq_(q.count(), 3) def test_public(self): public = Addon.objects.public() for a in public: assert_not_equal( a.id, 3, 'public() must not return unreviewed add-ons') def test_reviewed(self): addons = Addon.objects.reviewed() for a in addons: assert a.status in amo.REVIEWED_STATUSES, (a.id, a.status) def test_unreviewed(self): """ Tests for unreviewed addons. 
""" exp = Addon.objects.unreviewed() for addon in exp: assert addon.status in amo.UNREVIEWED_STATUSES, ( "unreviewed() must return unreviewed addons.") def test_valid(self): addon = Addon.objects.get(pk=5299) addon.update(disabled_by_user=True) objs = Addon.objects.valid() for addon in objs: assert addon.status in amo.LISTED_STATUSES assert not addon.disabled_by_user def test_valid_disabled_by_user(self): before = Addon.objects.valid_and_disabled().count() addon = Addon.objects.get(pk=5299) addon.update(disabled_by_user=True) eq_(Addon.objects.valid_and_disabled().count(), before) def test_valid_disabled_by_admin(self): before = Addon.objects.valid_and_disabled().count() addon = Addon.objects.get(pk=5299) addon.update(status=amo.STATUS_DISABLED) eq_(Addon.objects.valid_and_disabled().count(), before) class TestAddonManagerFeatured(amo.tests.TestCase): # TODO(cvan): Merge with above once new featured add-ons are enabled. fixtures = ['addons/featured', 'bandwagon/featured_collections', 'base/collections', 'base/featured'] @patch.object(settings, 'NEW_FEATURES', True) def test_new_featured(self): f = Addon.objects.featured(amo.FIREFOX) eq_(f.count(), 3) eq_(sorted(x.id for x in f), [2464, 7661, 15679]) f = Addon.objects.featured(amo.SUNBIRD) assert not f.exists() class TestNewAddonVsWebapp(amo.tests.TestCase): def test_addon_from_kwargs(self): a = Addon(type=amo.ADDON_EXTENSION) assert isinstance(a, Addon) def test_webapp_from_kwargs(self): w = Addon(type=amo.ADDON_WEBAPP) assert isinstance(w, Webapp) def test_addon_from_db(self): a = Addon.objects.create(type=amo.ADDON_EXTENSION) assert isinstance(a, Addon) assert isinstance(Addon.objects.get(id=a.id), Addon) def test_webapp_from_db(self): a = Addon.objects.create(type=amo.ADDON_WEBAPP) assert isinstance(a, Webapp) assert isinstance(Addon.objects.get(id=a.id), Webapp) class TestAddonModels(amo.tests.TestCase): fixtures = ['base/apps', 'base/collections', 'base/featured', 'base/users', 'base/addon_5299_gcal', 'base/addon_3615', 'base/addon_3723_listed', 'base/addon_6704_grapple.json', 'base/addon_4594_a9', 'base/addon_4664_twitterbar', 'base/thunderbird', 'addons/featured', 'addons/invalid_latest_version', 'addons/blacklisted', 'bandwagon/featured_collections'] def setUp(self): TranslationSequence.objects.create(id=99243) # TODO(andym): use Mock appropriately here. self.old_version = amo.FIREFOX.latest_version amo.FIREFOX.latest_version = '3.6.15' def tearDown(self): amo.FIREFOX.latest_version = self.old_version def test_current_version(self): """ Tests that we get the current (latest public) version of an addon. """ a = Addon.objects.get(pk=3615) eq_(a.current_version.id, 81551) def test_current_version_listed(self): a = Addon.objects.get(pk=3723) eq_(a.current_version.id, 89774) def test_current_version_listed_no_version(self): Addon.objects.filter(pk=3723).update(_current_version=None) Version.objects.filter(addon=3723).delete() a = Addon.objects.get(pk=3723) eq_(a.current_version, None) def test_latest_version(self): """ Tests that we get the latest version of an addon. 
""" a = Addon.objects.get(pk=3615) eq_(a.latest_version.id, Version.objects.filter(addon=a).latest().id) def test_latest_version_no_version(self): Addon.objects.filter(pk=3723).update(_current_version=None) Version.objects.filter(addon=3723).delete() a = Addon.objects.get(pk=3723) eq_(a.latest_version, None) def test_latest_version_ignore_beta(self): a = Addon.objects.get(pk=3615) v1 = Version.objects.create(addon=a, version='1.0') File.objects.create(version=v1) eq_(a.latest_version.id, v1.id) v2 = Version.objects.create(addon=a, version='2.0beta') File.objects.create(version=v2, status=amo.STATUS_BETA) eq_(a.latest_version.id, v1.id) # Still should be f1 def test_current_beta_version(self): a = Addon.objects.get(pk=5299) eq_(a.current_beta_version.id, 50000) @patch.object(settings, 'NEW_FEATURES', False) def test_current_version_mixed_statuses(self): """Mixed file statuses are evil (bug 558237).""" a = Addon.objects.get(pk=3895) # Last version has pending files, so second to last version is # considered "current". eq_(a.current_version.id, 78829) # Fix file statuses on last version. v = Version.objects.get(pk=98217) v.files.update(status=amo.STATUS_PUBLIC) # Wipe caches. cache.clear() a.update_version() # Make sure the updated version is now considered current. eq_(a.current_version.id, v.id) def test_delete(self): """Test deleting add-ons.""" a = Addon.objects.get(pk=3615) a.name = u'é' a.delete('bye') eq_(len(mail.outbox), 1) assert BlacklistedGuid.objects.filter(guid=a.guid) def test_delete_url(self): """Test deleting addon has URL in the email.""" a = Addon.objects.get(pk=4594) a.delete('bye') assert absolutify(a.get_url_path()) in mail.outbox[0].body def test_delete_searchengine(self): """ Test deleting searchengines (which have no guids) should not barf up the deletion machine. """ a = Addon.objects.get(pk=4594) a.delete('bye') eq_(len(mail.outbox), 1) def test_delete_status_gone_wild(self): """ Test deleting add-ons where the higheststatus is zero, but there's a non-zero status. """ a = Addon.objects.get(pk=3615) a.status = amo.STATUS_UNREVIEWED a.highest_status = 0 a.delete('bye') eq_(len(mail.outbox), 1) assert BlacklistedGuid.objects.filter(guid=a.guid) def test_delete_incomplete(self): """Test deleting incomplete add-ons.""" a = Addon.objects.get(pk=3615) a.status = 0 a.highest_status = 0 a.save() a.delete(None) eq_(len(mail.outbox), 0) assert not BlacklistedGuid.objects.filter(guid=a.guid) def test_incompatible_latest_apps(self): a = Addon.objects.get(pk=3615) eq_(a.incompatible_latest_apps(), []) av = ApplicationsVersions.objects.get(pk=47881) av.max = AppVersion.objects.get(pk=97) # Firefox 2.0 av.save() a = Addon.objects.get(pk=3615) eq_(a.incompatible_latest_apps(), [amo.FIREFOX]) # Check a search engine addon. a = Addon.objects.get(pk=4594) eq_(a.incompatible_latest_apps(), []) def test_incompatible_asterix(self): av = ApplicationsVersions.objects.get(pk=47881) av.max = AppVersion.objects.create(application_id=amo.FIREFOX.id, version_int=version_int('5.*'), version='5.*') av.save() a = Addon.objects.get(pk=3615) eq_(a.incompatible_latest_apps(), []) def test_icon_url(self): """ Tests for various icons. 1. Test for an icon that exists. 2. Test for default THEME icon. 3. Test for default non-THEME icon. 
""" a = Addon.objects.get(pk=3615) expected = (settings.ADDON_ICON_URL % (3615, 32, 0)).rstrip('/0') assert a.icon_url.startswith(expected) a = Addon.objects.get(pk=6704) a.icon_type = None assert a.icon_url.endswith('/icons/default-theme.png'), ( "No match for %s" % a.icon_url) a = Addon.objects.get(pk=3615) a.icon_type = None assert a.icon_url.endswith('icons/default-32.png') def test_icon_url_default(self): a = Addon.objects.get(pk=3615) a.update(icon_type='') default = 'icons/default-32.png' eq_(a.icon_url.endswith(default), True) eq_(a.get_icon_url(32).endswith(default), True) eq_(a.get_icon_url(32, use_default=True).endswith(default), True) eq_(a.get_icon_url(32, use_default=False), None) def test_thumbnail_url(self): """ Test for the actual thumbnail URL if it should exist, or the no-preview url. """ a = Addon.objects.get(pk=4664) a.thumbnail_url.index('/previews/thumbs/20/20397.png?modified=') a = Addon.objects.get(pk=5299) assert a.thumbnail_url.endswith('/icons/no-preview.png'), ( "No match for %s" % a.thumbnail_url) def test_is_unreviewed(self): """Test if add-on is unreviewed or not""" # public add-on a = Addon.objects.get(pk=3615) assert not a.is_unreviewed(), 'public add-on: is_unreviewed=False' # unreviewed add-on a = Addon(status=amo.STATUS_UNREVIEWED) assert a.is_unreviewed(), 'sandboxed add-on: is_unreviewed=True' a.status = amo.STATUS_PENDING assert a.is_unreviewed(), 'pending add-on: is_unreviewed=True' def test_is_selfhosted(self): """Test if an add-on is listed or hosted""" # hosted a = Addon.objects.get(pk=3615) assert not a.is_selfhosted(), 'hosted add-on => !is_selfhosted()' # listed a.status = amo.STATUS_LISTED assert a.is_selfhosted(), 'listed add-on => is_selfhosted()' def test_is_no_restart(self): a = Addon.objects.get(pk=3615) f = a.current_version.all_files[0] eq_(f.no_restart, False) eq_(a.is_no_restart(), False) f.update(no_restart=True) eq_(Addon.objects.get(pk=3615).is_no_restart(), True) a.versions.all().delete() a._current_version = None eq_(a.is_no_restart(), False) def test_is_featured(self): """Test if an add-on is globally featured""" a = Addon.objects.get(pk=1003) assert a.is_featured(amo.FIREFOX, 'en-US'), ( 'globally featured add-on not recognized') def test_has_full_profile(self): """Test if an add-on's developer profile is complete (public).""" addon = lambda: Addon.objects.get(pk=3615) assert not addon().has_full_profile() a = addon() a.the_reason = 'some reason' a.save() assert not addon().has_full_profile() a.the_future = 'some future' a.save() assert addon().has_full_profile() a.the_reason = '' a.the_future = '' a.save() assert not addon().has_full_profile() def test_has_profile(self): """Test if an add-on's developer profile is (partially or entirely) completed. 
""" addon = lambda: Addon.objects.get(pk=3615) assert not addon().has_profile() a = addon() a.the_reason = 'some reason' a.save() assert addon().has_profile() a.the_future = 'some future' a.save() assert addon().has_profile() a.the_reason = '' a.the_future = '' a.save() assert not addon().has_profile() def test_has_eula(self): addon = lambda: Addon.objects.get(pk=3615) assert addon().has_eula a = addon() a.eula = '' a.save() assert not addon().has_eula a.eula = 'eula' a.save() assert addon().has_eula def newlines_helper(self, string_before): addon = Addon.objects.get(pk=3615) addon.privacy_policy = string_before addon.save() return addon.privacy_policy.localized_string_clean def test_newlines_normal(self): before = ("Paragraph one.\n" "This should be on the very next line.\n\n" "Should be two nl's before this line.\n\n\n" "Should be three nl's before this line.\n\n\n\n" "Should be four nl's before this line.") after = before # Nothing special; this shouldn't change. eq_(self.newlines_helper(before), after) def test_newlines_ul(self): before = ("<ul>\n\n" "<li>No nl's between the ul and the li.</li>\n\n" "<li>No nl's between li's.\n\n" "But there should be two before this line.</li>\n\n" "</ul>") after = ("<ul>" "<li>No nl's between the ul and the li.</li>" "<li>No nl's between li's.\n\n" "But there should be two before this line.</li>" "</ul>") eq_(self.newlines_helper(before), after) def test_newlines_ul_tight(self): before = ("There should be one nl between this and the ul.\n" "<ul><li>test</li><li>test</li></ul>\n" "There should be no nl's above this line.") after = ("There should be one nl between this and the ul.\n" "<ul><li>test</li><li>test</li></ul>" "There should be no nl's above this line.") eq_(self.newlines_helper(before), after) def test_newlines_ul_loose(self): before = ("There should be two nl's between this and the ul.\n\n" "<ul><li>test</li><li>test</li></ul>\n\n" "There should be one nl above this line.") after = ("There should be two nl's between this and the ul.\n\n" "<ul><li>test</li><li>test</li></ul>\n" "There should be one nl above this line.") eq_(self.newlines_helper(before), after) def test_newlines_blockquote_tight(self): before = ("There should be one nl below this.\n" "<blockquote>Hi</blockquote>\n" "There should be no nl's above this.") after = ("There should be one nl below this.\n" "<blockquote>Hi</blockquote>" "There should be no nl's above this.") eq_(self.newlines_helper(before), after) def test_newlines_blockquote_loose(self): before = ("There should be two nls below this.\n\n" "<blockquote>Hi</blockquote>\n\n" "There should be one nl above this.") after = ("There should be two nls below this.\n\n" "<blockquote>Hi</blockquote>\n" "There should be one nl above this.") eq_(self.newlines_helper(before), after) def test_newlines_inline(self): before = ("If we end a paragraph w/ a <b>non-block-level tag</b>\n\n" "<b>The newlines</b> should be kept") after = before # Should stay the same eq_(self.newlines_helper(before), after) def test_newlines_code_inline(self): before = ("Code tags aren't blocks.\n\n" "<code>alert(test);</code>\n\n" "See?") after = before # Should stay the same eq_(self.newlines_helper(before), after) def test_newlines_li_newlines(self): before = ("<ul><li>\nxx</li></ul>") after = ("<ul><li>xx</li></ul>") eq_(self.newlines_helper(before), after) before = ("<ul><li>xx\n</li></ul>") after = ("<ul><li>xx</li></ul>") eq_(self.newlines_helper(before), after) before = ("<ul><li>xx\nxx</li></ul>") after = ("<ul><li>xx\nxx</li></ul>") 
eq_(self.newlines_helper(before), after) before = ("<ul><li></li></ul>") after = ("<ul><li></li></ul>") eq_(self.newlines_helper(before), after) # All together now before = ("<ul><li>\nxx</li> <li>xx\n</li> <li>xx\nxx</li> " "<li></li>\n</ul>") after = ("<ul><li>xx</li> <li>xx</li> <li>xx\nxx</li> " "<li></li></ul>") eq_(self.newlines_helper(before), after) def test_newlines_empty_tag(self): before = ("This is a <b></b> test!") after = before eq_(self.newlines_helper(before), after) def test_newlines_empty_tag_nested(self): before = ("This is a <b><i></i></b> test!") after = before eq_(self.newlines_helper(before), after) def test_newlines_empty_tag_block_nested(self): b = ("Test.\n\n<blockquote><ul><li></li></ul></blockquote>\ntest.") a = ("Test.\n\n<blockquote><ul><li></li></ul></blockquote>test.") eq_(self.newlines_helper(b), a) def test_newlines_empty_tag_block_nested_spaced(self): before = ("Test.\n\n<blockquote>\n\n<ul>\n\n<li>" "</li>\n\n</ul>\n\n</blockquote>\ntest.") after = ("Test.\n\n<blockquote><ul><li></li></ul></blockquote>test.") eq_(self.newlines_helper(before), after) def test_newlines_li_newlines_inline(self): before = ("<ul><li>\n<b>test\ntest\n\ntest</b>\n</li>" "<li>Test <b>test</b> test.</li></ul>") after = ("<ul><li><b>test\ntest\n\ntest</b></li>" "<li>Test <b>test</b> test.</li></ul>") eq_(self.newlines_helper(before), after) def test_newlines_li_all_inline(self): before = ("Test with <b>no newlines</b> and <code>block level " "stuff</code> to see what happens.") after = before # Should stay the same eq_(self.newlines_helper(before), after) def test_newlines_spaced_blocks(self): before = ("<blockquote>\n\n<ul>\n\n<li>\n\ntest\n\n</li>\n\n" "</ul>\n\n</blockquote>") after = "<blockquote><ul><li>test</li></ul></blockquote>" eq_(self.newlines_helper(before), after) def test_newlines_spaced_inline(self): before = "Line.\n\n<b>\nThis line is bold.\n</b>\n\nThis isn't." 
after = before eq_(self.newlines_helper(before), after) def test_newlines_nested_inline(self): before = "<b>\nThis line is bold.\n\n<i>This is also italic</i></b>" after = before eq_(self.newlines_helper(before), after) def test_newlines_xss_script(self): before = "<script>\n\nalert('test');\n</script>" after = "&lt;script&gt;\n\nalert('test');\n&lt;/script&gt;" eq_(self.newlines_helper(before), after) def test_newlines_xss_inline(self): before = "<b onclick=\"alert('test');\">test</b>" after = "<b>test</b>" eq_(self.newlines_helper(before), after) def test_newlines_attribute_link_doublequote(self): before = '<a href="http://google.com">test</a>' parsed = self.newlines_helper(before) assert parsed.endswith('google.com" rel="nofollow">test</a>') def test_newlines_attribute_singlequote(self): before = "<abbr title='laugh out loud'>lol</abbr>" after = '<abbr title="laugh out loud">lol</abbr>' eq_(self.newlines_helper(before), after) def test_newlines_attribute_doublequote(self): before = '<abbr title="laugh out loud">lol</abbr>' after = before eq_(self.newlines_helper(before), after) def test_newlines_attribute_nestedquotes_doublesingle(self): before = '<abbr title="laugh \'out\' loud">lol</abbr>' after = before eq_(self.newlines_helper(before), after) def test_newlines_attribute_nestedquotes_singledouble(self): before = '<abbr title=\'laugh "out" loud\'>lol</abbr>' after = before eq_(self.newlines_helper(before), after) def test_newlines_unclosed_b(self): before = ("<b>test") after = ("<b>test</b>") eq_(self.newlines_helper(before), after) def test_newlines_unclosed_b_wrapped(self): before = ("This is a <b>test") after = ("This is a <b>test</b>") eq_(self.newlines_helper(before), after) def test_newlines_unclosed_li(self): before = ("<ul><li>test</ul>") after = ("<ul><li>test</li></ul>") eq_(self.newlines_helper(before), after) def test_newlines_malformed_faketag(self): before = "<madonna" after = "" eq_(self.newlines_helper(before), after) def test_newlines_correct_faketag(self): before = "<madonna>" after = "&lt;madonna&gt;" eq_(self.newlines_helper(before), after) def test_newlines_malformed_tag(self): before = "<strong" after = "" eq_(self.newlines_helper(before), after) def test_newlines_malformed_faketag_surrounded(self): before = "This is a <test of bleach" after = 'This is a &lt;test of="" bleach=""&gt;' # Output is ugly, but not much we can do. Bleach+html5lib is adamant # this is a tag. eq_(self.newlines_helper(before), after) def test_newlines_malformed_tag_surrounded(self): before = "This is a <strong of bleach" after = "This is a <strong></strong>" # Bleach interprets 'of' and 'bleach' as attributes, and strips them. # Good? No. Any way around it? Not really. 
eq_(self.newlines_helper(before), after) def test_newlines_less_than(self): before = "3 < 5" after = "3 &lt; 5" eq_(self.newlines_helper(before), after) def test_newlines_less_than_tight(self): before = "abc 3<5 def" after = "abc 3&lt;5 def" eq_(self.newlines_helper(before), after) def test_app_categories(self): addon = lambda: Addon.objects.get(pk=3615) c22 = Category.objects.get(id=22) c22.name = 'CCC' c22.save() c23 = Category.objects.get(id=23) c23.name = 'BBB' c23.save() c24 = Category.objects.get(id=24) c24.name = 'AAA' c24.save() cats = addon().all_categories eq_(cats, [c22, c23, c24]) for cat in cats: eq_(cat.application.id, amo.FIREFOX.id) cats = [c24, c23, c22] app_cats = [(amo.FIREFOX, cats)] eq_(addon().app_categories, app_cats) tb = Application.objects.get(id=amo.THUNDERBIRD.id) c = Category(application=tb, name='XXX', type=addon().type, count=1, weight=1) c.save() AddonCategory.objects.create(addon=addon(), category=c) c24.save() # Clear the app_categories cache. app_cats += [(amo.THUNDERBIRD, [c])] eq_(addon().app_categories, app_cats) def test_review_replies(self): """ Make sure that developer replies are not returned as if they were original reviews. """ addon = Addon.objects.get(id=3615) u = UserProfile.objects.get(pk=999) version = addon.current_version new_review = Review(version=version, user=u, rating=2, body='hello', addon=addon) new_review.save() new_reply = Review(version=version, user=addon.authors.all()[0], addon=addon, reply_to=new_review, rating=2, body='my reply') new_reply.save() review_list = [r.pk for r in addon.reviews] assert new_review.pk in review_list, ( 'Original review must show up in review list.') assert new_reply.pk not in review_list, ( 'Developer reply must not show up in review list.') def test_takes_contributions(self): a = Addon(status=amo.STATUS_PUBLIC, wants_contributions=True, paypal_id='$$') assert a.takes_contributions a.status = amo.STATUS_UNREVIEWED assert not a.takes_contributions a.status = amo.STATUS_PUBLIC a.wants_contributions = False assert not a.takes_contributions a.wants_contributions = True a.paypal_id = None assert not a.takes_contributions a.charity_id = 12 assert a.takes_contributions def test_show_beta(self): # Addon.current_beta_version will be empty, so show_beta is False. a = Addon(status=amo.STATUS_PUBLIC) assert not a.show_beta @patch('addons.models.Addon.current_beta_version') def test_show_beta_with_beta_version(self, beta_mock): beta_mock.return_value = object() # Fake current_beta_version to return something truthy. a = Addon(status=amo.STATUS_PUBLIC) assert a.show_beta # We have a beta version but status has to be public. a.status = amo.STATUS_UNREVIEWED assert not a.show_beta def test_update_logs(self): addon = Addon.objects.get(id=3615) set_user(UserProfile.objects.all()[0]) addon.versions.all().delete() entries = ActivityLog.objects.all() eq_(entries[0].action, amo.LOG.CHANGE_STATUS.id) def test_can_request_review_waiting_period(self): now = datetime.now() a = Addon.objects.create(type=1) v = Version.objects.create(addon=a) # The first LITE version is only 5 days old, no dice. first_f = File.objects.create(status=amo.STATUS_LITE, version=v) first_f.update(datestatuschanged=now - timedelta(days=5), created=now - timedelta(days=20)) # TODO(andym): can this go in Addon.objects.create? bug 618444 a.update(status=amo.STATUS_LITE) eq_(a.can_request_review(), ()) # Now the first LITE is > 10 days old, change can happen. 
first_f.update(datestatuschanged=now - timedelta(days=11)) # Add a second file, to be sure that we test the date # of the first created file. second_f = File.objects.create(status=amo.STATUS_LITE, version=v) second_f.update(datestatuschanged=now - timedelta(days=5)) eq_(a.status, amo.STATUS_LITE) eq_(a.can_request_review(), (amo.STATUS_PUBLIC,)) def test_days_until_full_nomination(self): # Normalize to 12am for reliable day subtraction: now = datetime.now().date() a = Addon.objects.create(type=1) v = Version.objects.create(addon=a) f = File.objects.create(status=amo.STATUS_LITE, version=v) a.update(status=amo.STATUS_LITE) f.update(datestatuschanged=now - timedelta(days=4)) eq_(a.days_until_full_nomination(), 6) f.update(datestatuschanged=now - timedelta(days=1)) eq_(a.days_until_full_nomination(), 9) f.update(datestatuschanged=now - timedelta(days=10)) eq_(a.days_until_full_nomination(), 0) f.update(datestatuschanged=now) eq_(a.days_until_full_nomination(), 10) # Only calculate days from first submitted version: f.update(datestatuschanged=now - timedelta(days=2), created=now - timedelta(days=2)) # Ignore this one: f2 = File.objects.create(status=amo.STATUS_LITE, version=v) f2.update(datestatuschanged=now - timedelta(days=1), created=now - timedelta(days=1)) eq_(a.days_until_full_nomination(), 8) # Wrong status: a.update(status=amo.STATUS_PUBLIC) f.update(datestatuschanged=now - timedelta(days=4)) eq_(a.days_until_full_nomination(), 0) def setup_files(self, status): addon = Addon.objects.create(type=1) version = Version.objects.create(addon=addon) File.objects.create(status=status, version=version) return addon, version def test_no_change_disabled_user(self): addon, version = self.setup_files(amo.STATUS_UNREVIEWED) addon.update(status=amo.STATUS_PUBLIC) addon.update(disabled_by_user=True) version.save() eq_(addon.status, amo.STATUS_PUBLIC) assert addon.is_disabled def test_no_change_disabled(self): addon = Addon.objects.create(type=1) version = Version.objects.create(addon=addon) addon.update(status=amo.STATUS_DISABLED) version.save() eq_(addon.status, amo.STATUS_DISABLED) assert addon.is_disabled def test_can_alter_in_prelim(self): addon, version = self.setup_files(amo.STATUS_LITE) addon.update(status=amo.STATUS_LITE) version.save() eq_(addon.status, amo.STATUS_LITE) def test_removing_public(self): addon, version = self.setup_files(amo.STATUS_UNREVIEWED) addon.update(status=amo.STATUS_PUBLIC) version.save() eq_(addon.status, amo.STATUS_UNREVIEWED) def test_removing_public_with_prelim(self): addon, version = self.setup_files(amo.STATUS_LITE) addon.update(status=amo.STATUS_PUBLIC) version.save() eq_(addon.status, amo.STATUS_LITE) def test_can_request_review_no_files(self): addon = Addon.objects.get(pk=3615) addon.versions.all()[0].files.all().delete() eq_(addon.can_request_review(), ()) def check(self, status, exp, kw={}): addon = Addon.objects.get(pk=3615) changes = {'status': status, 'disabled_by_user': False} changes.update(**kw) addon.update(**changes) eq_(addon.can_request_review(), exp) def test_can_request_review_null(self): self.check(amo.STATUS_NULL, (amo.STATUS_LITE, amo.STATUS_PUBLIC)) def test_can_request_review_null_disabled(self): self.check(amo.STATUS_NULL, (), {'disabled_by_user': True}) def test_can_request_review_unreviewed(self): self.check(amo.STATUS_UNREVIEWED, (amo.STATUS_PUBLIC,)) def test_can_request_review_nominated(self): self.check(amo.STATUS_NOMINATED, (amo.STATUS_LITE,)) def test_can_request_review_public(self): self.check(amo.STATUS_PUBLIC, ()) def 
test_can_request_review_disabled(self): self.check(amo.STATUS_DISABLED, ()) def test_can_request_review_lite(self): self.check(amo.STATUS_LITE, (amo.STATUS_PUBLIC,)) def test_can_request_review_lite_and_nominated(self): self.check(amo.STATUS_LITE_AND_NOMINATED, ()) def test_can_request_review_purgatory(self): self.check(amo.STATUS_PURGATORY, (amo.STATUS_LITE, amo.STATUS_PUBLIC,)) def test_none_homepage(self): # There was an odd error when a translation was set to None. Addon.objects.create(homepage=None, type=amo.ADDON_EXTENSION) def test_slug_isdigit(self): a = Addon.objects.create(type=1, name='xx', slug='123') eq_(a.slug, '123~') a.slug = '44' a.save() eq_(a.slug, '44~') def test_slug_isblacklisted(self): # When an addon is uploaded, it doesn't use the form validation, # so we'll just mangle the slug if its blacklisted. a = Addon.objects.create(type=1, name='xx', slug='validate') eq_(a.slug, 'validate~') a.slug = 'validate' a.save() eq_(a.slug, 'validate~') def delete(self): addon = Addon.objects.get(id=3615) eq_(len(mail.outbox), 0) addon.delete('so long and thanks for all the fish') eq_(len(mail.outbox), 1) def test_delete_to(self): self.delete() eq_(mail.outbox[0].to, [settings.FLIGTAR]) def test_delete_by(self): try: user = Addon.objects.get(id=3615).authors.all()[0] set_user(user) self.delete() assert 'DELETED BY: 55021' in mail.outbox[0].body finally: set_user(None) def test_delete_by_unknown(self): self.delete() assert 'DELETED BY: Unknown' in mail.outbox[0].body def test_view_source(self): # view_source should default to True. a = Addon.objects.create(type=1) assert a.view_source @patch('files.models.File.hide_disabled_file') def test_admin_disabled_file_hidden(self, hide_mock): a = Addon.objects.get(id=3615) a.status = amo.STATUS_PUBLIC a.save() assert not hide_mock.called a.status = amo.STATUS_DISABLED a.save() assert hide_mock.called @patch('files.models.File.hide_disabled_file') def test_user_disabled_file_hidden(self, hide_mock): a = Addon.objects.get(id=3615) a.disabled_by_user = False a.save() assert not hide_mock.called a.disabled_by_user = True a.save() assert hide_mock.called def test_set_nomination(self): a = Addon.objects.get(id=3615) a.update(status=amo.STATUS_NULL) for s in (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED): a.versions.latest().update(nomination=None) a.update(status=s) assert a.versions.latest().nomination def test_new_version_inherits_nomination(self): a = Addon.objects.get(id=3615) ver = 10 for st in (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED): a.update(status=st) old_ver = a.versions.latest() v = Version.objects.create(addon=a, version=str(ver)) eq_(v.nomination, old_ver.nomination) ver += 1 def test_beta_version_does_not_inherit_nomination(self): a = Addon.objects.get(id=3615) a.update(status=amo.STATUS_LISTED) v = Version.objects.create(addon=a, version='1.0') v.nomination = None v.save() a.update(status=amo.STATUS_NOMINATED) File.objects.create(version=v, status=amo.STATUS_BETA, filename='foobar.xpi') v.version = '1.1' v.save() eq_(v.nomination, None) def test_lone_version_does_not_inherit_nomination(self): a = Addon.objects.get(id=3615) Version.objects.all().delete() v = Version.objects.create(addon=a, version='1.0') eq_(v.nomination, None) def test_reviwed_addon_does_not_inherit_nomination(self): a = Addon.objects.get(id=3615) ver = 10 for st in (amo.STATUS_PUBLIC, amo.STATUS_BETA, amo.STATUS_LISTED): a.update(status=st) v = Version.objects.create(addon=a, version=str(ver)) eq_(v.nomination, None) ver += 1 def 
test_nomination_no_version(self): # Check that the on_change method still works if there are no versions. a = Addon.objects.get(id=3615) a.versions.all().delete() a.update(status=amo.STATUS_NOMINATED) def test_nomination_already_set(self): addon = Addon.objects.get(id=3615) earlier = datetime.today() - timedelta(days=2) addon.versions.latest().update(nomination=earlier) addon.update(status=amo.STATUS_NOMINATED) eq_(addon.versions.latest().nomination.date(), earlier.date()) def test_category_transform(self): addon = Addon.objects.get(id=3615) cats = addon.categories.filter(application=amo.FIREFOX.id) names = [c.name for c in cats] assert addon.get_category(amo.FIREFOX.id).name in names class TestAddonGetURLPath(amo.tests.TestCase): def test_get_url_path(self): addon = Addon(slug='woo') eq_(addon.get_url_path(), '/en-US/firefox/addon/woo/') def test_get_url_path_more(self): addon = Addon(slug='woo') eq_(addon.get_url_path(more=True), '/en-US/firefox/addon/woo/more') class TestAddonModelsFeatured(amo.tests.TestCase): fixtures = ['addons/featured', 'bandwagon/featured_collections', 'base/addon_3615', 'base/collections', 'base/featured'] def setUp(self): # Addon._featured keeps an in-process cache we need to clear. if hasattr(Addon, '_featured'): del Addon._featured def _test_featured_random(self): f = Addon.featured_random(amo.FIREFOX, 'en-US') eq_(sorted(f), [1001, 1003, 2464, 3481, 7661, 15679]) f = Addon.featured_random(amo.FIREFOX, 'fr') eq_(sorted(f), [1001, 1003, 2464, 7661, 15679]) f = Addon.featured_random(amo.SUNBIRD, 'en-US') eq_(f, []) @patch.object(settings, 'NEW_FEATURES', False) def test_featured_random(self): self._test_featured_random() @patch.object(settings, 'NEW_FEATURES', True) def test_new_featured_random(self): self._test_featured_random() class TestBackupVersion(amo.tests.TestCase): fixtures = ['addons/update'] def setUp(self): self.version_1_2_0 = 105387 self.addon = Addon.objects.get(pk=1865) set_user(None) def setup_new_version(self): for version in Version.objects.filter(pk__gte=self.version_1_2_0): appversion = version.apps.all()[0] appversion.min = AppVersion.objects.get(version='4.0b1') appversion.save() def test_no_backup_version(self): self.addon.update_version() eq_(self.addon.backup_version, None) eq_(self.addon.current_version.version, '1.2.2') def test_no_current_version(self): Version.objects.all().delete() self.addon.update(_current_version=None) eq_(self.addon.backup_version, None) eq_(self.addon.current_version, None) def test_has_backup_version(self): self.setup_new_version() assert self.addon.update_version() eq_(self.addon.backup_version.version, '1.1.3') eq_(self.addon.current_version.version, '1.2.2') def test_backup_version(self): self.setup_new_version() assert self.addon.update_version() eq_(self.addon.backup_version.version, '1.1.3') def test_firefox_versions(self): self.setup_new_version() assert self.addon.update_version() backup = self.addon.backup_version.compatible_apps[amo.FIREFOX] eq_(backup.max.version, '3.7a5pre') eq_(backup.min.version, '3.0.12') current = self.addon.current_version.compatible_apps[amo.FIREFOX] eq_(current.max.version, '4.0b8pre') eq_(current.min.version, '3.0.12') def test_version_signals(self): self.setup_new_version() version = self.addon.versions.all()[0] assert not self.addon.backup_version version.save() assert Addon.objects.get(pk=1865).backup_version class TestCategoryModel(amo.tests.TestCase): def test_category_url(self): """Every type must have a url path for its categories.""" for t in amo.ADDON_TYPE.keys(): 
if t == amo.ADDON_DICT: continue # Language packs don't have categories. cat = Category(type=AddonType(id=t), slug='omg') assert cat.get_url_path() class TestPersonaModel(amo.tests.TestCase): def test_image_urls(self): mypersona = Persona(id=1234, persona_id=9876) assert mypersona.thumb_url.endswith('/7/6/9876/preview.jpg') assert mypersona.preview_url.endswith('/7/6/9876/preview_large.jpg') def test_update_url(self): p = Persona(id=1234, persona_id=9876) assert p.update_url.endswith('9876') class TestPreviewModel(amo.tests.TestCase): fixtures = ['base/previews'] def test_as_dict(self): expect = ['caption', 'full', 'thumbnail'] reality = sorted(Preview.objects.all()[0].as_dict().keys()) eq_(expect, reality) class TestAddonRecommendations(amo.tests.TestCase): fixtures = ['base/addon-recs'] def test_scores(self): ids = [5299, 1843, 2464, 7661, 5369] scores = AddonRecommendation.scores(ids) q = AddonRecommendation.objects.filter(addon__in=ids) for addon, recs in itertools.groupby(q, lambda x: x.addon_id): for rec in recs: eq_(scores[addon][rec.other_addon_id], rec.score) class TestAddonDependencies(amo.tests.TestCase): fixtures = ['base/addon_5299_gcal', 'base/addon_3615', 'base/addon_3723_listed', 'base/addon_6704_grapple', 'base/addon_4664_twitterbar'] def test_dependencies(self): ids = [3615, 3723, 4664, 6704] a = Addon.objects.get(id=5299) for dependent_id in ids: AddonDependency(addon=a, dependent_addon=Addon.objects.get(id=dependent_id)).save() eq_(sorted([a.id for a in a.dependencies.all()]), sorted(ids)) eq_(list(a.dependencies.all()), a.all_dependencies) def test_unique_dependencies(self): a = Addon.objects.get(id=5299) b = Addon.objects.get(id=3615) AddonDependency.objects.create(addon=a, dependent_addon=b) try: AddonDependency.objects.create(addon=a, dependent_addon=b) except IntegrityError: pass eq_(list(a.dependencies.values_list('id', flat=True)), [3615]) class TestListedAddonTwoVersions(amo.tests.TestCase): fixtures = ['addons/listed-two-versions'] def test_listed_two_versions(self): Addon.objects.get(id=2795) # bug 563967 class TestFlushURLs(amo.tests.TestCase): fixtures = ['base/addon_5579', 'base/previews', 'base/addon_4664_twitterbar', 'addons/persona'] def setUp(self): settings.ADDON_ICON_URL = ( '%s/%s/%s/images/addon_icon/%%d-%%d.png?modified=%%s' % ( settings.STATIC_URL, settings.LANGUAGE_CODE, settings.DEFAULT_APP)) settings.PREVIEW_THUMBNAIL_URL = (settings.STATIC_URL + '/img/uploads/previews/thumbs/%s/%d.png?modified=%d') settings.PREVIEW_FULL_URL = (settings.STATIC_URL + '/img/uploads/previews/full/%s/%d.png?modified=%d') _connect() def tearDown(self): _disconnect() def is_url_hashed(self, url): return urlparse(url).query.find('modified') > -1 @patch('amo.tasks.flush_front_end_cache_urls.apply_async') def test_addon_flush(self, flush): addon = Addon.objects.get(pk=159) addon.icon_type = "image/png" addon.save() for url in (addon.thumbnail_url, addon.icon_url): assert url in flush.call_args[1]['args'][0] assert self.is_url_hashed(url), url @patch('amo.tasks.flush_front_end_cache_urls.apply_async') def test_preview_flush(self, flush): addon = Addon.objects.get(pk=4664) preview = addon.previews.all()[0] preview.save() for url in (preview.thumbnail_url, preview.image_url): assert url in flush.call_args[1]['args'][0] assert self.is_url_hashed(url), url class TestAddonFromUpload(UploadTest): fixtures = ('base/apps', 'base/users') def setUp(self): super(TestAddonFromUpload, self).setUp() u = UserProfile.objects.get(pk=999) set_user(u) self.platform = 
Platform.objects.create(id=amo.PLATFORM_MAC.id) for version in ('3.0', '3.6.*'): AppVersion.objects.create(application_id=1, version=version) self.addCleanup(translation.deactivate) def webapp(self): return os.path.join(settings.ROOT, 'apps/devhub/tests/addons/mozball.webapp') def test_blacklisted_guid(self): BlacklistedGuid.objects.create(guid='guid@xpi') with self.assertRaises(forms.ValidationError) as e: Addon.from_upload(self.get_upload('extension.xpi'), [self.platform]) eq_(e.exception.messages, ['Duplicate UUID found.']) def test_xpi_attributes(self): addon = Addon.from_upload(self.get_upload('extension.xpi'), [self.platform]) eq_(addon.name, 'xpi name') eq_(addon.guid, 'guid@xpi') eq_(addon.type, amo.ADDON_EXTENSION) eq_(addon.status, amo.STATUS_NULL) eq_(addon.homepage, 'http://homepage.com') eq_(addon.summary, 'xpi description') eq_(addon.description, None) eq_(addon.slug, 'xpi-name') def test_manifest_url(self): upload = self.get_upload(abspath=self.webapp()) addon = Addon.from_upload(upload, [self.platform]) assert addon.is_webapp() eq_(addon.manifest_url, upload.name) def test_xpi_version(self): addon = Addon.from_upload(self.get_upload('extension.xpi'), [self.platform]) v = addon.versions.get() eq_(v.version, '0.1') eq_(v.files.get().platform_id, self.platform.id) eq_(v.files.get().status, amo.STATUS_UNREVIEWED) def test_xpi_for_multiple_platforms(self): platforms = [Platform.objects.get(pk=amo.PLATFORM_LINUX.id), Platform.objects.get(pk=amo.PLATFORM_MAC.id)] addon = Addon.from_upload(self.get_upload('extension.xpi'), platforms) v = addon.versions.get() eq_(sorted([f.platform.id for f in v.all_files]), sorted([p.id for p in platforms])) def test_search_attributes(self): addon = Addon.from_upload(self.get_upload('search.xml'), [self.platform]) eq_(addon.name, 'search tool') eq_(addon.guid, None) eq_(addon.type, amo.ADDON_SEARCH) eq_(addon.status, amo.STATUS_NULL) eq_(addon.homepage, None) eq_(addon.description, None) eq_(addon.slug, 'search-tool') eq_(addon.summary, 'Search Engine for Firefox') def test_search_version(self): addon = Addon.from_upload(self.get_upload('search.xml'), [self.platform]) v = addon.versions.get() eq_(v.version, datetime.now().strftime('%Y%m%d')) eq_(v.files.get().platform_id, amo.PLATFORM_ALL.id) eq_(v.files.get().status, amo.STATUS_UNREVIEWED) def test_no_homepage(self): addon = Addon.from_upload(self.get_upload('extension-no-homepage.xpi'), [self.platform]) eq_(addon.homepage, None) def test_default_locale(self): # Make sure default_locale follows the active translation. 
addon = Addon.from_upload(self.get_upload('search.xml'), [self.platform]) eq_(addon.default_locale, 'en-US') translation.activate('es-ES') addon = Addon.from_upload(self.get_upload('search.xml'), [self.platform]) eq_(addon.default_locale, 'es-ES') def test_webapp_default_locale_override(self): with nested(tempfile.NamedTemporaryFile('w', suffix='.webapp'), open(self.webapp())) as (tmp, mf): mf = json.load(mf) mf['default_locale'] = 'gb' tmp.write(json.dumps(mf)) tmp.flush() upload = self.get_upload(abspath=tmp.name) addon = Addon.from_upload(upload, [self.platform]) eq_(addon.default_locale, 'gb') def test_browsing_locale_does_not_override(self): translation.activate('gb') upload = self.get_upload(abspath=self.webapp()) # en-US default addon = Addon.from_upload(upload, [self.platform]) eq_(addon.default_locale, 'en-US') # not gb REDIRECT_URL = 'http://outgoing.mozilla.org/v1/' class TestCharity(amo.tests.TestCase): fixtures = ['base/charity.json'] @patch.object(settings, 'REDIRECT_URL', REDIRECT_URL) def test_url(self): charity = Charity(name="a", paypal="b", url="http://foo.com") charity.save() assert charity.outgoing_url.startswith(REDIRECT_URL) @patch.object(settings, 'REDIRECT_URL', REDIRECT_URL) def test_url_foundation(self): foundation = Charity.objects.get(pk=amo.FOUNDATION_ORG) assert not foundation.outgoing_url.startswith(REDIRECT_URL) class TestFrozenAddons(amo.tests.TestCase): def test_immediate_freeze(self): # Adding a FrozenAddon should immediately drop the addon's hotness. a = Addon.objects.create(type=1, hotness=22) FrozenAddon.objects.create(addon=a) eq_(Addon.objects.get(id=a.id).hotness, 0) class TestRemoveLocale(amo.tests.TestCase): def test_remove(self): a = Addon.objects.create(type=1) a.name = {'en-US': 'woo', 'el': 'yeah'} a.description = {'en-US': 'woo', 'el': 'yeah', 'he': 'ola'} a.save() a.remove_locale('el') qs = (Translation.objects.filter(localized_string__isnull=False) .values_list('locale', flat=True)) eq_(sorted(qs.filter(id=a.name_id)), ['en-US']) eq_(sorted(qs.filter(id=a.description_id)), ['en-US', 'he']) def test_remove_version_locale(self): addon = Addon.objects.create(type=amo.ADDON_THEME) version = Version.objects.create(addon=addon) version.releasenotes = {'fr': 'oui'} version.save() addon.remove_locale('fr') assert not (Translation.objects.filter(localized_string__isnull=False) .values_list('locale', flat=True)) class TestAddonWatchDisabled(amo.tests.TestCase): def setUp(self): self.addon = Addon(type=amo.ADDON_THEME, disabled_by_user=False, status=amo.STATUS_PUBLIC) self.addon.save() @patch('addons.models.File.objects.filter') def test_no_disabled_change(self, file_mock): mock = Mock() file_mock.return_value = [mock] self.addon.save() assert not mock.unhide_disabled_file.called assert not mock.hide_disabled_file.called @patch('addons.models.File.objects.filter') def test_disable_addon(self, file_mock): mock = Mock() file_mock.return_value = [mock] self.addon.update(disabled_by_user=True) assert not mock.unhide_disabled_file.called assert mock.hide_disabled_file.called @patch('addons.models.File.objects.filter') def test_admin_disable_addon(self, file_mock): mock = Mock() file_mock.return_value = [mock] self.addon.update(status=amo.STATUS_DISABLED) assert not mock.unhide_disabled_file.called assert mock.hide_disabled_file.called @patch('addons.models.File.objects.filter') def test_enable_addon(self, file_mock): mock = Mock() file_mock.return_value = [mock] self.addon.update(status=amo.STATUS_DISABLED) mock.reset_mock() 
self.addon.update(status=amo.STATUS_PUBLIC) assert mock.unhide_disabled_file.called assert not mock.hide_disabled_file.called class TestSearchSignals(amo.tests.ESTestCase): es = True def setUp(self): super(TestSearchSignals, self).setUp() addons.search.setup_mapping() self.addCleanup(self.cleanup) def cleanup(self): for index in settings.ES_INDEXES.values(): self.es.delete_index_if_exists(index) def test_no_addons(self): eq_(Addon.search().count(), 0) def test_create(self): addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo') self.refresh() eq_(Addon.search().count(), 1) eq_(Addon.search().query(name='woo')[0].id, addon.id) def test_update(self): addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo') self.refresh() eq_(Addon.search().count(), 1) addon.name = 'yeah' addon.save() self.refresh() eq_(Addon.search().count(), 1) eq_(Addon.search().query(name='woo').count(), 0) eq_(Addon.search().query(name='yeah')[0].id, addon.id) def test_delete(self): addon = Addon.objects.create(type=amo.ADDON_EXTENSION, name='woo') self.refresh() eq_(Addon.search().count(), 1) addon.delete('woo') self.refresh() eq_(Addon.search().count(), 0) class TestLanguagePack(TestLanguagePack): def setUp(self): super(TestLanguagePack, self).setUp() self.platform = Platform.objects.create(id=amo.PLATFORM_ANDROID.id) def test_extract(self): File.objects.create(platform=self.platform, version=self.version, filename=self.xpi_path('langpack-localepicker')) assert 'title=Select a language' in self.addon.get_localepicker() def test_extract_no_file(self): File.objects.create(platform=self.platform, version=self.version, filename=self.xpi_path('langpack')) eq_(self.addon.get_localepicker(), '') def test_extract_no_files(self): eq_(self.addon.get_localepicker(), '') def test_extract_not_language_pack(self): self.addon.update(type=amo.ADDON_LPAPP) eq_(self.addon.get_localepicker(), '') def test_extract_not_platform_all(self): self.mac = Platform.objects.create(id=amo.PLATFORM_MAC.id) File.objects.create(platform=self.mac, version=self.version, filename=self.xpi_path('langpack')) eq_(self.addon.get_localepicker(), '') class TestMarketplace(amo.tests.TestCase): def setUp(self): self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION) def test_is_premium(self): assert not self.addon.is_premium() self.addon.update(premium_type=amo.ADDON_PREMIUM) assert self.addon.is_premium() def test_can_be_premium_status(self): for status in amo.STATUS_CHOICES.keys(): self.addon.update(status=status) if status in amo.PREMIUM_STATUSES: assert self.addon.can_become_premium() else: assert not self.addon.can_become_premium() def test_webapp_can_become_premium(self): self.addon.update(type=amo.ADDON_WEBAPP) for status in amo.STATUS_CHOICES.keys(): self.addon.update(status=status) assert self.addon.can_become_premium(), status def test_can_be_premium_type(self): for type in amo.ADDON_TYPES.keys(): self.addon.update(type=type) if type in [amo.ADDON_EXTENSION, amo.ADDON_WEBAPP, amo.ADDON_LPAPP, amo.ADDON_DICT, amo.ADDON_THEME]: assert self.addon.can_become_premium() else: assert not self.addon.can_become_premium() def test_can_not_be_purchased(self): assert not self.addon.can_be_purchased() def test_can_still_not_be_purchased(self): self.addon.update(premium_type=amo.ADDON_PREMIUM) assert not self.addon.can_be_purchased() def test_can_be_purchased(self): for status in amo.REVIEWED_STATUSES: self.addon.update(premium_type=amo.ADDON_PREMIUM, status=status) assert self.addon.can_be_purchased() def test_transformer(self): other = 
Addon.objects.create(type=amo.ADDON_EXTENSION) price = Price.objects.create(price='1.00') self.addon.update(type=amo.ADDON_PREMIUM) AddonPremium.objects.create(addon=self.addon, price=price) assert getattr(Addon.objects.get(pk=self.addon.pk), 'premium') assert not getattr(Addon.objects.get(pk=other.pk), 'premium') class TestAddonUpsell(amo.tests.TestCase): def setUp(self): self.one = Addon.objects.create(type=amo.ADDON_EXTENSION, name='free') self.two = Addon.objects.create(type=amo.ADDON_EXTENSION, name='premium') self.upsell = AddonUpsell.objects.create(free=self.one, premium=self.two, text='yup') def test_create_upsell(self): eq_(self.one.upsell.premium, self.two) eq_(self.one.upsell.text, 'yup') eq_(self.two.upsell, None) class TestAddonPurchase(amo.tests.TestCase): fixtures = ['base/users'] def setUp(self): self.user = UserProfile.objects.get(pk=999) self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION, premium_type=amo.ADDON_PREMIUM, name='premium') def test_no_premium(self): self.addon.addonpurchase_set.create(user=self.user) self.addon.update(premium_type=amo.ADDON_FREE) assert not self.addon.has_purchased(self.user) def test_has_purchased(self): self.addon.addonpurchase_set.create(user=self.user) assert self.addon.has_purchased(self.user) def test_not_purchased(self): assert not self.addon.has_purchased(self.user) def test_anonymous(self): assert not self.addon.has_purchased(None) assert not self.addon.has_purchased(AnonymousUser) class TestWatermarkHash(amo.tests.TestCase): fixtures = ['base/addon_3615', 'base/users'] def setUp(self): self.addon = Addon.objects.get(pk=3615) self.user = UserProfile.objects.get(email='[email protected]') def test_watermark_change_email(self): hsh = self.addon.get_watermark_hash(self.user) self.user.update(email='[email protected]') eq_(hsh, self.addon.get_watermark_hash(self.user)) def test_check_hash(self): hsh = self.addon.get_watermark_hash(self.user) eq_(self.user, self.addon.get_user_from_hash(self.user.email, hsh)) def test_check_hash_messed(self): hsh = self.addon.get_watermark_hash(self.user) hsh = hsh + 'asd' eq_(None, self.addon.get_user_from_hash(self.user.email, hsh)) def test_check_user_change(self): self.user.update(email='[email protected]') hsh = self.addon.get_watermark_hash(self.user) eq_(self.user, self.addon.get_user_from_hash('[email protected]', hsh)) def test_check_user_multiple(self): hsh = self.addon.get_watermark_hash(self.user) self.user.update(email='[email protected]') UserProfile.objects.create(email='[email protected]') eq_(self.user, self.addon.get_user_from_hash('[email protected]', hsh)) def test_cant_takeover(self): hsh = self.addon.get_watermark_hash(self.user) self.user.delete() UserProfile.objects.create(email='[email protected]') eq_(None, self.addon.get_user_from_hash('[email protected]', hsh)) class TestCompatOverride(amo.tests.TestCase): def setUp(self): app = Application.objects.create(id=1) one = CompatOverride.objects.create(guid='one') CompatOverrideRange.objects.create(compat=one, app=app) two = CompatOverride.objects.create(guid='two') CompatOverrideRange.objects.create(compat=two, app=app, min_version='1', max_version='2') CompatOverrideRange.objects.create(compat=two, app=app, min_version='1', max_version='2', min_app_version='3', max_app_version='4') def check(self, obj, **kw): """Check that key/value pairs in kw match attributes of obj.""" for key, expected in kw.items(): actual = getattr(obj, key) eq_(actual, expected, '[%s] %r != %r' % (key, actual, expected)) def test_is_hosted(self): c = 
CompatOverride.objects.create(guid='a') assert not c.is_hosted() a = Addon.objects.create(type=1, guid='b') c = CompatOverride.objects.create(guid='b') assert c.is_hosted() def test_override_type(self): one = CompatOverride.objects.get(guid='one') # The default is incompatible. c = CompatOverrideRange.objects.create(compat=one, app_id=1) eq_(c.override_type(), 'incompatible') c = CompatOverrideRange.objects.create(compat=one, app_id=1, type=0) eq_(c.override_type(), 'compatible') def test_guid_match(self): # We hook up the add-on automatically if we see a matching guid. addon = Addon.objects.create(id=1, guid='oh yeah', type=1) c = CompatOverride.objects.create(guid=addon.guid) eq_(c.addon_id, addon.id) c = CompatOverride.objects.create(guid='something else') assert c.addon is None def test_transformer(self): compats = list(CompatOverride.objects .transform(CompatOverride.transformer)) ranges = list(CompatOverrideRange.objects.all()) # If the transformer works then we won't have any more queries. with self.assertNumQueries(0): for c in compats: eq_(c.compat_ranges, [r for r in ranges if r.compat_id == c.id]) def test_collapsed_ranges(self): # Test that we get back the right structures from collapsed_ranges(). c = CompatOverride.objects.get(guid='one') r = c.collapsed_ranges() eq_(len(r), 1) compat_range = r[0] self.check(compat_range, type='incompatible', min='*', max='*') eq_(len(compat_range.apps), 1) self.check(compat_range.apps[0], app=amo.FIREFOX, min='*', max='*') def test_collapsed_ranges_multiple_versions(self): c = CompatOverride.objects.get(guid='one') CompatOverrideRange.objects.create(compat=c, app_id=1, min_version='1', max_version='2', min_app_version='3', max_app_version='3.*') r = c.collapsed_ranges() eq_(len(r), 2) self.check(r[0], type='incompatible', min='*', max='*') eq_(len(r[0].apps), 1) self.check(r[0].apps[0], app=amo.FIREFOX, min='*', max='*') self.check(r[1], type='incompatible', min='1', max='2') eq_(len(r[1].apps), 1) self.check(r[1].apps[0], app=amo.FIREFOX, min='3', max='3.*') def test_collapsed_ranges_different_types(self): # If the override ranges have different types they should be separate # entries. 
c = CompatOverride.objects.get(guid='one') CompatOverrideRange.objects.create(compat=c, app_id=1, type=0, min_app_version='3', max_app_version='3.*') r = c.collapsed_ranges() eq_(len(r), 2) self.check(r[0], type='compatible', min='*', max='*') eq_(len(r[0].apps), 1) self.check(r[0].apps[0], app=amo.FIREFOX, min='3', max='3.*') self.check(r[1], type='incompatible', min='*', max='*') eq_(len(r[1].apps), 1) self.check(r[1].apps[0], app=amo.FIREFOX, min='*', max='*') def test_collapsed_ranges_multiple_apps(self): c = CompatOverride.objects.get(guid='two') r = c.collapsed_ranges() eq_(len(r), 1) compat_range = r[0] self.check(compat_range, type='incompatible', min='1', max='2') eq_(len(compat_range.apps), 2) self.check(compat_range.apps[0], app=amo.FIREFOX, min='*', max='*') self.check(compat_range.apps[1], app=amo.FIREFOX, min='3', max='4') def test_collapsed_ranges_multiple_versions_and_apps(self): c = CompatOverride.objects.get(guid='two') CompatOverrideRange.objects.create(min_version='5', max_version='6', compat=c, app_id=1) r = c.collapsed_ranges() eq_(len(r), 2) self.check(r[0], type='incompatible', min='1', max='2') eq_(len(r[0].apps), 2) self.check(r[0].apps[0], app=amo.FIREFOX, min='*', max='*') self.check(r[0].apps[1], app=amo.FIREFOX, min='3', max='4') self.check(r[1], type='incompatible', min='5', max='6') eq_(len(r[1].apps), 1) self.check(r[1].apps[0], app=amo.FIREFOX, min='*', max='*')
bsd-3-clause
4,149,593,830,496,497,700
35.003166
79
0.59536
false
I-sektionen/i-portalen
wsgi/iportalen_django/exchange_portal/urls.py
1
1753
from django.conf.urls import url, include
from . import views

app_name = 'exchange_portal'

exchange_portal_patterns = [
    url(r'^$', view=views.Exchange_Portal, name="exchange_portal"),
    url(r'^admin/$', view=views.Admin, name="admin"),
    url(r'^feedback/$', view=views.Add_Feedback, name="feedback"),
    url(r'^important_dates/$', view=views.Important_Dates, name='important_dates'),
    url(r'^contact/$', view=views.Contact, name='contact'),
    url(r'^school/(?P<pk>[0-9]+)/$', view=views.Exchange_School, name='school'),
    url(r'^search-autocomplete/$', view=views.Search_Autocomplete.as_view(), name='search_autocomplete'),
    url(r'^travel_stories/$', view=views.Travel_Stories, name="travel_stories"),
    url(r'^travel_story/(?P<pk>[0-9]+)/$', view=views.single_travel_story, name='travel_story'),
    url(r'^(?P<continent>\w{0,50})/$', view=views.continent, name="continent"),
    url(r'asien/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'nordamerika/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'europa/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'afrika/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'oceanien/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country"),
    url(r'sydamerika/(?P<country>\w{0,50})$', view=views.continent_filtered, name="country")
]

urlpatterns = [url(r'^', include(exchange_portal_patterns, namespace=app_name))]
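A hedged illustration (not in the original file) of how these namespaced routes would typically be reversed elsewhere in the project; the import path assumes the Django 1.x era implied by django.conf.urls.url:

    # Illustrative only: reversing the namespaced URL names defined above.
    from django.core.urlresolvers import reverse  # django.urls.reverse on Django >= 2.0

    school_url = reverse('exchange_portal:school', kwargs={'pk': 7})
    # e.g. '/school/7/' relative to wherever the project mounts these patterns
    continent_url = reverse('exchange_portal:continent', kwargs={'continent': 'asien'})
    # e.g. '/asien/'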
mit
-6,569,624,389,402,226,000
61.607143
106
0.584712
false
chapmanb/svtyper
scripts/vcf_paste.py
1
4673
#!/usr/bin/env python

import argparse, sys
from argparse import RawTextHelpFormatter

__author__ = "Colby Chiang ([email protected])"
__version__ = "$Revision: 0.0.1 $"
__date__ = "$Date: 2015-04-13 14:31 $"

# --------------------------------------
# define functions

def get_args():
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description="\
vcf_paste.py\n\
author: " + __author__ + "\n\
version: " + __version__ + "\n\
description: Paste VCFs from multiple samples")
    # parser.add_argument('-a', '--argA', metavar='argA', type=str, required=True, help='description of argument')
    # parser.add_argument('-b', '--argB', metavar='argB', required=False, help='description of argument B')
    # parser.add_argument('-c', '--flagC', required=False, action='store_true', help='sets flagC to true')
    parser.add_argument('-m', '--master', type=argparse.FileType('r'), default=None,
                        help='VCF file to set first 8 columns of variant info [first file in vcf_list]')
    parser.add_argument('-q', '--sum_quals', required=False, action='store_true',
                        help='Sum QUAL scores of input VCFs as output QUAL score')
    parser.add_argument('vcf_list', metavar='vcf', nargs='*', type=argparse.FileType('r'), default=None,
                        help='VCF file(s) to join')

    # parse the arguments
    args = parser.parse_args()
    if len(args.vcf_list) < 1:
        parser.print_help()
        exit(1)

    # send back the user input
    return args

# primary function
def svt_join(master, sum_quals, vcf_list):
    # if master not provided, set as first VCF
    if master is None:
        master = open(vcf_list[0].name)

    sample_list = []

    # print header
    while 1:
        master_line = master.readline()
        if not master_line:
            break
        if master_line[:2] != "##":
            break
        print (master_line.rstrip())

    # get sample names
    for vcf in vcf_list:
        while 1:
            line = vcf.readline()
            if not line:
                break
            if line[:2] == "##":
                continue
            if line[0] == "#":
                line_v = line.rstrip().split('\t')
                for sample in line_v[9:]:
                    sample_list.append(sample)
                break
    print '\t'.join(master_line.rstrip().split('\t')[:8] + ['FORMAT'] + sample_list)

    # iterate through VCF body
    while 1:
        master_line = master.readline()
        if not master_line:
            break

        master_v = master_line.rstrip().split('\t')
        master_chrom = master_v[0]
        master_pos = master_v[1]
        out_v = master_v[:8]  # output array of fields
        qual = float(out_v[5])
        format = None  # column 9, VCF format field.
        for vcf in vcf_list:
            line = vcf.readline()
            if not line:
                sys.stderr.write('\nError: VCF files differ in length\n')
                exit(1)
            line_v = line.rstrip().split('\t')
            line_chrom = line_v[0]
            line_pos = line_v[1]

            # set FORMAT field as format in first VCF.
            # cannot extract this from master, since it may have
            # been altered in the processing of the VCFs.
            if format is None:
                format = line_v[8]
                out_v.append(format)

            # ensure that each VCF position agrees with the master
            if (master_chrom != line_chrom or master_pos != line_pos):
                sys.stderr.write('\nError: variant in %s (%s:%s) conflicts with master (%s:%s)\n'
                                 % (vcf.name, line_chrom, line_pos, master_chrom, master_pos))
                exit(1)

            # ensure that the format for all VCFs agree with the first
            if (format != line_v[8]):
                sys.stderr.write('\nError: format in %s (%s) conflicts with first VCF (%s)\n'
                                 % (vcf.name, line_v[8], format))
                exit(1)

            qual += float(line_v[5])
            out_v = out_v + line_v[9:]
        if sum_quals:
            out_v[5] = qual
        sys.stdout.write('\t'.join(map(str, out_v)) + '\n')

    # close files
    master.close()
    for vcf in vcf_list:
        vcf.close()

    return

# --------------------------------------
# main function
def main():
    # parse the command line args
    args = get_args()

    # call primary function
    svt_join(args.master, args.sum_quals, args.vcf_list)

# initialize the script
if __name__ == '__main__':
    try:
        sys.exit(main())
    except IOError, e:
        if e.errno != 32:  # ignore SIGPIPE
            raise
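A hedged usage sketch based on the options defined in get_args() above; file names are illustrative, and the `except IOError, e` syntax means the script targets Python 2:

    # Paste per-sample VCFs that list the same variants in the same order,
    # taking the first eight columns from sites.vcf and summing QUAL (-q):
    #
    #   python2 vcf_paste.py -q -m sites.vcf sampleA.vcf sampleB.vcf > merged.vcf
    #
    # Without -m, the first positional VCF (sampleA.vcf) supplies those columns.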
mit
4,319,263,748,814,037,000
32.862319
165
0.53777
false
jinzekid/codehub
python/练习/练习-三级菜单.py
1
2092
# Author: Jason Lu

menu = {
    '北京': {
        '海淀': {
            '五道口': {
                'soho': {},
                '网易': {},
                'google': {}
            },
            '中关村': {
                '爱奇艺': {},
                '汽车之家': {},
                'youku': {},
            },
            '上地': {
                '百度': {},
            },
        },
        '昌平': {
            '沙河': {
                '老男孩': {},
                '北航': {},
            },
            '天通苑': {},
            '回龙观': {},
        },
        '朝阳': {},
        '东城': {},
    },
    '上海': {
        '闵行': {
            "人民广场": {
                '炸鸡店': {}
            }
        },
        '闸北': {
            '火车战': {
                '携程': {}
            }
        },
        '浦东': {},
    },
    '山东': {},
}

exit_flag = False

# First version: one hand-written loop per menu level
while not exit_flag:
    for i in menu:
        print(i)
    choice = input("Select to enter>>:")
    if choice in menu:
        while not exit_flag:
            for i2 in menu[choice]:
                print(i2)
            choice2 = input("Select to enter>>:")
            if choice2 in menu[choice]:
                while not exit_flag:
                    for i3 in menu[choice][choice2]:
                        print(i3)
                    choice3 = input("Select to enter>>:")
                    if choice3 in menu[choice][choice2]:
                        for i4 in menu[choice][choice2][choice3]:
                            print("\t\t", i4)
                        choice4 = input("Last level, press b to go back>>:")
                        if choice4 == "b":
                            pass
                        elif choice4 == "q":
                            exit_flag = True
                    if choice3 == "b":
                        break
                    elif choice3 == "q":
                        exit_flag = True
            if choice2 == "b":
                break
            elif choice2 == "q":
                exit_flag = True
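A minimal alternative sketch (not part of the original exercise) showing how the same nested-dict menu can be walked with an explicit stack, so a single loop handles every level instead of one hand-written loop per depth:

    # Alternative sketch: one loop plus a stack replaces the nested loops above.
    current = menu   # the level currently being displayed
    stack = []       # parent levels, so "b" can go back

    while True:
        for name in current:
            print(name)
        choice = input("Select to enter (b=back, q=quit)>>:")
        if choice == "q":          # quit from any level
            break
        elif choice == "b":        # go back one level if possible
            if stack:
                current = stack.pop()
        elif choice in current:    # descend into the chosen submenu
            stack.append(current)
            current = current[choice]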
gpl-3.0
-9,118,270,210,886,829,000
21.564706
65
0.270073
false