HexPhotos\"\r\n\t\t\tmessage = \"/shop/confirmed/\" + str(user.id) +\"/\" + str(user.confirmedlink)\r\n\t\t\tmessage = html\r\n\r\n\t\t\tmsg = EmailMultiAlternatives('HexPhotos Confirmation Email', message, 'hexphotos.byu@gmail.com', [user.email])\r\n\t\t\tmsg.attach_alternative(html, \"text/html\")\r\n\t\t\tmsg.send()\r\n\r\n\t\t\t#once the account is created, the user is being logged in\r\n\t\t\tloginuser = authenticate(username = form.cleaned_data['username'], password = form.cleaned_data['password'])\r\n\t\t\tlogin(request, loginuser)\r\n\t\t\tredirect = request.META.get(\"HTTP_REFERER\")\r\n\t\t\treturn HttpResponse('')\r\n\r\n\ttvars = {\r\n\t\t'form': form,\r\n\t}\r\n\treturn templater.render_to_response(request, 'new_account.html', tvars)\r\n\r\nclass UserForm(forms.Form):\r\n\tusername = forms.CharField(required=False, label='Username', widget=forms.TextInput(attrs={'class':'form-control'}))\r\n\temail = forms.EmailField(required=False, label='Email', widget=forms.TextInput(attrs={'class':'form-control'}))\r\n\tpassword = forms.CharField(required=False, label='Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))\r\n\tretypepassword = forms.CharField(required=False, label='Confirm Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))\r\n\r\n\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tself.request = kwargs.pop('request', None)\r\n\t\tsuper(UserForm, self).__init__(*args, **kwargs)\r\n\r\n\tdef clean(self):\r\n\t\tallUsers = pmod.User.objects.all()\r\n\t\tfor u in allUsers:\r\n\t\t\tif self.cleaned_data['email'] == u.email:\r\n\t\t\t\traise forms.ValidationError(\"That email is already in use.\")\r\n\t\tif self.cleaned_data['password'] == \"\":\r\n\t\t\traise forms.ValidationError(\"You must enter a password.\")\r\n\t\tif self.cleaned_data['password'] != self.cleaned_data['retypepassword']:\r\n\t\t\traise forms.ValidationError(\"The passwords do not match.\")\r\n\t\treturn self.cleaned_data\r\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":344836642478137600,"string":"344,836,642,478,137,600"},"line_mean":{"kind":"number","value":41.0333333333,"string":"41.033333"},"line_max":{"kind":"number","value":210,"string":"210"},"alpha_frac":{"kind":"number","value":0.7007487736,"string":"0.700749"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":108999,"cells":{"repo_name":{"kind":"string","value":"shadowmint/nwidget"},"path":{"kind":"string","value":"tests/nwidget/textbox_tests.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1879"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom datetime import timedelta\ntry:\n import bootstrap\nexcept:\n pass\nimport unittest\nimport nwidget\nfrom nwidget.helpers import *\n\n\n\nclass Tests(PygletTestBase):\n\n def setup(self):\n return nwidget.Textbox(), nwidget.Assets()\n \n def shutdown(self):\n self.stop_pyglet()\n \n def test_can_create_instance(self):\n def update():\n if self._tested:\n if self._elpased > timedelta(seconds=8):\n self.shutdown()\n\n def draw():\n if self._tested:\n self._window.clear()\n for i in self.__widgets:\n i.draw()\n\n def runner():\n self.__widgets = []\n self.enable_blending()\n\n # Multiline label with edge wrapping\n i, a = self.setup()\n i.bounds(50, 110, 250, 210)\n i.text = \"Hello World Thd sf s dfas df sdf dsf adf dsf dsf dsaf dsa fdsaf adsf adsf asdf asdf\"\n i.font = a.resolve(\"data\", \"roboto.ttf\")\n i.color = (255, 255, 0, 255)\n i.size = 10\n i.panel = 
a.resolve(\"data\", \"textbox_panel1.png\")\n i.panel_focus = a.resolve(\"data\", \"textbox_panel2.png\")\n i.register(self._window)\n i0 = i\n self.__widgets.append(i)\n \n # Single line label\n i, a = self.setup()\n i.bounds(10, 250, 390, 330)\n i.text = \"Hello World\"\n i.font = a.resolve(\"data\", \"roboto.ttf\")\n i.color = (30, 30, 30, 255)\n i.multiline = False\n i.size = 12\n i.limit = 30\n i.register(self._window)\n i.panel = a.resolve(\"data\", \"textbox_panel1.png\")\n i.panel_focus = a.resolve(\"data\", \"textbox_panel2.png\")\n i.on_change = \"TEXT_CHANGE\"\n i.padding = 20\n i1 = i\n self.__widgets.append(i)\n\n def cb(code, widget):\n print(\"New text: %s\" % widget.text)\n nwidget.listen(\"TEXT_CHANGE\", cb)\n \n self.run_pyglet(runner, draw, update)\n \nif __name__ == \"__main__\":\n unittest.main()\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":1046455372296335500,"string":"1,046,455,372,296,335,500"},"line_mean":{"kind":"number","value":24.0533333333,"string":"24.053333"},"line_max":{"kind":"number","value":100,"string":"100"},"alpha_frac":{"kind":"number","value":0.5753060138,"string":"0.575306"},"autogenerated":{"kind":"bool","value":false,"string":"false"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1089,"numItemsPerPage":100,"numTotalItems":110960,"offset":108900,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODI1NTM1Miwic3ViIjoiL2RhdGFzZXRzL2NvZGVwYXJyb3QvY29kZXBhcnJvdC12YWxpZC1uZWFyLWRlZHVwbGljYXRpb24iLCJleHAiOjE3NTgyNTg5NTIsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.i7fyfkIcS1yeUQVokt_Y2vVDthqACW4xm9qQPgFhDX_Ko0vyRvruuedXVdwlmLYoerAZwOkOmGkDaAfZ0_otCQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Dataset columns (name, type, observed range):

repo_name: string, lengths 5 to 92
path: string, lengths 4 to 232
copies: string, 22 distinct values
size: string, lengths 4 to 7
content: string, lengths 626 to 1.05M
license: string, 15 distinct values
hash: int64, -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean: float64, 5.21 to 99.9
line_max: int64, 12 to 999
alpha_frac: float64, 0.25 to 0.96
autogenerated: bool, 1 class
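A minimal sketch of streaming and filtering rows with this schema via the datasets library. The dataset id is taken from this page's embedded metadata; the split name and filter thresholds are assumptions:

```python
from datasets import load_dataset

# Dataset id as it appears in the page metadata; split name assumed.
ds = load_dataset("codeparrot/codeparrot-valid-near-deduplication",
                  split="train", streaming=True)

# Keep rows that look like hand-written code: not autogenerated, reasonable
# maximum line length, and a plausible fraction of alphabetic characters.
kept = (row for row in ds
        if not row["autogenerated"]
        and row["line_max"] < 500
        and 0.3 < row["alpha_frac"] < 0.9)

for row in kept:
    print(row["repo_name"], row["path"], row["license"])
    break
```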
repo_name: HaseloffLab/MarpoDB
path: server/keepPrimary.py
copies: 1
size: 1372
content:

import sys
from partsdb.partsdb import PartsDB
from tables import *
from Bio.SeqFeature import SeqFeature, CompoundLocation, FeatureLocation

def coordinatesToLocation(coordinates):
    locationParts = [ FeatureLocation(int(p[0]), int(p[1]), int(p[2]) ) for p in [ s.split(',') for s in coordinates.split(';')] ]
    if len(locationParts) == 1:
        return locationParts[0]
    elif len(locationParts) > 1:
        return CompoundLocation(locationParts)
    else:
        return None

marpodb = PartsDB('postgresql:///'+sys.argv[1], Base = Base)
session = marpodb.Session()

n = 0

for locus in session.query(Locus).all():
    genes = session.query(Gene).filter(Gene.locusID == locus.id).all()
    if not genes:
        session.delete(locus)
    else:
        if len(genes) > 1:
            primaryGene = max( genes, key = lambda gene: len( coordinatesToLocation(gene.cds.coordinates) ) )
            genes.remove(primaryGene)
            for gene in genes:
                session.query(BlastpHit).filter(gene.cds.id == BlastpHit.targetID).delete(synchronize_session='fetch')
                session.query(InterProHit).filter(gene.cds.id == InterProHit.targetID).delete(synchronize_session='fetch')
                session.query(DbxRef).filter(gene.cds.id == DbxRef.targetID).delete(synchronize_session='fetch')
                session.delete(gene.cds)
                if gene.utr5:
                    session.delete(gene.utr5)
                if gene.utr3:
                    session.delete(gene.utr3)
                session.delete(gene)

session.commit()

license: mit
hash: -515,878,988,557,809,340
line_mean: 34.205128
line_max: 127
alpha_frac: 0.722303
autogenerated: false
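The coordinate strings parsed by coordinatesToLocation above appear to encode one start,end,strand triple per part, semicolon-separated, and the script uses the resulting location's length to pick the longest CDS as the primary gene. A small self-contained check of that reading using only Biopython; the sample string is made up:

```python
from Bio.SeqFeature import FeatureLocation, CompoundLocation

def coordinates_to_location(coordinates):
    # "start,end,strand;start,end,strand;..." -> FeatureLocation / CompoundLocation
    parts = [FeatureLocation(int(a), int(b), int(c))
             for a, b, c in (s.split(',') for s in coordinates.split(';'))]
    if len(parts) == 1:
        return parts[0]
    return CompoundLocation(parts) if parts else None

loc = coordinates_to_location("100,200,1;300,450,1")
print(len(loc))  # 250: a CompoundLocation's length is the sum of its parts
```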
repo_name: martincmartin/rocket-optimizer
path: rocket_optimizer.py
copies: 1
size: 13296
content:

#!/usr/bin/env python3

# TODO: Maybe have a Propulsion class that has a list of parts and is
# separate from SRB/LiquidEngine?  Things are getting hairy with
# decouplers adding decouple_direction field to SRBs/LiquidEngines.
# Methods of SRB/LiquidEngine need to preserve it, which isn't obvious
# from the definition of the SRB/LiquidEngine classes.

import argparse
import math
from enum import Enum
from copy import copy

ACCELERATION_GRAVITY = 9.81

parser = argparse.ArgumentParser(
    description="Help for finding the most efficient way to get delta-V!"
)

parser.add_argument(
    "payload_mass",
    metavar="Mass",
    type=float,
    help="Mass in tonnes of payload that these stages will deliver.",
)

parser.add_argument("--steerable1st", dest="steerable1st", action="store_true")
parser.add_argument("--no-steerable1st", dest="steerable1st", action="store_false")
parser.set_defaults(steerable1st=True)

parser.add_argument(
    "--delta-v-of-atmosphere",
    dest="delta_v_of_atmosphere",
    type=float,
    help="All stages that start below this delta_v will use atmosphere Isp. Other will use vaccum.",
    default=1500,
)

parser.add_argument("--filter", dest="filter", action="store_true")
parser.add_argument("--no-filter", dest="filter", action="store_false")
parser.set_defaults(filter=True)

parser.add_argument(
    "--min-twr",
    dest="min_twr_at_launch",
    type=float,
    help="Minimum Thrust To Weight Ratio, i.e. acceleration as a multiple of g, at launch.",
    default=1.2,
)

args = parser.parse_args()


class Radius(Enum):
    tiny = 1
    small = 2
    large = 3
    extra_large = 4
    radial = 5


class Direction(Enum):
    stack = 1
    radial = 2


class Part:
    def __init__(self, name, radius, cost, mass):
        self.name = name
        self.radius = radius
        self.cost = cost
        self.mass = mass

    def __radd__(self, other):
        c = copy(other)
        iadd(c, self)
        return c


# Also for separators
class Decoupler(Part):
    pass


def iadd(self, other):
    self.cost += other.cost
    self.full_name += " w/ " + other.name
    if hasattr(self, "mass"):
        self.mass += other.mass
    else:
        self.full_mass += other.mass
        self.empty_mass += other.mass
    if isinstance(other, Decoupler):
        dir = Direction.radial if other.radius == Radius.radial else Direction.stack
        assert not hasattr(self, "decouple_direction") or self.decouple_direction == dir
        self.decouple_direction = dir


SmallStackDecoupler = Decoupler("TD-12 Stack Decoupler", Radius.small, 400, 0.04)
RadialDecoupler = Decoupler("TT-38K Radial Decoupler", Radius.radial, 600, 0.025)

# Unfortunatley, the game has a part called "Small Nose Cone," whose
# radius is tiny, while the "Aerodynamic Nose Cone"'s radius is small
AerodynamicNoseCone = Part("Aerodynamic Nose Cone", Radius.small, 240, 0.03)


class SRB:
    def __init__(self, name, cost, full_mass, empty_mass, thrust_atm, isp_atm, isp_vac):
        self.name = name
        self.full_name = name
        self.cost = cost
        self.full_mass = full_mass
        self.empty_mass = empty_mass
        self.thrust_atm = thrust_atm
        self.ve_atm = isp_atm * ACCELERATION_GRAVITY
        self.ve_vac = isp_vac * ACCELERATION_GRAVITY

    def __iadd__(self, part):
        assert isinstance(part, Part)
        assert part.radius == Raidus.radial or part.radius == Radius.small
        iadd(self, part)

    def __mul__(self, multiplier):
        c = copy(self)
        c.name += "x" + str(multiplier)
        c.cost *= multiplier
        c.full_mass *= multiplier
        c.empty_mass *= multiplier
        c.thrust_atm *= multiplier
        # Don't need to modify ve_atm, ve_vac or decouple_direction
        return c


Flea = SRB("Flea", 200, 1.5, 0.45, 162.91, 140, 165)
Hammer = SRB("Hammer", 400, 3.5625, 0.7525, 197.90, 170, 195)
Thumper = SRB("Thumper", 850, 7.65, 1.5, 250.0, 175, 210)
Kickback = SRB("Kickback", 2700, 24, 4.5, 593.86, 195, 220)

FleaS = Flea + SmallStackDecoupler
HammerS = Hammer + SmallStackDecoupler
ThumperS = Thumper + SmallStackDecoupler
KickbackS = Kickback + SmallStackDecoupler
Thumperx3S = (
    Thumper * 3 + AerodynamicNoseCone + AerodynamicNoseCone + SmallStackDecoupler
)
Kickbackx3S = (
    Kickback * 3 + AerodynamicNoseCone + AerodynamicNoseCone + SmallStackDecoupler
)

srbs = [FleaS, HammerS, ThumperS, KickbackS, Thumperx3S, Kickbackx3S]


def fuel_cost(fuel, radius):
    assert radius is Radius.small or radius is Radius.large
    if radius is Radius.small:
        assert fuel % 100 == 0
        num800 = fuel // 800
        cost = 800 * num800
        fuel -= 800 * num800
        if fuel >= 400:
            cost += 500
            fuel -= 400
        if fuel >= 200:
            cost += 275
            fuel -= 200
        if fuel >= 100:
            cost += 150
            fuel -= 100
        assert fuel == 0
    elif radius is Radius.large:
        assert fuel % 800 == 0
        num6400 = fuel // 6400
        cost = 5750 * num6400
        fuel -= 6400 * num6400
        if fuel >= 3200:
            cost += 3000
            fuel -= 3200
        if fuel >= 1600:
            cost += 1550
            fuel -= 1600
        if fuel >= 800:
            cost += 800
            fuel -= 800
        assert fuel == 0
    return cost


class LiquidEngine:
    def __init__(self, name, radius, cost, mass, thrust_atm, isp_atm, isp_vac):
        self.name = name
        self.radius = radius
        self.cost = cost
        self.mass = mass
        self.thrust_atm = thrust_atm
        self.ve_atm = isp_atm * ACCELERATION_GRAVITY
        self.ve_vac = isp_vac * ACCELERATION_GRAVITY


Terrier = LiquidEngine("Terrier", Radius.small, 390, 0.5, 14.78, 85, 345)
Swivel = LiquidEngine("Swivel", Radius.small, 1200, 1.5, 167.97, 250, 320)
Vector = LiquidEngine("Vector", Radius.small, 18000, 4.0, 936.5, 295, 315)
Poodle = LiquidEngine("Poodle", Radius.large, 1300, 1.75, 64.29, 90, 350)
Skipper = LiquidEngine("Skipper", Radius.large, 5300, 3.0, 568.75, 280, 320)
Mainsail = LiquidEngine("Mainsail", Radius.large, 13000, 6.0, 1379.0, 285, 310)


# Liquid engine plus fuel tanks, plus optional SRBs.
class Liquid:
    def __init__(self, engine, fuel, srbs=None):
        if srbs is None:
            srbs = []
        self.engine = engine
        self.fuel = fuel
        # For now, all liquid stages use stack decouplers.
        self.decouple_direction = Direction.stack

        # Compute weighted average of exhaust velocities.  I think
        # this is accurate if we throttle everything such that they
        # all burn out at the same time.

        # For liquid fuel tanks, the weight (both full and empty) is
        # proportional to capacity.
        liquid_fuel_mass = fuel / 200.0
        ve_atm = engine.ve_atm * liquid_fuel_mass
        ve_vac = engine.ve_vac * liquid_fuel_mass
        total_fuel_mass = liquid_fuel_mass
        for booster in srbs:
            booster_fuel_mass = booster.full_mass - booster.empty_mass
            ve_atm += booster.ve_atm * booster_fuel_mass
            ve_vac += booster.ve_vac * booster_fuel_mass
            total_fuel_mass += booster_fuel_mass
        self.ve_atm = ve_atm / total_fuel_mass
        self.ve_vac = ve_vac / total_fuel_mass

        self.name = engine.name + " " + str(fuel)
        for booster in srbs:
            self.name += " " + booster.name

        liquid_empty_mass = engine.mass + fuel / 1600.0
        self.empty_mass = liquid_empty_mass + sum([s.empty_mass for s in srbs])
        self.full_mass = (
            liquid_empty_mass + liquid_fuel_mass + sum([s.full_mass for s in srbs])
        )
        self.thrust_atm = engine.thrust_atm + sum([s.thrust_atm for s in srbs])
        self.cost = (
            engine.cost + fuel_cost(fuel, engine.radius) + sum([s.cost for s in srbs])
        )


class Stage:
    def __init__(self, rocket, propulsion, payload_mass):
        assert hasattr(propulsion, "decouple_direction")
        self.rocket = rocket
        self.propulsion = propulsion
        self.cost = propulsion.cost
        self.payload_mass = payload_mass
        self.full_mass = payload_mass + propulsion.full_mass
        self.empty_mass = payload_mass + propulsion.empty_mass

    def compute_delta_v(self, atm):
        self.ve = self.propulsion.ve_atm if atm else self.propulsion.ve_vac
        self.delta_v = self.ve * math.log(self.full_mass / self.empty_mass)


class Rocket:
    # Stages ordered from highest to lowest, i.e. from closest to
    # payload to furthest from payload.
    def __init__(self, propulsions):
        propulsions = [p for p in propulsions if p is not None]

        # Loop through stages from payload to ground, computing mass of each stage as we go.
        self.stages = []
        prev_mass = args.payload_mass
        for i, propulsion in enumerate(propulsions):
            stage = Stage(self, propulsion, prev_mass)
            self.stages.append(stage)
            prev_mass = stage.full_mass

        # Can compute TWR at launch
        thrust = 0
        for stage in reversed(self.stages):
            thrust += stage.propulsion.thrust_atm
            if stage.propulsion.decouple_direction == Direction.stack:
                break
        self.twr_launch = thrust / self.stages[-1].full_mass / ACCELERATION_GRAVITY

        # Now loop in the other direction, deciding when to use atm of
        # vac Ve, and computing delta_v
        prev_delta_v = 0
        for s in reversed(self.stages):
            s.compute_delta_v(prev_delta_v < args.delta_v_of_atmosphere)
            prev_delta_v += s.delta_v

        self.cost = sum([s.cost for s in self.stages])
        self.delta_v = sum([s.delta_v for s in self.stages])


if args.steerable1st:
    srbs1st = []
else:
    srbs1st = srbs

# Up to two stages, all SRBs:
# rockets = \
#     [Rocket([first]) for first in srbs] + \
#     [Rocket([second, first]) for second in srbs for first in srbs] + \
#     [Rocket([third, second, first]) for third in srbs for second in srbs for first in srbs]

Thumper_radial = Thumper + AerodynamicNoseCone
ThumperR = Thumper_radial + RadialDecoupler
Kickback_radial = Kickback + AerodynamicNoseCone
KickbackR = Kickback_radial + RadialDecoupler

terriers = [Liquid(Terrier, fuel) for fuel in range(100, 3201, 100)]
swivels = [Liquid(Swivel, fuel) for fuel in range(100, 3201, 100)]
swivels_srbs = [
    Liquid(Swivel, fuel, [Thumper_radial] * n)
    for n in [2, 3]
    for fuel in range(100, 3201, 100)
] + [
    Liquid(Swivel, fuel, [Kickback_radial] * n)
    for n in [2, 3]
    for fuel in range(100, 3201, 100)
]
radial_srbs = [ThumperR * 2, ThumperR * 3, KickbackR * 2, KickbackR * 3]

# terriers = [Liquid(Terrier, 1200)]
# swivels = [Liquid(Swivel, 100)]
# swivels_srbs = [Liquid(Swivel, 1600, [Thumper_radial, Thumper_radial])]

# Swivel is never chosen for the middle of three stages.
#
# For a 4.53t payload, it never makes sense to "split" the terrier
# stage into two stages, since the cost of the terrier + stack
# separator = 790, about the cost of an FL-T800 (800).  So losing 800
# fuel is just not worth it.
#
# For a 0.53t payload, it starts to make sense around 6700 m/s delta
# v (atmosphere at 500 m/s).

rockets = (
    [Rocket([single]) for single in swivels + srbs1st]
    + [
        Rocket([second, first])
        for second in terriers + srbs
        for first in swivels + swivels_srbs + srbs1st
    ]
    + [
        Rocket([third, second, first])
        for third in terriers + srbs
        for second in terriers + swivels + srbs
        for first in swivels + swivels_srbs + srbs1st
    ]
    + [
        Rocket([third, second, first])
        for third in terriers + srbs
        for second in swivels + srbs1st
        for first in radial_srbs
    ]
    + [
        Rocket([fourth, third, second, first])
        for fourth in terriers + srbs
        for third in terriers + srbs
        for second in swivels + srbs1st
        for first in radial_srbs
    ]
    + [
        Rocket([fourth, third, second, first])
        for fourth in terriers + srbs
        for third in terriers + srbs
        for second in terriers + swivels + srbs
        for first in swivels + srbs1st
    ]
)

# Get rid of ones that won't get off the launch pad fast enough.
rockets = [r for r in rockets if r.twr_launch >= args.min_twr_at_launch]

# Could add this just to eliminate silly ones.
# rockets = [r for r in rockets if r.stages[-1].delta_v >= args.min_delta_v_of_first_stage]

rockets.sort(key=lambda r: r.delta_v, reverse=True)
rockets.sort(key=lambda r: r.cost)

# Keep only the dominating ones, i.e. eliminate ones that cost the same or more, but have lower delta-v.
if args.filter:
    filtered_rockets = []
    best_delta_v = None
    for r in rockets:
        if best_delta_v is None or r.delta_v > best_delta_v:
            filtered_rockets.append(r)
            best_delta_v = r.delta_v
else:
    filtered_rockets = rockets

print("  Cost  Delta-V  TWR")
for r in filtered_rockets:
    print(
        "%5d  %6.1f  %4.2f %4.2f  "
        % (r.cost, r.delta_v, r.twr_launch, r.stages[-1].full_mass),
        ["%10s, %7.2f, %7.2f" % (s.propulsion.name, s.delta_v, s.ve) for s in r.stages],
    )

license: apache-2.0
hash: -5,224,596,264,299,599,000
line_mean: 30.581948
line_max: 104
alpha_frac: 0.623947
autogenerated: false
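compute_delta_v in the record above is the Tsiolkovsky rocket equation, delta_v = ve * ln(m_full / m_empty). A worked check using the Hammer SRB figures from the part table above (vacuum Isp 195 s, no payload); this is illustration, not part of the dataset row:

```python
import math

ACCELERATION_GRAVITY = 9.81

# Hammer SRB from the table above: full 3.5625 t, empty 0.7525 t, vacuum Isp 195 s.
ve_vac = 195 * ACCELERATION_GRAVITY           # effective exhaust velocity, ~1913 m/s
delta_v = ve_vac * math.log(3.5625 / 0.7525)  # Tsiolkovsky rocket equation
print(round(delta_v))  # ~2974 m/s; adding payload mass to both terms lowers this
```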
repo_name: theresaswayne/imagej-plugins
path: Demos and Tests/misc scripts/Scripts/Tutorial3_.py
copies: 1
size: 1079
content:

from ij import IJ
from ij.process import ImageStatistics as IS
import os

options=IS.MEAN|IS.MEDIAN|IS.MIN_MAX

def getStatistics(imp):
    global options
    ip=imp.getProcessor()
    stats=IS.getStatistics(ip,options,imp.getCalibration())
    return stats.mean,stats.median,stats.min,stats.max

folder="/Users/confocal/Desktop/jython"

for filename in os.listdir(folder):
    if filename.endswith(".tif"):
        print "now processing",filename
        imp=IJ.openImage(os.path.join(folder,filename))
        if imp is None:  # ends with tif but has no image
            print "where's my file",filename, "dude"
            continue  # goes out of this .tif loop and to the else
        mean,median,min,max=getStatistics(imp)  # call the function
        print "Image statistics for",imp.title
        print "mean",mean
        print "median",median
        print "min and max:", min,"-",max
    else:
        print "ignoring",filename  # does not end with tif

license: gpl-3.0
hash: 3,821,387,374,227,072,000
line_mean: 36.241379
line_max: 77
alpha_frac: 0.597776
autogenerated: false
repo_name: FofanovLab/VaST
path: VaST/Haplotype.py
copies: 1
size: 6237
content:

import json
import logging

import numpy as np
import itertools as it

from utils import parse_flag_file
from analyze import (
    remove_extra_loci, get_resolution,
    get_summary_data, get_haplotype_matrix,
    get_resolution_matrix)


class Haplotype:
    def __init__(
            self, patterns, minimum_spanning_set,
            flag_file_path, primer_zone_size, variant_matrix, sep):
        self._logger = logging.getLogger(__name__)
        self._variant_matrix = variant_matrix
        self._sep = sep
        self._minimum_spanning_set = minimum_spanning_set
        self._selected_patterns = \
            self._minimum_spanning_set.get_selected_patterns()
        self._selected_amplicons = \
            self._minimum_spanning_set.get_selected_amplicons()
        self._patterns = patterns
        self._pattern_dic = patterns.get_pattern_dic(
            self._selected_patterns)
        self._pattern_df = patterns.get_pattern_df(
            self._selected_patterns)
        # Selected amps is the group of amplicons for a pattern that
        # were not removed due to overlap with other amplicons
        # in the minimum spanning set.
        self._pattern_dic = self._get_selected_amplicons()
        self._get_flags(
            flag_file_path, int(primer_zone_size))

    def _get_flags(self, flag_file_path, primer_zone_size):
        flag_df = parse_flag_file(flag_file_path)
        for pattern, amplicons in self._pattern_dic.iteritems():
            for amplicon, chars in amplicons.iteritems():
                genome = chars['g']['name']
                genome_size = int(chars['g']['length'])
                start = int(amplicon)
                stop = int(chars['s'])
                up_start = start - primer_zone_size - 1 if start - primer_zone_size > 1 else 0
                up_stop = start - 1
                down_start = stop
                down_stop = (stop + primer_zone_size
                             if stop + primer_zone_size < genome_size
                             else genome_size - 1)
                upstream_flags = np.array(
                    flag_df[flag_df.Genome == genome].iloc[up_start: up_stop].Flag,
                    dtype=int)
                downstream_flags = np.array(
                    flag_df[flag_df.Genome == genome].iloc[down_start: down_stop].Flag,
                    dtype=int)
                upstream_count = np.array(
                    [sum([not value for value in run[1]])
                     for run in it.groupby(np.array(upstream_flags, dtype=bool))],
                    dtype=int)
                downstream_count = np.array(
                    [sum([not value for value in run[1]])
                     for run in it.groupby(np.array(downstream_flags, dtype=bool))],
                    dtype=int)
                percent_ok = (
                    (np.sum(upstream_count) + np.sum(downstream_count))/float(
                        len(upstream_flags) + len(downstream_flags)) * 100)
                med_size = np.median(np.append(upstream_count, downstream_count))
                self._pattern_dic[pattern][amplicon]['primer_zone'] = {
                    'upstream': ",".join(np.array(upstream_flags, dtype=str)),
                    'downstream': ",".join(np.array(downstream_flags, dtype=str)),
                    'percent_ok': percent_ok,
                    'med_size': med_size
                }

    def _get_selected_amplicons(self):
        new_dic = {}
        for pattern, sel_amplicons in zip(
                self._selected_patterns, self._selected_amplicons):
            all_amplicons = self._pattern_dic[pattern]
            new_dic[pattern] = {
                k: v for k, v in all_amplicons.iteritems()
                if k in sel_amplicons}
        return new_dic

    def write_haplotype(self, file_name):
        self._logger.info("Writing haplotype to %s", file_name)
        self._pattern_df.to_csv(file_name)

    def write_json(self, file_name):
        self._logger.info(
            "Writing minimum spanning set amplicons to %s", file_name)
        with open(file_name, 'w') as out:
            out.write(json.dumps(self._pattern_dic))

    def write_haplotype_matrix(self, file_name):
        self._logger.info("Writing haplotype matrix to %s", file_name)

    def write_suggested_amplicons(self, file_name):
        self._logger.info(
            "Writing suggested amplicons to %s", file_name
        )
        with open(file_name, 'w') as out:
            out.write()

    def write_output(self, haplotype_output, pattern_output, amplicon_output):
        best_loci = remove_extra_loci(self._pattern_dic)
        pattern_order = self._pattern_df.columns
        haplotype_matrix = get_haplotype_matrix(
            pattern_order, best_loci, self._variant_matrix, self._sep)
        self._logger.info("Writing haplotype matrix to %s", haplotype_output)
        haplotype_matrix.to_csv(haplotype_output)
        self._logger.info("Writing pattern matrix to %s", pattern_output)
        scores, patterns = get_resolution(self._pattern_df)
        pattern_matrix = get_resolution_matrix(
            self._pattern_df.index, pattern_order, patterns)
        pattern_matrix.to_csv(pattern_output)
        self._logger.info("Writing amplicon matrix to %s", amplicon_output)
        amplicon_matrix = get_summary_data(best_loci, scores, pattern_order)
        amplicon_matrix.to_csv(amplicon_output, index=None)

    def write_summary(self, file_name):
        self._logger.info(
            "Writing summary to %s", file_name
        )
        with open(file_name, 'w') as out:
            out.write(
                "Minimum set size: {}\n".format(
                    len(self._selected_patterns)))
            out.write(
                "Resolution Index: {:0.2f}%\n".format(
                    self._minimum_spanning_set.get_resolution_index())
            )
            group_size, counts = \
                self._minimum_spanning_set.get_resolution_groups()
            out.write("Group Size Breakdown:\n")
            labels = [
                "Group(s) of size {}".format(i)
                if i > 1 else "Strain(s) Fully Resolved"
                for i in group_size]
            for label, count in zip(labels, counts):
                out.write("{0} {1}\n".format(count, label))

license: mit
hash: -71,548,061,211,650,456
line_mean: 42.3125
line_max: 98
alpha_frac: 0.568222
autogenerated: false
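_get_flags above counts runs of unflagged (zero) positions in the primer zones with itertools.groupby. A stand-alone illustration of that run-length trick, with invented flag values:

```python
import itertools as it

flags = [1, 0, 0, 1, 0, 0, 0]  # 1 = flagged position, 0 = usable primer-zone position
# One entry per run of equal boolean values; runs of 1s contribute 0,
# runs of 0s contribute their length.
counts = [sum(not v for v in run) for _, run in it.groupby(map(bool, flags))]
print(counts)  # [0, 2, 0, 3] -> contiguous usable stretches of length 2 and 3
```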
repo_name: nebgnahz/CS268NetworkMeasurement
path: king/geoutils.py
copies: 1
size: 1409
content:

"""All sorts of geo utils for measurement project
"""

import math, sys, getopt

def distance(origin, destination, radius = 6371):
    """Based on Haversine formula, default return result is kilometers"""
    # The Haversine formula is an equation that can be used to find great-circle distances between two points on a sphere from their longitudes and latitudes.
    # When this formula is applied to the earth the results are an approximation because the Earth is not a perfect sphere.
    # The currently accepted (WGS84) radius at the equator is 6378.137 km and 6356.752 km at the polar caps. For aviation purposes the FAI uses a radius of 6371.0 km
    lat1, lon1 = origin
    lat2, lon2 = destination
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
        * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
    d = radius * c
    return d

if __name__ == "__main__":
    # parse command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        if o in ("-h", "--help"):
            print __doc__
            sys.exit(0)
    seattle = [47.621800, -122.350326]
    olympia = [47.041917, -122.893766]
    print "distance:", distance(seattle, olympia)

license: bsd-2-clause
hash: -2,797,107,088,448,208,400
line_mean: 35.128205
line_max: 162
alpha_frac: 0.689851
autogenerated: false
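The distance() function above computes the Haversine formula, a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2), d = 2 * R * atan2(sqrt(a), sqrt(1 - a)). Running the module's own Seattle/Olympia coordinates through a Python 3 transcription of it as a quick check:

```python
import math

def haversine_km(origin, destination, radius=6371.0):
    lat1, lon1 = origin
    lat2, lon2 = destination
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    a = (math.sin(dlat / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * math.sin(dlon / 2) ** 2)
    return 2 * radius * math.atan2(math.sqrt(a), math.sqrt(1 - a))

print(haversine_km([47.621800, -122.350326], [47.041917, -122.893766]))
# ~76.4 km for the Seattle -> Olympia pair used in the module's __main__ block
```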
repo_name: ld4apps/lda-serverlib
path: logiclibrary/example_logic_tier.py
copies: 1
size: 42946
content (end truncated):

from storage import operation_primitives
import urlparse, urllib
import json, rdf_json
from rdf_json import URI
from trsbuilder import TrackedResourceSetBuilder
import utils
import os
import requests
from requests.exceptions import ConnectionError
from base_constants import RDF, RDFS, LDP, CE, OWL, TRS, AC, AC_R, AC_W, AC_C, AC_D, AC_ALL, ADMIN_USER, NAMESPACE_MAPPINGS
from base_constants import URL_POLICY as url_policy

import logging
logger=logging.getLogger(__name__)

HISTORY = CE+'history'
CREATION_EVENT = TRS+'Creation'
MODIFICATION_EVENT = TRS+'Modification'
DELETION_EVENT = TRS+'Deletion'

CHECK_ACCESS_RIGHTS = os.environ.get('CHECK_ACCESS_RIGHTS') != 'False'

UNCHANGED=object() # special value for recurse() args

SAFE_IN_QUERY_STRING = "~:@!$'()*+,;=/" # exclude &

def quote_query_string(s):
    return urllib.quote(s, SAFE_IN_QUERY_STRING)

class Domain_Logic(object):
    def __init__(self, environ, change_tracking=False):
        self.environ = environ
        self.claims = utils.get_or_create_claims(environ)
        self.user = self.claims['user']
        self.url_components = url_policy.get_url_components(environ)
        self.tenant, self.namespace, self.document_id, self.extra_path_segments, self.path, self.path_parts, self.request_hostname, self.query_string = self.url_components
        self.change_tracking = change_tracking # TODO: should we provide a way to turn change_tracking on/off dynamically
        if change_tracking:
            self.trs_builders = {}

    def recurse(self, function, namespace=UNCHANGED, document_id=UNCHANGED, extra_path_segments=UNCHANGED, query_string=UNCHANGED, url=None, tenant=UNCHANGED):
        """
        Perform an operation with the same host-name and tenant, but new document_id, extra_segements and query_string.
        One implementation option would be to make a new instance of Domain_Logic and give it a new environ dict copy.
        This implementation is slightly cheaper/messier.
        """
        if url != None:
            if namespace != UNCHANGED or document_id != UNCHANGED or extra_path_segments != UNCHANGED or query_string != UNCHANGED:
                raise ValueError('may not set URL and also set namespace, document_id, extra_path_segments or query_string')
            namespace, document_id, extra_path_segments, parse_result = url_policy.parse(url)
            query_string = parse_result.query
        original_namespace = self.namespace
        original_document_id = self.document_id
        original_path = self.path
        original_extra_path_segments = self.extra_path_segments
        original_query_string = self.query_string
        original_path_parts = self.path_parts
        original_tenant = self.tenant
        try:
            if namespace != UNCHANGED:
                self.namespace = namespace
            if document_id != UNCHANGED:
                self.document_id = document_id
            if extra_path_segments != UNCHANGED:
                self.extra_path_segments = extra_path_segments
            self.path_parts = ['', self.namespace, self.document_id] if self.namespace and self.document_id else ['', self.namespace] if self.namespace else ['']
            if self.extra_path_segments:
                self.path_parts = self.path_parts + self.extra_path_segments
            self.path = '/'.join(self.path_parts)
            if query_string != UNCHANGED:
                self.query_string = query_string
            if tenant != UNCHANGED:
                self.tenant = tenant
            status, headers, document = function()
        finally:
            self.namespace = original_namespace
            self.document_id = original_document_id
            self.path = original_path
            self.extra_path_segments = original_extra_path_segments
            self.query_string = original_query_string
            self.path_parts = original_path_parts
            self.tenant = original_tenant
        return status, headers, document

    def recursive_get_document(self, namespace=UNCHANGED, document_id=UNCHANGED, extra_path_segments=UNCHANGED, query_string=UNCHANGED, url=None, tenant=UNCHANGED):
        return self.recurse(self.get_document, namespace, document_id, extra_path_segments, query_string, url, tenant)

    def create_document(self, document, document_id=None):
        """
        This method is called when a POST is made that means 'create'. The 'document' argument is a Python dictionary
        that was created from a rdf/json (ld_json) string in the request.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        201 - Created => headers is a list of headers to return to the client. It should contain at least a location entry
              with the URL of the newly-created resource. If no content_type header is given, it will be set to
              'application/rdf+json+ce'
              body may be an empty list or a dictionary that contains the ld+json representaton of the created object
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        status, headers, container = self.recursive_get_document(query_string=self.query_string+'?non-member-properties' if self.query_string else 'non-member-properties')
        if status == 200:
            return self.insert_document(container, document, document_id)
        else:
            return status, headers, container

    def insert_document(self, container, document, document_id=None):
        if CHECK_ACCESS_RIGHTS:
            status, permissions = self.permissions(container, document)
            if status == 200:
                if not permissions & AC_C:
                    return 403, [], [('', 'not authorized')]
            else:
                return 403, [], [('', 'unable to retrieve permissions. status: %s text: %s' % (status, permissions))]
        document = rdf_json.RDF_JSON_Document(document, '')
        self.complete_document_for_container_insertion(document, container)
        self.complete_document_for_storage_insertion(document)
        self.preprocess_properties_for_storage_insertion(document)
        status, location, result = operation_primitives.create_document(self.user, document, self.request_hostname, self.tenant, self.namespace, document_id)
        if status == 201:
            if self.change_tracking:
                self.generate_change_event(CREATION_EVENT, location)
            # Todo: fix up self.document_id, self.path, self.path_parts to match location url of new document
            self.complete_result_document(result)
            return status, [('Location', str(location))], result
        else:
            return status, [], [('', result)]

    def put_document(self, document):
        return 405, [], [('', 'PUT not allowed')]

    def execute_query(self, query):
        """
        Execute the specified query. Queries are safe and idempotent. That is, they do not have side-effects, and
        (weaker and implied by safe) the result of doing them muultiple times is the same as doing them once. In that
        sense, they are similar to a GET, but done via POST. The 'query' argument is a Python dictionary that was
        created from a json string in the request. The format of the JSON will depend on the database back-end.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        200 - OK => headers is a list of headers to return to the client. If no content_type header is given, it will
              be set to 'application/rdf+json+ce'
              body may be an empty list or a dictionary that contains the json representaton of the query result
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        if not self.namespace or self.document_id: #trailing / or other problem
            return self.bad_path()
        status, result = operation_primitives.execute_query(self.user, query, self.request_hostname, self.tenant, self.namespace)
        return status, [], result

    def execute_action(self, body):
        """
        This method is called when a POST is made that means 'execute action'. The 'body' argument is a Python dictionary
        that was created from a json string in the request. The format of the JSON will depend on the action.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        200 - OK => headers is a list of headers to return to the client. If no content_type header is given, it will
              be set to 'application/rdf+json+ce'
              body may be an empty list or a dictionary that contains the json representaton of the query result
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        return 400, [], [('', 'unknown action')]

    def permissions(self, document, insert_document=None):
        owner = document.get_value(CE+'owner')
        if self.user == str(owner):
            return 200, AC_ALL # owner can do everything
        else:
            resource_group = document.get_value(AC+'resource-group')
            if resource_group:
                permissions_url = url_policy.construct_url(self.request_hostname, self.tenant, 'ac-permissions') + ('?%s&%s' % (quote_query_string(str(resource_group)), quote_query_string(self.user)))
                r = self.intra_system_get(permissions_url)
                if r.status_code == 200:
                    return 200, int(r.text)
                else:
                    return r.status_code, 'url: %s text: %s' % (permissions_url, r.text)
        return 200, 0

    def resource_groups(self):
        resource_group_url = url_policy.construct_url(self.request_hostname, self.tenant, 'ac-resource-groups') + ('?%s' % quote_query_string(self.user))
        r = self.intra_system_get(resource_group_url)
        if r.status_code == 200:
            return json.loads(r.text, object_hook=rdf_json.rdf_json_decoder)
        else:
            return []

    def get_health(self):
        # Before claiming to be healthy, Make sure that we can do outgoing intra_system calls
        intra_system_test_url = url_policy.construct_url(self.request_hostname, self.tenant, 'favicon.ico')
        try:
            with utils.LIMIT_LOGGING_LEVEL_INFO:
                r = self.intra_system_get(intra_system_test_url)
            if r.status_code != 200 and r.status_code != 404: # Note that 404 means that we are able to reach the SYSTEM_HOST, so we're healthy, even if it doesn't implement favicon.ico
                return r.status_code, [], [('','intra_system_get not functioning: %s' % r.text)]
        except ConnectionError as e:
            return 504, [], [('','intra_system_get exception: %s' % e.message)]
        return 200, [('Content-Type', 'text/plain'), ('Content-length', '1')], ['1']

    def prim_get_document(self):
        if not self.document_id and 'rdfs_label=' in self.query_string:
            #TODO: move this to a separate method and call it from get_container() instead of from here
            query_parms=urlparse.parse_qs(self.query_string)
            query = {'_any': {RDFS+'label' : query_parms['rdfs_label'][0]}}
            status, result = operation_primitives.execute_query(self.user, query, self.request_hostname, self.tenant, self.namespace)
            if status == 200:
                logger.info('Successful query for url: %s query: %s number of results: %s', self.request_url(), query, len(result))
                if len(result) == 1:
                    document = result[0]
                    new_url_parts = urlparse.urlparse(document.graph_url)
                    document_id = url_policy.parse_path(new_url_parts.path)[2]
                    self.document_id = document_id
                    self.query_string = ''
                    return 200, document
                elif len(result) > 1:
                    get_all = query_parms.get('all')
                    if get_all and (get_all[0] == 'true'):
                        container_url = self.request_url()
                        container_predicates = {
                            RDF+'type': URI(LDP+'BasicContainer'),
                            LDP+'contains': [URI(resource.default_subject()) for resource in result]
                            }
                        document = rdf_json.RDF_JSON_Document({container_url: container_predicates}, container_url)
                        self.add_member_detail(document, result)
                        return 200, document
                    logger.info('Multiple query matches for url : %s query: %s status: %s', self.request_url(), query, status)
                    return 409, ['Duplicate label, use ?all=true to retrieve the list of resources']
            logger.info('Failed query for url: %s query: %s status: %s', self.request_url(), query, status)
            return 404, ['Not found']
        return operation_primitives.get_document(self.user, self.request_hostname, self.tenant, self.namespace, self.document_id)

    def get_document(self):
        """
        GET the document associated with 'self'.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        200 - OK => headers is a list of headers to return to the client. If no content_type header is given, it will
              be set to 'application/rdf+json+ce'
              body is a dictionary that contains the json (or ld+json) representaton of the resource
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        if not self.document_id and 'rdfs_label=' not in self.query_string:
            return self.get_collection()
        if not self.namespace:
            logger.warn("example_logic_tier GET failed (404; no namespace) request {0}".format(self.request_url()))
            return 404, [], [('', 'no resource with the URL: %s' % self.request_url())]
        status, document = self.prim_get_document()
        if status == 200:
            # we found the document, but is the user entitled to see it?
            if CHECK_ACCESS_RIGHTS:
                status, permissions = self.permissions(document)
                if status == 200:
                    if not permissions & AC_R:
                        return 403, [], [('', 'not authorized')]
                else:
                    logger.warn("example_logic_tier GET failed (403; no permissions) {0}".format(permissions))
                    return 403, [], [('', 'unable to retrieve permissions. status: %s text: %s' % (status, permissions))]
            status, document = self.complete_request_document(document)
            return status, [], document
        else:
            logger.warn("example_logic_tier GET failed (prim_get_document) {0}: {1}".format(status, document))
            return status, [], [('', document)]

    def get_collection(self):
        """
        This method returns a storage collection as a Basic Profile Container.
        TODO: Need to support paging for large collections
        """
        if not self.namespace: # nope, not a pre-existing container resource either
            return self.bad_path()
        # TODO: What access control specs govern these "built-in" collections? Who can see them? What resource-group are they part of?
        container_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace)
        container_properties = {
            RDF+'type': URI(LDP+'DirectContainer'),
            LDP+'membershipResource': URI(container_url),
            LDP+'hasMemberRelation': URI(LDP+'member'),
            CE+'owner': URI(ADMIN_USER),
            AC+'resource-group': self.default_resource_group()
            }
        document = rdf_json.RDF_JSON_Document({ container_url : container_properties }, container_url)
        if self.query_string.endswith('non-member-properties'):
            document.default_subject_url = document.graph_url
            document.graph_url = document.graph_url + '?non-member-properties'
            status = 200
        else:
            status, results = operation_primitives.execute_query(self.user, {}, self.request_hostname, self.tenant, self.namespace)
            if status == 200:
                self.add_member_detail(document, results)
                member_values = []
                for result in results:
                    member_values.append(URI(result.graph_url))
                if len(member_values) != 0:
                    container_properties[LDP+'member'] = member_values
                    container_properties[LDP+'contains'] = member_values
            else:
                return status, [], [('', results)]
        return status, [], document

    def delete_document(self):
        """
        DELETE the document associated with 'self'.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        204 - No content => Successful delete. Headers is an optional list of headers to return to the client.
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        resource_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id)
        status, headers, document = self.get_document()
        if status != 200:
            return status, headers, document
        if CHECK_ACCESS_RIGHTS:
            status, permissions = self.permissions(document)
            if status == 200:
                if not permissions & AC_D:
                    return 403, [], [('', 'not authorized')]
            else:
                return 403, [], [('', 'unable to retrieve permissions. status: %s text: %s' % (status, permissions))]
        if self.document_id is None:
            return self.drop_collection()
        if not self.namespace: #trailing / or other problem
            return self.bad_path()
        # use the information from the document that was fetched, rather than the request params
        new_url_parts = urlparse.urlparse(document.graph_url)
        path_parts, namespace, document_id, extra_path_segments = url_policy.parse_path(new_url_parts.path)
        status, err_msg = operation_primitives.delete_document(self.user, self.request_hostname, self.tenant, namespace, document_id)
        if self.change_tracking:
            resource_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id)
            self.generate_change_event(DELETION_EVENT, resource_url)
        return status, [], [('', err_msg)] if err_msg else []

    def drop_collection(self):
        if not self.namespace: # nope, not a pre-existing container resource either
            return self.bad_path()
        operation_primitives.drop_collection(self.user, self.request_hostname, self.tenant, self.namespace)
        operation_primitives.drop_collection(self.user, self.request_hostname, self.tenant, self.namespace + '_history')
        operation_primitives.drop_collection(self.user, self.request_hostname, self.tenant, self.namespace + '_tracking')
        document_namespace = self.tenant + '/' + self.namespace
        if self.change_tracking and document_namespace in self.trs_builders:
            del self.trs_builders[document_namespace]
        return 204, [], []

    def patch_document(self, request_body):
        """
        PATCH the contents of document associated with 'self'. The 'request_body' argument is a Python dictionary
        that was created from a json string in the request. The format of the JSON will depend on the database back-end.

        The return value is a triple of (status, headers, body). The values of headers and body depends on the status:
        200 - OK => Successful patch. Headers is an optional list of headers to return to the client.
              body is a dictionary that may contain the ld+json representaton of the patched resource
        others => headers may be an empty list or may optionally include headers to return to the client
              body should be a list of pairs, where the first element of the pair identifies the field in error, or is ''.
              The second element of the pair should start with a number, a space, and an optional string explaining the error
        """
        if not self.namespace: #trailing / or other problem
            return self.bad_path()
        resource_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id)
        document = rdf_json.RDF_JSON_Document(request_body, resource_url)
        if CHECK_ACCESS_RIGHTS:
            status, headers, prepatch_document = self.get_document() # TODO: Use prim get
            if status != 200:
                return status, headers, prepatch_document
            status, permissions = self.permissions(prepatch_document)
            if status == 200:
                if not permissions & AC_W:
                    return 403, [], [('', 'not authorized')]
            else:
                return 403, [], [('', 'unable to retrieve permissions. status: %s text: %s' % (status, permissions))]
        if not 'HTTP_CE_REVISION' in self.environ:
            return 400, [], [('', 'Must provide CE-Revision header')]
        revision = self.environ['HTTP_CE_REVISION']
        self.preprocess_properties_for_storage_insertion(document)
        new_url_parts = urlparse.urlparse(document.graph_url)
        path_parts, namespace, document_id, extra_path_segments = url_policy.parse_path(new_url_parts.path)
        status, result = operation_primitives.patch_document(self.user, revision, request_body, self.request_hostname, self.tenant, namespace, document_id)
        if(status == 200):
            get_status, headers, new_document = self.get_document()
            if(get_status == 200):
                if self.change_tracking:
                    self.generate_change_event(MODIFICATION_EVENT, resource_url)
                return 200, headers, new_document
            else:
                return get_status, [], [('', 'Patch was successful but getting the document afterwards failed')]
        else:
            return status, [], [('', result)]

    def document_url(self):
        return url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id)

    def absolute_url(self, relative_url):
        return urlparse.urljoin(self.request_url(), relative_url)

    def request_url(self):
        return url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id, self.extra_path_segments, self.query_string)

    def add_member_detail(self, container, result):
        for rdf_json_document in result:
            # we will include the membership triples, plus any triples in the same documents. This will pick up the triples that describe the members.
            for subject, subject_node in rdf_json_document.iteritems():
                #warning - tricky code. If a membership subject is set to the collection, the member documents will contain triples whose subject is the container itself.
                #To avoid infinite loops, we must not call complete_result_document on this subject. To avoid this, we see if the subject is already in the result
                new_subject = subject not in container
                for predicate, value_array in subject_node.iteritems():
                    container.add_triples(subject, predicate, value_array)
                if new_subject:
                    self.complete_result_document(rdf_json.RDF_JSON_Document(container.data, subject))

    def add_bpc_member_properties(self, container, query=None):
        ldp_resource = container.get_value(LDP+'membershipResource')
        ldp_hasMember = container.get_value(LDP+'hasMemberRelation')
        ldp_isMemberOf = container.get_value(LDP+'isMemberOfRelation')
        ldp_containerSortPredicate = container.get_value(CE+'containerSortPredicates')
        if not ldp_resource:
            raise ValueError('must provide a membership resource')
        elif ldp_hasMember:
            if ldp_isMemberOf:
                raise ValueError('cannot provide both hasMember and isMemberOf predicates')
            if not query:
                query = {str(ldp_resource) : {str(ldp_hasMember) : '_any'}}
        elif ldp_isMemberOf: # subject or object may be set, but not both
            if ldp_hasMember:
                raise ValueError('cannot provide both hasMember and isMemberOf predicates')
            if not query:
                if ldp_resource == '_any':
                    query = {'_any': {str(ldp_isMemberOf) : '_any'}}
                else:
                    query = {'_any': {str(ldp_isMemberOf) : ldp_resource}}
        else:
            return 200, container
        if CHECK_ACCESS_RIGHTS:
            resource_groups = self.resource_groups()
            query['_any2'] = {}
            if len(resource_groups) > 0:
                if len(resource_groups) > 1:
                    resource_group_value = {'$in': resource_groups}
                else:
                    resource_group_value = resource_groups[0]
                query['_any2']['$or'] = [{CE+'owner': URI(self.user)}, {AC+'resource-group': resource_group_value}]
            else:
                query['_any2'][CE+'owner'] = URI(self.user)
        if ldp_containerSortPredicate:
            query = {'$query': query, '$orderby' : {ldp_containerSortPredicate: 1}}
        status, result = operation_primitives.execute_query(self.user, query, self.request_hostname, self.tenant, self.namespace)
        if status == 200:
            self.add_member_detail(container, result)
            return 200, container
        else:
            return status, [('', result)]

    def complete_container(self, document):
        if self.query_string.endswith('non-member-properties'):
            document.default_subject_url = document.graph_url
            document.graph_url = document.graph_url + '?non-member-properties'
            return 200, document
        else:
            status, document = self.add_bpc_member_properties(document)
            if status == 200:
                members = document.get_container_members()
                if len(members) > 0:
                    document.set_value(LDP+'contains', members)
            return status, document

    def complete_result_document(self, document):
        return 200, document

    def complete_request_document(self, document):
        self.complete_result_document(document) # will add any calculated properties, including owned containers.
        document_url = document.graph_url #self.document_url()
        expected_url = self.request_url()
        if document.graph_url != expected_url: #usually a bad thing, unless it's an owned container that was being asked for
            owned_container_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id, self.extra_path_segments)
            if owned_container_url in document.data and URI(LDP+'DirectContainer') in document.get_values(RDF+'type', owned_container_url):
                document.graph_url = owned_container_url
                return self.complete_container(document)
        if URI(LDP+'DirectContainer') in document.get_values(RDF+'type'):
            status, document = self.complete_container(document)
        else:
            status = 200
        if document.graph_url != expected_url:
            return 404, [('', 'no document matching that url: %s , graph_url: %s' % (self.request_url(), document.graph_url))]
        else:
            return status, document

    def complete_document_for_container_insertion(self, document, container):
        ldp_resource = container.get_value(LDP+'membershipResource')
        ldp_hasMember = container.get_value(LDP+'hasMemberRelation')
        ldp_isMemberOf = container.get_value(LDP+'isMemberOfRelation')
        if not ldp_resource:
            raise ValueError('must provide container resource: %s' % container)
        elif ldp_hasMember:
            if ldp_isMemberOf:
                raise ValueError('cannot provide both hasMember and isMemberOf predicates: %s' % container)
            # store the membership triple in the new document
            document.add_triples(ldp_resource, ldp_hasMember, URI('')) # last argument is null relative address of resource-to-be
        elif ldp_isMemberOf:
            if ldp_hasMember:
                raise ValueError('cannot provide both hasMember and isMemberOf predicates: %s' % container)
            # store the membership triple in the new document
            document.add_triple('', ldp_isMemberOf, ldp_resource) # first argument is null relative address of resource-to-be
        else:
            raise ValueError('must provide a membership predicate')

    def complete_document_for_storage_insertion(self, document):
        document.set_value(CE+'owner', URI(self.user))
        if document.get_value(AC+'resource-group') is None:
            default_resource_group = self.default_resource_group()
            if default_resource_group:
                document.set_value(AC+'resource-group', default_resource_group)

    def preprocess_properties_for_storage_insertion(self, rdf_json):
        pass

    def default_resource_group(self):
        return URI(url_policy.construct_url(self.request_hostname, self.tenant)) # default is the root resource (i.e., '/')

    def add_container(self, document, container_url, membership_resource, membership_predicate, member_is_object=False, container_resource_group=None, container_owner=None):
        if container_resource_group is None:
            container_resource_group = self.default_resource_group()
        document[container_url] = {
            RDF+'type': URI((LDP+'DirectContainer')),
            LDP+'membershipResource' : URI(membership_resource),
            (LDP+'hasMemberRelation' if member_is_object else LDP+'isMemberOfRelation') : URI(membership_predicate),
            AC+'resource-group' : container_resource_group
            }
        if container_owner is not None:
            document[container_url][CE+'owner'] = container_owner

    def create_container(self, container_url, membership_resource, membership_predicate, member_is_object=False):
        document = rdf_json.RDF_JSON_Document ({}, container_url)
        self.add_container(document, container_url, membership_resource, membership_predicate, member_is_object, None, None)
        return document

    def container_from_membership_resource_in_query_string(self, membership_predicate, member_is_object=False, membership_resource_key=None):
        if self.query_string.endswith('?non-member-properties'):
            qs = self.query_string[:-22]
        else:
            qs = self.query_string
        container_url = url_policy.construct_url(self.request_hostname, self.tenant, self.namespace, self.document_id, self.extra_path_segments, qs)
        if membership_resource_key:
            query_parms=urlparse.parse_qs(qs)
            membership_resource = self.absolute_url(urllib.unquote(query_parms[membership_resource_key][0]))
        else:
            membership_resource = self.absolute_url(urllib.unquote(qs))
        document = self.create_container(container_url, membership_resource, membership_predicate, member_is_object)
        status, document = self.complete_result_document(document)
        return status, [], document

    def query_resource_document(self, membership_resource, membership_predicate, make_result, member_is_object=False):
        if member_is_object:
            query = {str(membership_resource) : {str(membership_predicate) : '_any'}}
        else:
            query = {'_any': {str(membership_predicate) : URI(membership_resource)}}
        status, result = operation_primitives.execute_query(self.user, query, self.request_hostname, self.tenant, self.namespace)
        if status == 200:
            if len(result) == 0:
                return 404, [], [('', '404 error - no such virtual document %s' % query)]
            elif len(result) == 1:
                return make_result(result)
            else:
                return 404, [], [('', '404 error - ambiguous virtual document - should be a LDPC collection?')]
        else:
            return status, [], [('', result)]

    def resource_from_membership_info(self, membership_resource, membership_predicate, member_is_object=False):
        def make_result(result):
            document = result[0]
            document.add_triples(self.request_url(), OWL+'sameAs', document.graph_url)
            self.complete_result_document(document)
            return 200, [('Content-Location', str(document.graph_url))], document
        return self.query_resource_document(membership_resource, membership_predicate, make_result, member_is_object)

    def resource_from_membershipResource_in_query_string(self, membership_predicate, member_is_object=False):
        membership_resource = self.absolute_url(urllib.unquote(self.query_string))
        return self.resource_from_membership_info(membership_resource, membership_predicate, member_is_object)

    def resource_from_object_in_query_string(self, membership_predicate, member_is_object=False):
        print 'resource_from_object_in_query_string is deprecated - use resource_from_membershipResource_in_query_string'
        return self.resource_from_membershipResource_in_query_string(membership_predicate, member_is_object)

    def add_resource_triples(self, document, membership_resource, membership_predicate, member_is_object=False):
        def make_result(result):
            self.add_member_detail(document, result)
            return 200, [], document
        return self.query_resource_document(membership_resource, membership_predicate, make_result, member_is_object)

    def add_owned_container(self, document, container_predicate, container_path_segment, membership_predicate, member_is_object=False):
        document_url = document.graph_url
        document.add_triples(document_url, container_predicate, URI(document_url + '/' + container_path_segment))
        if self.request_url().startswith(document_url) and self.extra_path_segments != None and len(self.extra_path_segments) == 1 and self.extra_path_segments[0] == container_path_segment:
            # client doesn't really want the document, just its owned container
            container_resouce_group = document.get_value(AC+'resource-group')
            container_owner = document.get_value(CE+'owner')
            container_graph_url = document_url + '/' + container_path_segment
            self.add_container(document, container_graph_url, document_url, membership_predicate, member_is_object, container_resouce_group, container_owner)

    def add_inverse(self,
document, property_predicate, membership_shortname, namespace=None): if not namespace: namespace = self.namespace #FB query_string = quote_query_string(self.document_url()) #FB GET http%3A//localhost%3A5001/xdo/webserver/deployments generates: #FB ce_group: http://localhost:5001/sx/ce_for_deployment?http%3A//localhost%3A5001/xdo/webserver #FB instead of: #FB ce_group: http://localhost:5001/sx/ce_for_deployment?http%3A//localhost%3A5001/xdo/webserver_v1 query_string = quote_query_string(document.graph_url) url = url_policy.construct_url(self.request_hostname, self.tenant, namespace, membership_shortname, query_string=query_string) document.set_value(property_predicate, URI(url)) def generate_change_event(self, event_type, resource_uri): document_namespace = self.tenant + '/' + self.namespace #Todo: - do this better if document_namespace not in self.trs_builders: self.trs_builders[document_namespace] = TrackedResourceSetBuilder(self.request_hostname, document_namespace) self.trs_builders[document_namespace].addChangeEntry(resource_uri, event_type) def namespace_mappings(self): return NAMESPACE_MAPPINGS def convert_rdf_json_to_compact_json(self, document): converter = rdf_json.RDF_json_to_compact_json_converter(self.namespace_mappings()) compact_json = converter.convert_to_compact_json(document) return compact_json def convert_compact_json_to_rdf_json(self, document): converter = rdf_json.Compact_json_to_rdf_json_converter(self.namespace_mappings()) result = converter.convert_to_rdf_json(document) return result def convert_rdf_json_to_html(self, document): from example_rdf_json_to_html_converter import Rdf_json_to_html_converter return Rdf_json_to_html_converter().convert_rdf_json_to_html(document) def bad_path(self): return 400, [], [('', '4001 - bad path: %s (trailing / or path too short or other problem)' % self.path)] def check_input_value(self, rdf_document, predicate, field_errors, value_type=None, required=True, subject=None, expected_value=None): return rdf_document.check_value(predicate, field_errors, value_type, required, subject, expected_value) def intra_system_get(self, request_url, headers=None): if not headers: headers = dict() actual_url = utils.set_resource_host_header(str(request_url), headers) if not 'Authorization' in headers: headers['Authorization'] = 'Bearer %s' % utils.get_jwt(self.environ) if not 'Accept' in headers: headers['Accept'] = 'application/rdf+json+ce' logger.debug('intra_system_get request_url: %s actual_url: %s headers: %s', request_url, actual_url, headers) return requests.get(actual_url, headers=headers) def intra_system_post(self, request_url, data, headers=None): if not headers: headers = dict() if not 'Authorization' in headers: headers['Authorization'] = 'Bearer %s' % utils.get_jwt(self.environ) if not 'Content-Type' in headers: headers['Content-Type'] = 'application/rdf+json+ce' if not 'CE-Post-Reason' in headers: headers['CE-Post-Reason'] = 'CE-Create' actual_url = utils.set_resource_host_header(str(request_url), headers) logger.debug('intra_system_post request_url: %s actual_url: %s headers: %s data: %s', request_url, actual_url, headers,data) return requests.post(actual_url, headers=headers, data=json.dumps(data, cls=rdf_json.RDF_JSON_Encoder), verify=False) def intra_system_patch(self, request_url, revision, data, headers=None): if not headers: headers = dict() if not 'Authorization' in headers: headers['Authorization'] = 'Bearer %s' % utils.get_jwt(self.environ) if not 'Content-Type' in headers: headers['Content-Type'] = 
'application/rdf+json+ce' headers['CE-Revision'] = str(revision) actual_url = utils.set_resource_host_header(str(request_url), headers) logger.debug('intra_system_patch request_url: %s actual_url: %s headers: %s data: %s', request_url, actual_url, headers,data) return requests.patch(actual_url, headers=headers, data=json.dumps(data, cls=rdf_json.RDF_JSON_Encoder), verify=False) def intra_system_delete(self, request_url, headers=None): if not headers: headers = dict() if not 'Authorization' in headers: headers['Authorization'] = 'Bearer %s' % utils.get_jwt(self.environ) actual_url = utils.set_resource_host_header(str(request_url), headers) logger.debug('intra_system_delete request_url: %s actual_url: %s headers: %s', request_url, actual_url, headers) return requests.delete(actual_url, headers=headers, verify=False) def intra_system_put(self, request_url, data, headers=None): if not headers: headers = dict() if not 'Authorization' in headers: headers['Authorization'] = 'Bearer %s' % utils.get_jwt(self.environ) if not 'Content-Type' in headers: headers['Content-Type'] = 'application/rdf+json+ce' actual_url = utils.set_resource_host_header(str(request_url), headers) logger.debug('intra_system_put request_url: %s actual_url: %s headers: %s data: %s', request_url, actual_url, headers,data) return requests.put(actual_url, headers=headers, data=json.dumps(data, cls=rdf_json.RDF_JSON_Encoder), verify=False) def get_header(header, headers, default=None): headerl = header.lower() for item in headers: if item[0].lower() == headerl: return item[1] return default
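The hasMemberRelation/isMemberOfRelation branch in add_bpc_member_properties above is the heart of LDP direct-container membership resolution. A minimal, self-contained sketch of that query construction (the function and constant names here are illustrative, not part of the original module):

# Sketch of the membership-query shapes built by add_bpc_member_properties.
# '_any' is the wildcard understood by the storage layer's execute_query.
LDP = 'http://www.w3.org/ns/ldp#'  # assumed value of the LDP prefix

def membership_query(membership_resource, has_member=None, is_member_of=None):
    if has_member and is_member_of:
        raise ValueError('cannot provide both hasMember and isMemberOf predicates')
    if has_member:
        # members appear as objects of triples whose subject is the membership resource
        return {str(membership_resource): {str(has_member): '_any'}}
    if is_member_of:
        # members appear as subjects of triples pointing at the membership resource
        if membership_resource == '_any':
            return {'_any': {str(is_member_of): '_any'}}
        return {'_any': {str(is_member_of): membership_resource}}
    raise ValueError('must provide a membership predicate')

print(membership_query('http://example.org/res/1', is_member_of=LDP + 'member'))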
license: apache-2.0
hash: -301956451690148200
avg line length: 60.263909
max line length: 200
alphanum fraction: 0.633959
autogenerated: false
repo: harsh-a1/repeater-testing
path: server.py
copies: 1
size (bytes): 6990
# Copyright (C) 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from bzrlib.bzrdir import BzrDir from bzrlib.repository import Repository from bzrlib.inventory import InventoryDirectory, InventoryFile from bzrlib.osutils import splitpath from bzrlib.plugins.git.fetch import import_git_objects from bzrlib.plugins.git.mapping import default_mapping from dulwich.server import Backend from dulwich.pack import Pack, PackData, write_pack_index_v2 from dulwich.objects import ShaFile, Commit, Tree, Blob import os, tempfile import stat S_IFGITLINK = 0160000 #S_IFREG | 0664 # *Might* see this; would fail fsck --strict class BzrBackend(Backend): def __init__(self, directory): self.directory = directory self.mapping = default_mapping def get_refs(self): """ return a dict of all tags and branches in repository (and shas) """ ret = {} repo_dir = BzrDir.open(self.directory) repo = repo_dir.open_repository() for branch in repo.find_branches(using=True): #FIXME: Need to get branch path relative to its repository and use this instead of nick ret["refs/heads/"+branch.nick] = self.mapping.revision_id_bzr_to_foreign(branch.last_revision()) return ret def apply_pack(self, refs, read): """ apply pack from client to current repository """ fd, path = tempfile.mkstemp(suffix=".pack") f = os.fdopen(fd, 'w') f.write(read()) f.close() p = PackData(path) entries = p.sorted_entries() write_pack_index_v2(path[:-5]+".idx", entries, p.calculate_checksum()) def get_objects(): pack = Pack(path[:-5]) for obj in pack.iterobjects(): yield obj target = Repository.open(self.directory) target.lock_write() try: target.start_write_group() try: import_git_objects(target, self.mapping, iter(get_objects())) finally: target.commit_write_group() finally: target.unlock() for oldsha, sha, ref in refs: if ref[:11] == 'refs/heads/': branch_nick = ref[11:] try: target_dir = BzrDir.open(self.directory + "/" + branch_nick) except: target_dir = BzrDir.create(self.directory + "/" + branch_nick) try: target_branch = target_dir.open_branch() except: target_branch = target_dir.create_branch() rev_id = self.mapping.revision_id_foreign_to_bzr(sha) target_branch.generate_revision_history(rev_id) def fetch_objects(self, determine_wants, graph_walker, progress): """ yield git objects to send to client """ wants = determine_wants(self.get_refs()) commits_to_send = set([self.mapping.revision_id_foreign_to_bzr(w) for w in wants]) rev_done = set() obj_sent = set() repo = Repository.open(self.directory) objects = set() repo.lock_read() try: have = graph_walker.next() while have: rev_done.add(have) if repo.has_revision(self.mapping.revision_id_foregin_to_bzr(sha)): graph_walker.ack(have) have = graph_walker.next() while commits_to_send: commit = commits_to_send.pop() if commit in rev_done: continue rev_done.add(commit) rev = repo.get_revision(commit) commits_to_send.update([p for p in rev.parent_ids if 
not p in rev_done]) for sha, obj in inventory_to_tree_and_blobs(repo, self.mapping, commit): if sha not in obj_sent: obj_sent.add(sha) objects.add(obj) objects.add(revision_to_commit(rev, self.mapping, sha)) finally: repo.unlock() return (len(objects), iter(objects)) def revision_to_commit(rev, mapping, tree_sha): """ Turn a Bazaar revision in to a Git commit :param tree_sha: HACK parameter (until we can retrieve this from the mapping) :return dulwich.objects.Commit represent the revision: """ commit = Commit() commit._tree = tree_sha for p in rev.parent_ids: commit._parents.append(mapping.revision_id_bzr_to_foreign(p)) commit._message = rev.message commit._committer = rev.committer if 'author' in rev.properties: commit._author = rev.properties['author'] else: commit._author = rev.committer commit._commit_time = long(rev.timestamp) commit.serialize() return commit def inventory_to_tree_and_blobs(repo, mapping, revision_id): stack = [] cur = "" tree = Tree() inv = repo.get_inventory(revision_id) for path, entry in inv.iter_entries(): while stack and not path.startswith(cur): tree.serialize() sha = tree.sha().hexdigest() yield sha, tree t = (stat.S_IFDIR, splitpath(cur)[-1:][0].encode('UTF-8'), sha) cur, tree = stack.pop() tree.add(*t) if type(entry) == InventoryDirectory: stack.append((cur, tree)) cur = path tree = Tree() if type(entry) == InventoryFile: #FIXME: We can make potentially make this Lazy to avoid shaing lots of stuff # and having all these objects in memory at once blob = Blob() _, blob._text = repo.iter_files_bytes([(entry.file_id, revision_id, path)]).next() sha = blob.sha().hexdigest() yield sha, blob name = splitpath(path)[-1:][0].encode('UTF-8') mode = stat.S_IFREG | 0644 if entry.executable: mode |= 0111 tree.add(mode, name, sha) while len(stack) > 1: tree.serialize() sha = tree.sha().hexdigest() yield sha, tree t = (stat.S_IFDIR, splitpath(cur)[-1:][0].encode('UTF-8'), sha) cur, tree = stack.pop() tree.add(*t) tree.serialize() yield tree.sha().hexdigest(), tree
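The negotiation loop in fetch_objects above calls self.mapping.revision_id_foregin_to_bzr(sha): the method name is misspelled and sha is not bound at that point, so the first "have" reported by the client would raise a NameError. From the surrounding code it almost certainly means the have value just read from the graph walker; a corrected sketch of the loop under that assumption:

# Corrected sketch of the graph-walker loop in fetch_objects.
have = graph_walker.next()
while have:
    rev_done.add(have)
    # was: repo.has_revision(self.mapping.revision_id_foregin_to_bzr(sha))
    if repo.has_revision(self.mapping.revision_id_foreign_to_bzr(have)):
        graph_walker.ack(have)
    have = graph_walker.next()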
license: gpl-2.0
hash: 6657052916029055000
avg line length: 32.768116
max line length: 108
alphanum fraction: 0.58927
autogenerated: false
repo: sernst/cauldron
path: cauldron/cli/commands/open/__init__.py
copies: 1
size (bytes): 5560
import os import typing from argparse import ArgumentParser import cauldron from cauldron import cli from cauldron import environ from cauldron.cli.commands.listing import discovery from cauldron.cli.commands.open import actions from cauldron.cli.commands.open import opener from cauldron.cli.commands.open import remote as remote_opener from cauldron.cli.interaction import autocompletion from cauldron.environ import Response NAME = 'open' DESCRIPTION = 'Opens a cauldron project' def populate( parser: ArgumentParser, raw_args: typing.List[str], assigned_args: dict ): """...""" parser.add_argument( 'path', nargs='?', default=None, help=cli.reformat( """ A path to the directory containing a cauldron project. Special location paths can also be used. """ ) ) parser.add_argument( '-s', '--show', dest='show_in_browser', default=False, action='store_true', help=cli.reformat( """ The previously stored state of the project will open in the browser for display if this flag is included. """ ) ) parser.add_argument( '-l', '--last', dest='last_opened_project', default=False, action='store_true', help=cli.reformat(""" The open command will open the most recently opened project if this flag is included. """) ) parser.add_argument( '-r', '--recent', dest='a_recent_project', default=False, action='store_true', help=cli.reformat( """ Displays a list of recently opened projects for you to select from. """ ) ) parser.add_argument( '-a', '--available', '--all', dest='list_available', default=False, action='store_true', help=cli.reformat( """ List all known projects to choose one to open. """ ) ) parser.add_argument( '--forget', dest='forget', default=False, action='store_true', help=cli.reformat('Forget that this project was opened') ) def execute( context: cli.CommandContext, path: str = None, last_opened_project: bool = False, a_recent_project: bool = False, show_in_browser: bool = False, list_available: bool = False, forget: bool = False, results_path: str = None ) -> Response: """...""" response = context.response path = path.strip('"') if path else None if list_available: path = actions.select_from_available(response) if not path: return response if last_opened_project: path = actions.fetch_last(response) if not path: return response elif a_recent_project: path = actions.fetch_recent(response) if not path: return response elif not path or not path.strip(): discovery.echo_known_projects(response) return response else: p = actions.fetch_location(path) path = p if p else path if context.remote_connection.active: environ.remote_connection.reset_sync_time() response.consume(remote_opener.sync_open( context=context, path=path, forget=forget )) else: response.consume(opener.open_project( path=path, forget=forget, results_path=results_path )) if not response.failed and show_in_browser: cli.open_in_browser(cauldron.project.internal_project) return response def autocomplete(segment: str, line: str, parts: typing.List[str]): """ :param segment: :param line: :param parts: :return: """ if parts[-1].startswith('-'): return autocompletion.match_flags( segment=segment, value=parts[-1], shorts=['s', 'l', 'r', 'a'], longs=['show', 'last', 'recent', 'available', 'forget'] ) if len(parts) == 1: value = parts[0] if value.startswith('@examples:'): path_segment = value.split(':', 1)[-1] return autocompletion.match_path( segment, environ.paths.resources('examples', path_segment), include_files=False ) if value.startswith('@home:'): path_segment = value.split(':', 1)[-1] return autocompletion.match_path( segment, 
environ.paths.home(path_segment), include_files=False ) environ.configs.load() aliases = environ.configs.fetch('folder_aliases', {}) matches = ['@{}:'.format(x) for x in aliases.keys()] for m in matches: if value.startswith(m): return autocompletion.match_path( segment, environ.paths.clean(os.path.join( aliases[m[1:-1]]['path'], value[-1].split(':', 1)[-1] )), include_files=False ) matches.append('@examples:') matches.append('@home:') if value.startswith('@'): return autocompletion.matches(segment, value, matches) return autocompletion.match_path(segment, value)
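populate and execute above follow the usual two-phase command pattern: populate declares flags on an ArgumentParser, and the parsed namespace is forwarded to execute as keyword arguments. A self-contained sketch of that dispatch (the two *_sketch functions are illustrative stand-ins, not part of cauldron):

from argparse import ArgumentParser

def populate_sketch(parser):
    # mirrors the shape of populate(): optional positional path plus boolean flags
    parser.add_argument('path', nargs='?', default=None)
    parser.add_argument('-l', '--last', dest='last_opened_project',
                        default=False, action='store_true')

def execute_sketch(path=None, last_opened_project=False):
    return 'open-last' if last_opened_project else (path or 'list-projects')

parser = ArgumentParser(prog='open')
populate_sketch(parser)
print(execute_sketch(**vars(parser.parse_args(['--last']))))  # -> open-last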
license: mit
hash: -5139337195299121000
avg line length: 26.254902
max line length: 79
alphanum fraction: 0.544784
autogenerated: false
repo: sbird/fake_spectra
path: fake_spectra/tempdens.py
copies: 1
size (bytes): 4005
"""File to find the temperature density relation of the forest and make a temperature density plot weighted by HI fraction. Main function is fit_td_rel_plot()""" import numpy as np from scipy.optimize import leastsq import matplotlib from . import abstractsnapshot as absn from . import unitsystem as units from .gas_properties import GasProperties from .ratenetworkspectra import RateNetworkGas matplotlib.use("PDF") import matplotlib.pyplot as plt def mean_density(hub, redshift, unit, omegab=0.0465): """Get mean gas density at some redshift.""" unit = units.UnitSystem() #in g cm^-3 rhoc = unit.rho_crit(hub) #Convert to atoms per cm^-3 rhoc /= unit.protonmass nH = rhoc * omegab * (1 + redshift)**3 return nH def fit_temp_dens_relation(logoverden, logT): """Fit a temperature density relation.""" ind = np.where((logoverden > 0.0) * (logoverden < 1.0) * (logT > 0.1) * (logT < 5.0)) logofor = logoverden[ind] logtfor = logT[ind] def min_func(param): """Function to minimize: power law fit to temperature density relation.""" logT0 = param[0] gammam1 = param[1] #print(param) return logtfor - (logT0 + gammam1 * logofor) res = leastsq(min_func, np.array([np.log10(1e4), 0.5]), full_output=True) params = res[0] if res[-1] <= 0: print(res[3]) return 10**params[0], params[1] + 1 def get_temp_meandensity(overden, Temp): """Get the temperature at the mean density following Puchwein 2018, 1801.04931. Median temperature between 0.95 and 1.05 x mean cosmic baryon density""" ind = np.where((overden > 0.95)*(overden < 1.05)) return np.median(Temp[ind]) def fit_td_rel_plot(num, base, nhi=True, nbins=500, gas="raw", plot=True): """Make a temperature density plot of neutral hydrogen or gas. Also fit a temperature-density relation for the total gas (not HI). Arguments: num - snapshot number base - snapshot base directory nbins - number of bins to use for the T-rho histogram gas - if "raw" use snapshot values for temperature and neutral fraction. Otherwise use rate network values. 
nhi - if True, plot neutral hydrogen, otherwise plot total gas density plot - if True, make a plot, otherwise just do the fit """ snap = absn.AbstractSnapshotFactory(num, base) unit = snap.get_units() redshift = 1./snap.get_header_attr("Time") - 1 hubble = snap.get_header_attr("HubbleParam") if gas == "raw": rates = GasProperties(redshift, snap, hubble, units=unit) else: rates = RateNetworkGas(redshift, snap, hubble) temp = rates.get_temp(0, -1) dens = rates.get_code_rhoH(0, -1) mean_dens = mean_density(hubble, redshift, unit=unit, omegab=snap.get_omega_baryon()) T00 = get_temp_meandensity(dens/mean_dens, temp) print("z=%f T0(K) = %f" %(redshift, T00)) logdens = np.log10(dens) logT = np.log10(temp) (T0, gamma) = fit_temp_dens_relation(logdens - np.log10(mean_dens), logT) print("z=%f [fit] T0(K) = %f, gamma = %g" % (redshift, T0, gamma)) if plot: if nhi: nhi = rates.get_reproc_HI(0, -1) else: nhi = dens hist, dedges, tedges = np.histogram2d(logdens-np.log10(mean_dens), logT, bins=nbins, weights=nhi, density=True) plt.imshow(hist.T, interpolation='nearest', origin='low', extent=[dedges[0], dedges[-1], tedges[0], tedges[-1]], cmap=plt.cm.cubehelix_r, vmax=0.75, vmin=0.01) plt.plot(0, np.log10(T0), '*', markersize=10, color="gold") dd = np.array([-2,-1,0,1,2]) plt.xticks(dd, [r"$10^{%d}$" % d for d in dd]) tt = np.array([2000, 3000, 5000, 10000, 20000, 30000, 50000, 100000]) plt.yticks(np.log10(tt), tt//1000) plt.ylabel(r"T ($10^3$ K)") plt.xlabel(r"$\rho / \bar{\rho}$") plt.xlim(-2,2) plt.ylim(3.4,5) plt.colorbar() plt.tight_layout() return T0, gamma
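fit_temp_dens_relation fits log10 T = log10 T0 + (gamma - 1) * log10(rho/rho_mean) over the cut 0 < log10(overdensity) < 1. A quick self-check on synthetic data (the parameter values are arbitrary, chosen only to exercise the fit) confirms the inputs come back out:

import numpy as np
from scipy.optimize import leastsq

def fit_td(logoverden, logT):
    # condensed copy of fit_temp_dens_relation for a standalone check
    ind = np.where((logoverden > 0.0) * (logoverden < 1.0) * (logT > 0.1) * (logT < 5.0))
    logofor, logtfor = logoverden[ind], logT[ind]
    params = leastsq(lambda p: logtfor - (p[0] + p[1] * logofor),
                     np.array([np.log10(1e4), 0.5]))[0]
    return 10**params[0], params[1] + 1

rng = np.random.RandomState(0)
logd = rng.uniform(-0.5, 1.5, 10000)
logt = np.log10(1.2e4) + 0.4 * logd + rng.normal(0, 0.05, logd.size)
print(fit_td(logd, logt))  # approximately (12000, 1.4)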
license: mit
hash: 827204182449320300
avg line length: 35.743119
max line length: 167
alphanum fraction: 0.632959
autogenerated: false
repo: Brunel-Visualization/Brunel
path: python/brunel/brunel_main.py
copies: 1
size (bytes): 5258
# Copyright (c) 2015 IBM Corporation and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import io
import uuid
import os
import inspect
import fnmatch
from py4j.java_gateway import JavaGateway
from py4j.protocol import Py4JJavaError  # was missing: Py4JJavaError is caught below
import sys
import pkg_resources

import brunel.brunel_util as brunel_util

import jinja2 as jin
from IPython.display import Javascript, HTML
from IPython.display import display as ipydisplay

# JS & HTML Files.
D3_TEMPLATE_FILE = "D3_Template.js"
D3_TEMPLATE_HTML_FILE = "D3_Template.html"

# Jinja templates
templateLoader = jin.PackageLoader("brunel", "")
templateEnv = jin.Environment(loader=templateLoader)
D3_TEMPLATE = templateEnv.get_template(D3_TEMPLATE_FILE)
D3_TEMPLATE_HTML = templateEnv.get_template(D3_TEMPLATE_HTML_FILE)

# Main version number x.x is used for JS file versions
brunel_raw_version = pkg_resources.get_distribution("brunel").version.split(".")
brunel_version = brunel_raw_version[0] + "." + brunel_raw_version[1]


def display(brunel, data, width=800, height=600, online_js=False):
    csv = None
    if data is not None:
        csv = to_csv(data)
    # unique identifier for HTML tags
    visid = "visid" + str(uuid.uuid1())
    controlsid = "controlsid" + str(uuid.uuid1())
    result = brunel_java_call(csv, brunel, width, height, visid, controlsid)
    return d3_output(result, visid, controlsid, width, height, online_js)


def to_csv(df):
    # If user has done something to cause a named Index, preserve it
    use_index = False
    if df.index.name is not None:
        use_index = True
    # CSV to pass to service
    # Code is different in python 2 vs. 3
    if sys.version_info < (3, 0):
        csvIO = io.StringIO()
        df.to_csv(csvIO, index=use_index, encoding='utf-8')
        csv = csvIO.getvalue()
        # encode the unicode buffer to a byte string;
        # the original str(csv, errors="ignore") raises a TypeError on unicode
        return csv.encode('utf-8', 'ignore')
    else:
        csvIO = io.StringIO()
        df.to_csv(csvIO, index=use_index)
        csv = csvIO.getvalue()
        return csv


# Uses Py4J to call the main Brunel D3 integration method
def brunel_java_call(data, brunel_src, width, height, visid, controlsid):
    try:
        return brunel_entry.createBrunelJSON(data, brunel_src, int(width), int(height), visid, controlsid)
    except Py4JJavaError as exception:
        # Py4JJavaError has no message() method; use its string form
        raise ValueError(str(exception))


def get_dataset_names(brunel_src):
    return brunel_entry.getDatasetNames(brunel_src)


def cacheData(data_key, data):
    brunel_entry.cacheData(data_key, data)


# D3 response should contain the D3 JS and D3 CSS
def d3_output(response, visid, controlsid, width, height, online_js):
    results = json.loads(response)
    d3js = results["js"]
    d3css = results["css"]
    jsloc = brunel_util.JS_LOC
    # Forces online loading of JS from brunelvis.
    if online_js:
        jsloc = "https://brunelvis.org/js"
    html = D3_TEMPLATE_HTML.render({'jsloc': jsloc, 'd3css': d3css, 'visId': visid, 'width': width,
                                    'height': height, 'controlsid': controlsid, 'version': brunel_version})
    # side effect pushes required D3 HTML to the client
    ipydisplay(HTML(html))
    js = D3_TEMPLATE.render({'jsloc': jsloc, 'd3loc': brunel_util.D3_LOC, 'topojsonloc': brunel_util.TOPO_JSON_LOC,
                             'd3js': d3js, 'version': brunel_version})
    return Javascript(js)


# File search given a path. Used to find the JVM if needed
def find_file(pattern, path):
    result = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                result.append(os.path.join(root, name))
    return result


# Start the Py4J Gateway
def start_gateway():
    # Find the classpath for the required Brunel Jar files
    lib_dir = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), "lib")
    brunel_core_jar = lib_dir + "/brunel-core-" + brunel_version + ".jar"
    brunel_data_jar = lib_dir + "/brunel-data-" + brunel_version + ".jar"
    gson_jar = lib_dir + "/gson-2.3.1.jar"
    java_classpath = brunel_core_jar + os.pathsep + brunel_data_jar + os.pathsep + gson_jar
    # Define which JVM to use
    custom_java_path = None
    if brunel_util.JVM_PATH != "":
        custom_java_path = brunel_util.JVM_PATH
    # Start it up
    return JavaGateway.launch_gateway(classpath=java_classpath, javaopts=["-Djava.awt.headless=true"],
                                      java_path=custom_java_path)


# Py4J Initialization and define main brunel entry
gateway = start_gateway()
brunel_entry = gateway.jvm.org.brunel.util.D3Integration
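start_gateway above relies on py4j's ability to spawn a JVM with an explicit classpath. A minimal, self-contained version of that launch pattern (the jar names here are placeholders, not the real Brunel artifacts):

import os
from py4j.java_gateway import JavaGateway

# placeholder jars; the real code derives these from the installed package
classpath = os.pathsep.join(["lib/brunel-core-2.6.jar", "lib/gson-2.3.1.jar"])
gateway = JavaGateway.launch_gateway(classpath=classpath,
                                     javaopts=["-Djava.awt.headless=true"])
# any class on the classpath is then reachable via dotted access, e.g.
# gateway.jvm.org.brunel.util.D3Integration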
license: apache-2.0
hash: 552569149606619100
avg line length: 34.288591
max line length: 111
alphanum fraction: 0.659376
autogenerated: false
repo: joshmoore/openmicroscopy
path: components/tools/OmeroPy/src/omero_model_DetailsI.py
copies: 1
size (bytes): 3505
""" /* * $Id$ * * Copyright 2007 Glencoe Software, Inc. All rights reserved. * Use is subject to license terms supplied in LICENSE.txt * */ """ import Ice, IceImport IceImport.load("omero_model_Details_ice") _omero = Ice.openModule("omero") _omero_model = Ice.openModule("omero.model") __name__ = "omero.model" class DetailsI(_omero_model.Details): class DetailsI_generator: def __iter__(self): return self def next(self): return DetailsI() def generator(cls): return cls.DetailsI_generator() generator = classmethod(generator) def __init__(self): super(DetailsI, self).__init__() def getOwner(self): return self._owner def setOwner(self, value): self._owner = value pass def getGroup(self): return self._group def setGroup(self, value): self._group = value pass def getCreationEvent(self): return self._creationEvent def setCreationEvent(self, value): self._creationEvent = value pass def getUpdateEvent(self): return self._updateEvent def setUpdateEvent(self, value): self._updateEvent = value pass def getPermissions(self): return self._permissions def setPermissions(self, value): self._permissions = value pass def getExternalInfo(self): return self._externalInfo def setExternalInfo(self, value): self._externalInfo = value pass def ice_postUnmarshal(self): """ Provides additional initialization once all data loaded Required due to __getattr__ implementation. """ pass # Currently unused def ice_preMarshal(self): """ Provides additional validation before data is sent Required due to __getattr__ implementation. """ pass # Currently unused def __getattr__(self, attr): if attr == "owner": return self.getOwner() elif attr == "group": return self.getGroup() elif attr == "creationEvent": return self.getCreationEvent() elif attr == "updateEvent": return self.getUpdateEvent() elif attr == "permissions": return self.getPermissions() elif attr == "externalInfo": return self.getExternalInfo() else: raise AttributeError(attr) def __setattr__(self, attr, value): if attr.startswith("_"): self.__dict__[attr] = value else: try: object.__getattribute__(self, attr) object.__setattr__(self, attr, value) except AttributeError: if attr == "owner": return self.setOwner(value) elif attr == "group": return self.setGroup(value) elif attr == "creationEvent": return self.setCreationEvent(value) elif attr == "updateEvent": return self.setUpdateEvent(value) elif attr == "permissions": return self.setPermissions(value) elif attr == "externalInfo": return self.setExternalInfo(value) else: raise _omero_model.DetailsI = DetailsI
license: gpl-2.0
hash: -279337086214513280
avg line length: 26.81746
max line length: 65
alphanum fraction: 0.540942
autogenerated: false
repo: redshodan/lazarus-ssh
path: setup.py
copies: 1
size (bytes): 5704
#!/usr/bin/python # -*- coding: latin-1 -*- # # The ManPageFormatter and build_manpage were borrowed from: # http://crunchyfrog.googlecode.com/svn/tags/0.3.4/utils/command/build_manpage.py # import sys, os, optparse, datetime from distutils.command.build import build from distutils.errors import DistutilsOptionError from distutils.core import setup, Command import imp lssh = imp.load_source("lssh", "lssh") # Remove the compiled file, ignore failures try: os.unlink("lsshc") except: pass MANDIR = "man/man1" BUILD_MANDIR = os.path.join("build", MANDIR) BUILD_MANPAGE = os.path.join(BUILD_MANDIR, "lssh.1") COPYRIGHT = \ ("Copyright © 2012 Chris Newton. License GPLv2: GNU GPL version 2 " + "<http://gnu.org/licenses/gpl.html>. This is free software: you are free " + "to change and redistribute it. There is NO WARRANTY, to the extent " + "permitted by law.") parser = None class ManPageFormatter(lssh.LongHelpFormatter): def __init__(self, indent_increment=2, max_help_position=24, width=None, short_first=1): """Constructor. Unfortunately HelpFormatter is no new-style class.""" lssh.LongHelpFormatter.__init__(self, True, indent_increment, max_help_position, width, short_first) def _markup(self, txt): """Prepares txt to be used in man pages.""" return txt.replace('-', '\\-') def format_usage(self, usage): """Formate the usage/synopsis line.""" return self._markup(usage) def format_heading(self, heading): """Format a heading. If level is 0 return an empty string. This usually is the string 'Options'. """ if self.level == 0: return '' return '.TP\n%s\n' % self._markup(heading.upper()) def format_option(self, option): """Format a single option. The base class takes care to replace custom optparse values. """ result = [] opts = self.option_strings[option] result.append('.TP\n.B %s\n' % self._markup(opts)) if option.long_help: option.help = option.long_help if option.help: help_text = '%s\n' % self._markup(self.expand_default(option)) result.append(help_text) return ''.join(result) class build_manpage(Command): description = 'Generate man page.' 
user_options = [('output=', 'O', 'output file')] def initialize_options(self): if not os.path.isdir(BUILD_MANDIR): os.makedirs(BUILD_MANDIR) self.output = BUILD_MANPAGE self.parser = None def finalize_options(self): parser.formatter = ManPageFormatter() parser.formatter.set_parser(parser) self.announce('Writing man page %s' % self.output) self._today = datetime.date.today() def _markup(self, txt): return txt.replace('-', '\\-') def _write_header(self): appname = self.distribution.get_name() ret = [] ret.append('.TH %s 1 %s\n' % (self._markup(appname), self._today.strftime('%Y\\-%m\\-%d'))) description = self.distribution.get_description() if description: name = self._markup('%s - %s' % (self._markup(appname), description.splitlines()[0])) else: name = self._markup(appname) ret.append('.SH NAME\n%s\n' % name) synopsis = parser.get_usage() if synopsis: synopsis = synopsis.replace('%s ' % appname, '') ret.append('.SH SYNOPSIS\n.B %s\n%s\n' % (self._markup(appname), synopsis)) long_desc = self.distribution.get_long_description() if long_desc: ret.append('.SH DESCRIPTION\n%s\n' % self._markup(long_desc)) return ''.join(ret) def _write_options(self): ret = ['.SH OPTIONS\n'] ret.append(parser.format_option_help()) return ''.join(ret) def _write_footer(self): ret = [] appname = self.distribution.get_name() author = '%s <%s>' % (self.distribution.get_author(), self.distribution.get_author_email()) ret.append(('.SH AUTHOR\n.B %s\nwas written by %s.\n' % (self._markup(appname), self._markup(author)))) ret.append((".SH COPYRIGHT\n" + COPYRIGHT + "\n")) homepage = self.distribution.get_url() ret.append(('.SH DISTRIBUTION\nThe latest version of %s may ' 'be downloaded from\n' '.UR %s\n.UE\n' % (self._markup(appname), self._markup(homepage),))) ret.append('.SH "SEE ALSO"\n.BR ssh(1)\n') return ''.join(ret) def run(self): manpage = [] manpage.append(self._write_header()) manpage.append(self._write_options()) manpage.append(self._write_footer()) stream = open(self.output, 'w') stream.write(''.join(manpage)) stream.close() build.sub_commands.append(('build_manpage', None)) if __name__=='__main__': parser = lssh.buildParser(False, "lssh") setup(name="lssh", version=lssh.version, description="resurrecting SSH from the dead", long_description=lssh.LONG_DESC, author="Chris Newton", author_email="[email protected]", license="GNU GPL v2", url="http://code.google.com/p/lazarus-ssh/", data_files=[('man/man1', [BUILD_MANPAGE])], scripts=['lssh', 'lssh'], cmdclass={'build_manpage': build_manpage}, )
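ManPageFormatter._markup escapes every '-' as '\-' because groff renders a bare '-' as a typographic hyphen, while '\-' produces the ASCII minus that option names need (and that users copy-paste). A tiny check of the transform:

def markup(txt):
    # same transform as ManPageFormatter._markup / build_manpage._markup
    return txt.replace('-', '\\-')

assert markup('--long-help') == '\\-\\-long\\-help'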
license: lgpl-2.1
hash: -1831263415902198800
avg line length: 34.209877
max line length: 81
alphanum fraction: 0.571529
autogenerated: false
repo: HuygensING/bioport-repository
path: bioport_repository/similarity/tests/test_similarity.py
copies: 1
size (bytes): 9897
########################################################################## # Copyright (C) 2009 - 2014 Huygens ING & Gerbrandy S.R.L. # # This file is part of bioport. # # bioport is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program. If not, see # <http://www.gnu.org/licenses/gpl-3.0.html>. ########################################################################## import os import pickle from common import CommonTestCase, unittest from bioport_repository.similarity.similarity import Similarity #from bioport_repository.person import Person from bioport_repository.db_definitions import CacheSimilarityPersons class SimilarityTestCase(CommonTestCase): def setUp(self): CommonTestCase.setUp(self) self.sim = Similarity() self.similarity_score = self.sim.similarity_score def test_sanity(self): persons = self.repo.get_persons() sim = Similarity(persons[1], persons) sim.compute() sim.sort() def assert_similarity_order(self, ls): def _format_person(p): return '%s (%s-%s)' % (p.get_names(), p.get_value('birth_date'), p.get_value('death_date')) sims = [(self.similarity_score(p1, p2), p1, p2) for p1, p2 in ls] for i, score in enumerate(sims): if i > 1: self.assertTrue(sims[i-1][0] >= sims[i][0], 'Expected %s, %s [%s] to be more similar than %s and %s [%s]' % ( _format_person(sims[i-1][1]), _format_person(sims[i-1][2]), sims[i-1][0], _format_person(sims[i][1]), _format_person(sims[i][2]), sims[i][0] )) def test_similarity(self): p1 = self._add_person('Jan', geboortedatum='1000', sterfdatum='2000') p2 = self._add_person('Jan', geboortedatum='1000', sterfdatum='2000') p3 = self._add_person('Piet Jan') p4 = self._add_person('Piet', geboortedatum='1000', sterfdatum='2000') p5 = self._add_person('Jan', geboortedatum='1001', sterfdatum='2000') self.assertEqual(self.similarity_score(p1, p2), 1) self.assertTrue(0 < self.similarity_score(p1, p3)< 1) self.assertTrue(self.similarity_score(p1, p4) < 1.0) self.assertTrue(self.similarity_score(p1, p5) < 1.0) def test_similarity_with_dates(self): p1 = self._add_person('Lucky', geboortedatum='1000', sterfdatum='2000') p2 = self._add_person('Lucky', geboortedatum='', sterfdatum='2000') p3 = self._add_person('Lucky', geboortedatum='1000', sterfdatum='') p4 = self._add_person('Lucky', geboortedatum='', sterfdatum='') self.assert_similarity_order([ (p1, p1), (p1, p2), (p1, p3), (p1, p4), ]) p2 = self._add_person('Lucky', geboortedatum='1001', sterfdatum='2000') p3 = self._add_person('Lucky', geboortedatum='1900', sterfdatum='2000') self.assert_similarity_order([ (p1, p1), (p1, p2), (p1, p3), ]) p1 = self._add_person('Lucky, Pozzo Vladimir Estragon', geboortedatum='1000', sterfdatum='2000') p2 = self._add_person('Luckie, Pozzo Vladimir Estragon', geboortedatum='1000', sterfdatum='2000') p3 = self._add_person('Lucky, Pozzo Vladimir Estragon', geboortedatum='', sterfdatum='') p4 = self._add_person('Luckie, Pozzo Vladimir Estragon', geboortedatum='', sterfdatum='') score1 = Similarity.similarity_score(p1, p2) #@UndefinedVariable score2 = 
Similarity.ratio(p1.get_names()[0], p2.get_names()[0]) #given the fact that they have the same birth and death dates, the scores of p1 and p2 shoudl imporve wrt the "bare" names self.assertTrue(score1 > score2) self.assert_similarity_order([ (p1, p1), (p1, p2), (p1, p3), (p1, p4), ]) def test_surely_equal(self): p0 = self._add_person('Estragon', geboortedatum='1000', sterfdatum='2000') p1 = self._add_person('Estragon', geboortedatum='1000', sterfdatum='2000') p2 = self._add_person('Estragon', geboortedatum='1001', sterfdatum='2000') p3 = self._add_person('Estragon', geboortedatum='1000-12-12', sterfdatum='2000') p4 = self._add_person('Estragon', geboortedatum='1000', sterfdatum='2001') p5 = self._add_person('Vladimir', geboortedatum='1000', sterfdatum='2000') p6 = self._add_person('Estragon', geboortedatum='1000', sterfdatum='') p7 = self._add_person('Mercier Camier', geboortedatum='1000', sterfdatum='1200') p8 = self._add_person('Camier, Mercier', geboortedatum='1000', sterfdatum='1200') p9 = self._add_person('Dongen, Kees van', geboortedatum='1000', sterfdatum='1200') p10 = self._add_person('Kees van Dongen', geboortedatum='1000', sterfdatum='1200') p11 = self._add_person(names=['Kees van Dongen', 'Cornelius van Dongen'], geboortedatum='1000', sterfdatum='1200') p12 = self._add_person('Mercier', geboortedatum='1000', sterfdatum='1200') bio = self._create_biography(name=u'Dongen, Kees van') p12.add_biography(bio) p12.save() try: self.repo.save_biography(bio, comment='test') except Exception, error: self.repo.save_biography(bio, comment='test') self.assertTrue(Similarity.are_surely_equal(p0, p1)) self.assertFalse(Similarity.are_surely_equal(p0, p2)) self.assertTrue(Similarity.are_surely_equal(p0, p3)) self.assertFalse(Similarity.are_surely_equal(p0, p4)) self.assertFalse(Similarity.are_surely_equal(p0, p5)) self.assertFalse(Similarity.are_surely_equal(p0, p6)) self.assertTrue(Similarity.are_surely_equal(p7, p8)) self.assertTrue(Similarity.are_surely_equal(p9, p10)) self.assertTrue(Similarity.are_surely_equal(p9, p11)) self.assertTrue(Similarity.are_surely_equal(p9, p12)) self.assertTrue(Similarity.are_surely_equal(p10, p12)) def test_with_biodes_files(self): s1 = """<biodes version="1.0.1"> <fileDesc> <title/> <ref target="http://www.rkd.nl/rkddb/dispatcher.aspx?action=search&amp;database=ChoiceArtists&amp;search=priref=19815"/> <publisher> <name>Rijksbureau voor Kunsthistorische Documentatie</name> <ref target="http://www.rkd.nl/"/> </publisher> </fileDesc> <person> <persName>Dam, Max van</persName> <event type="birth" when="1910-03-19"> <place>Winterswijk</place> </event> <event type="death" when="1943-09-20"> <place>S&#243;bibor (Polen)</place> </event> <state type="occupation">schilder</state> <state type="occupation">tekenaar</state> <idno type="id">19815</idno> </person> <biography> <text>Schilder, tekenaar. Geboren: 19 maart 1910, Winterswijk. Gestorven: 20 september 1943, S&#243;bibor (Polen). 
</text> </biography> </biodes>""" s2 = """ <biodes version="1.0.1"> <fileDesc> <title/> <publisher/> </fileDesc> <person> <idno type="id">50019330</idno> <persName>Max van Dam</persName><event type="birth" when="1910-03-19"><place>Winterswijk</place></event><event type="death" when="1943-09-20"><place>Sobibor, Polen</place></event><event type="funeral"><place/></event><event type="baptism"><place/></event><sex value="1"/><state type="category" idno="8">Maatschappelijke bewegingen</state><state type="floruit" from="" to=""><place/></state></person> <biography><snippet source_id="jews/109.xml"/></biography> </biodes>""" p1 = self._add_person(xml_source=s1) p2 = self._add_person(xml_source=s2) self.assertTrue(Similarity.are_surely_equal(p1, p2)) #@UndefinedVariable def _read_testsets(self): fn_identified = os.path.join(os.path.dirname(__file__), 'data', 'identified_examples.pickle') self._identified = [] for bio1, bio2 in pickle.load(open(fn_identified)): self._identified.append((bio1, bio2)) def test_most_similar_persons(self): repo = self.repo self.assertEqual(len(self.repo.get_persons()) ,10) self.repo.db.fill_similarity_cache(minimal_score=0.0, refresh=True) with self.repo.db.get_session_context() as session: for r in session.query(CacheSimilarityPersons).all(): assert r.bioport_id1 <= r.bioport_id2, (r.bioport_id1, r.bioport_id2) ls = self.repo.get_most_similar_persons(size=3) ls = list(ls) self.assertEqual(len(ls) ,3) score, p1, p2 = ls[0] self.assertNotEqual(p1.get_bioport_id(), p2.get_bioport_id()) ls = self.repo.get_most_similar_persons(bioport_id=p1.bioport_id) for score, pa, pb in ls: assert p1.bioport_id in [pa.bioport_id, pb.bioport_id] ls = self.repo.get_most_similar_persons(source_id=self.repo.get_sources()[0].id) ls = self.repo.get_most_similar_persons(search_name='jan') def test_suite(): test_suite = unittest.TestSuite() tests = [SimilarityTestCase] for test in tests: test_suite.addTest(unittest.makeSuite(test)) return test_suite if __name__ == "__main__": unittest.main() #defaultTest='SimilarityTestCase.test_cases_to_optimize')
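assert_similarity_order above encodes the suite's central invariant: pairs listed earlier must score at least as high as pairs listed later (note that its guard "if i > 1" only starts comparing at the third pair, so the first two are never checked against each other; "i >= 1" would cover every adjacent pair). A stripped-down version of the same check against a toy character-Jaccard scorer:

def similarity_score(a, b):
    # toy stand-in for Similarity.similarity_score: Jaccard over characters
    return len(set(a) & set(b)) / float(len(set(a) | set(b)))

def assert_similarity_order(pairs):
    scores = [similarity_score(a, b) for a, b in pairs]
    for earlier, later in zip(scores, scores[1:]):
        assert earlier >= later, (earlier, later)

assert_similarity_order([('jan', 'jan'), ('jan', 'jany'), ('jan', 'piet')])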
license: gpl-3.0
hash: 8912973642295769000
avg line length: 46.128571
max line length: 401
alphanum fraction: 0.61746
autogenerated: false
repo: p-o-seidon/tau4
path: src/tau4/timing/__init__.py
copies: 1
size (bytes): 3038
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#
#
# Copyright (C) by [email protected], 1998 - 2016
#
# This file is part of tau4.
#
# tau4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tau4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tau4. If not, see <http://www.gnu.org/licenses/>.

import time
from timeit import default_timer


class Timer(object):

    """Timer."""

    def __init__(self, verbose=False):
        self.__verbose = verbose
        self.__timer = default_timer

    def __enter__(self):
        self.__start = self.__timer()
        return self

    def __exit__(self, *args):
        end = self.__timer()
        self.__elapsed_s = end - self.__start
        self.__elapsed_ms = self.__elapsed_s * 1000
        self.__elapsed_us = self.__elapsed_ms * 1000
        if self.__verbose:
            print("Timer(): This operation took %.3f s = %.3f ms = %.3f us. "
                  % (self.__elapsed_s, self.__elapsed_ms, self.__elapsed_us))

    def elapsed_ms(self):
        return self.__elapsed_ms

    def elapsed_s(self):
        return self.__elapsed_s

    elapsed_time = elapsed_s
    elapsed = elapsed_s

    def elapsed_us(self):
        return self.__elapsed_us


class Timer2(Timer):

    """Timer with a more detailed "dump"."""

    def __init__(self, info_about_testee=None):
        Timer.__init__(self, False)
        self.__info_about_testee = info_about_testee
        self.__timer = default_timer

    def __enter__(self):
        if self.__info_about_testee:
            print("Timer(): Start timing '%s'..." % self.__info_about_testee)
        return Timer.__enter__(self)

    def __str__(self):
        return self.results()

    def elapsed_ms(self, timedivider=1.):
        return Timer.elapsed_ms(self) / timedivider

    def elapsed_s(self, timedivider=1.):
        return Timer.elapsed_s(self) / timedivider

    elapsed_time = elapsed_s

    def elapsed_us(self, timedivider=1.):
        return Timer.elapsed_us(self) / timedivider

    def results(self, timedivider=1.):
        if self.__info_about_testee:
            return "%s(): '%s' took %.3f s = %.3f ms = %.3f us. " \
                % (self.__class__.__name__, self.__info_about_testee,
                   self.elapsed_s(timedivider), self.elapsed_ms(timedivider), self.elapsed_us(timedivider))
        return "%s(): The timed operation took %.3f s = %.3f ms = %.3f us. " \
            % (self.__class__.__name__,
               self.elapsed_s(timedivider), self.elapsed_ms(timedivider), self.elapsed_us(timedivider))
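Both classes are context managers, so typical use is a with block around the code being timed:

# usage sketch for the timers above
with Timer(verbose=True):
    sum(range(10**6))

with Timer2(info_about_testee="sum of 1e6 ints") as t:
    sum(range(10**6))
print(t.results())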
license: gpl-3.0
hash: 8052613477175272000
avg line length: 29.686869
max line length: 163
alphanum fraction: 0.60237
autogenerated: false
repo: yawd/yawd-elfinder
path: setup.py
copies: 1
size (bytes): 1319
#!/usr/bin/env python
from setuptools import setup, find_packages

import elfinder

# Try to detect if we have Pillow installed.
imaging_library = list()
try:
    import Image  # PIL does this, Pillow does not.
except ImportError:
    # Check to see if Pillow is installed...
    try:
        from PIL import Image
    except ImportError:
        # Prefer Pillow to PIL
        imaging_library.append('Pillow>=2.0.0')

setup(
    name='yawd-elfinder',
    url='http://yawd.eu/open-source-projects/yawd-elfinder/',
    version=elfinder.__version__,
    description='Elfinder-based file management solution for Django',
    long_description=open('README.rst', 'rt').read(),
    author='yawd',
    author_email='[email protected]',
    packages=find_packages(),
    license='BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries'
    ],
    include_package_data=True,
    install_requires=[
        "Django>=1.5",
        "python-magic==0.4.3"
    ] + imaging_library,
)
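The try/except ladder above prefers whichever imaging library is already importable (classic PIL exposes a top-level Image module; Pillow lives inside the PIL package) and only adds Pillow to install_requires when neither import succeeds. The same probe as a reusable helper, for illustration:

def imaging_requirements():
    try:
        import Image               # classic PIL
        return []
    except ImportError:
        try:
            from PIL import Image  # Pillow
            return []
        except ImportError:
            return ['Pillow>=2.0.0']

print(imaging_requirements())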
license: bsd-3-clause
hash: 3092052680388831000
avg line length: 30.404762
max line length: 71
alphanum fraction: 0.612585
autogenerated: false
repo: neuroidss/nupic.studio
path: nupic_studio/project.py
copies: 1
size (bytes): 16399
from PyQt4 import QtGui, QtCore
from nupic_studio.htm.network import Network
from nupic_studio.htm.node import NodeType, Node
from nupic_studio.htm.node_region import Region
from nupic_studio.htm.node_sensor import Sensor, DataSourceType, PredictionsMethod
from nupic_studio.htm.encoding import Encoding
from nupic_studio.htm.link import Link

"""
Loads and saves the elements of the .nuproj file, which contains the user's
entries for the project properties, and provides the loaded elements as a
structure to return.
"""

class Project:
    """
    Loads and saves the elements of the project file, which contains the user's
    entries for the network configuration, and provides the loaded elements as
    a structure to return.
    """

    #region Constructor

    def __init__(self):
        """
        Initializes a new instance of this class.
        """

        #region Instance fields

        self.fileName = ''
        """Project file"""

        self.name = "Untitled"
        """Name of the project."""

        self.author = ""
        """Author of the project."""

        self.description = ""
        """Description of the project."""

        self.network = Network()
        """The network created for the project."""

        #endregion

    #endregion

    #region Methods

    def new(self):
        """
        Initializes a new instance of this class.
        """

        # Initialize metadata
        self.fileName = ''
        self.name = "Untitled"
        self.author = ""
        self.description = ""

        # Create the top region
        topRegion = Region("TopRegion")

        # Create the network and add topRegion as its starting node
        self.network = Network()
        self.network.nodes.append(topRegion)
        self.network.preparePhases()

    def open(self, fileName):
        """
        Loads the content from an XML file into this Project instance.
        """

        # Create the network
        self.network = Network()

        self.fileName = fileName
        file = QtCore.QFile(self.fileName)
        if file.open(QtCore.QIODevice.ReadOnly):
            xmlReader = QtCore.QXmlStreamReader()
            xmlReader.setDevice(file)
            while not xmlReader.isEndDocument():
                if xmlReader.isStartElement():
                    if xmlReader.name().toString() == 'MetaData':
                        self.name = self.__getStringAttribute(xmlReader.attributes(), 'name')
                        self.author = self.__getStringAttribute(xmlReader.attributes(), 'author')
                        self.description = self.__getStringAttribute(xmlReader.attributes(), 'description')
                    elif xmlReader.name().toString() == 'Net':
                        while xmlReader.readNextStartElement():
                            if xmlReader.name().toString() == 'Node':
                                node = self.__readNode(xmlReader)
                                self.network.nodes.append(node)
                            elif xmlReader.name().toString() == 'Link':
                                link = self.__readLink(xmlReader)
                                self.network.links.append(link)
                xmlReader.readNext()
            if xmlReader.hasError():
                # 'None' as parent: Project is not a QWidget, so it cannot act
                # as the parent of a message box.
                QtGui.QMessageBox.critical(None, "Critical", "An XML error occurred: " + str(xmlReader.errorString()), QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton)
        else:
            QtGui.QMessageBox.critical(None, "Critical", "Cannot read the project file!", QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default, QtGui.QMessageBox.NoButton)

        self.network.preparePhases()

    def __getStringAttribute(self, attributes, attributeName):
        if attributes.value(attributeName).toString() != "":
            attributeValue = str(attributes.value(attributeName).toString())
        else:
            attributeValue = ""
        return attributeValue

    def __getIntegerAttribute(self, attributes, attributeName):
        attributeValue = 0
        if attributes.value(attributeName).toString() != "":
            attributeValue = int(attributes.value(attributeName).toString())
        return attributeValue

    def __getFloatAttribute(self, attributes, attributeName):
        attributeValue = 0.0
        if attributes.value(attributeName).toString() != "":
            attributeValue = float(attributes.value(attributeName).toString())
        return attributeValue

    def __getBooleanAttribute(self, attributes, attributeName):
        attributeValue = False
        if attributes.value(attributeName).toString() == "True":
            attributeValue = True
        return attributeValue

    def __readNode(self, xmlReader):

        # Read the type of the node
        name = self.__getStringAttribute(xmlReader.attributes(), 'name')
        type = self.__getStringAttribute(xmlReader.attributes(), 'type')

        # Create a node from the parameters
        node = None
        if type == 'Region':
            node = Region(name)
        elif type == 'Sensor':
            node = Sensor(name)
        node.width = self.__getIntegerAttribute(xmlReader.attributes(), 'width')
        node.height = self.__getIntegerAttribute(xmlReader.attributes(), 'height')

        # Read specific parameters according to the node type
        if type == 'Region':
            node.enableSpatialLearning = self.__getBooleanAttribute(xmlReader.attributes(), 'enableSpatialLearning')
            node.potentialRadius = self.__getIntegerAttribute(xmlReader.attributes(), 'potentialRadius')
            node.potentialPct = self.__getFloatAttribute(xmlReader.attributes(), 'potentialPct')
            node.globalInhibition = self.__getBooleanAttribute(xmlReader.attributes(), 'globalInhibition')
            node.localAreaDensity = self.__getFloatAttribute(xmlReader.attributes(), 'localAreaDensity')
            node.numActiveColumnsPerInhArea = self.__getFloatAttribute(xmlReader.attributes(), 'numActiveColumnsPerInhArea')
            node.stimulusThreshold = self.__getIntegerAttribute(xmlReader.attributes(), 'stimulusThreshold')
            node.proximalSynConnectedPerm = self.__getFloatAttribute(xmlReader.attributes(), 'proximalSynConnectedPerm')
            node.proximalSynPermIncrement = self.__getFloatAttribute(xmlReader.attributes(), 'proximalSynPermIncrement')
            node.proximalSynPermDecrement = self.__getFloatAttribute(xmlReader.attributes(), 'proximalSynPermDecrement')
            node.minPctOverlapDutyCycle = self.__getFloatAttribute(xmlReader.attributes(), 'minPctOverlapDutyCycle')
            node.minPctActiveDutyCycle = self.__getFloatAttribute(xmlReader.attributes(), 'minPctActiveDutyCycle')
            node.dutyCyclePeriod = self.__getIntegerAttribute(xmlReader.attributes(), 'dutyCyclePeriod')
            node.maxBoost = self.__getFloatAttribute(xmlReader.attributes(), 'maxBoost')
            node.spSeed = self.__getIntegerAttribute(xmlReader.attributes(), 'spSeed')
            node.enableTemporalLearning = self.__getBooleanAttribute(xmlReader.attributes(), 'enableTemporalLearning')
            node.numCellsPerColumn = self.__getIntegerAttribute(xmlReader.attributes(), 'numCellsPerColumn')
            node.distalSynInitialPerm = self.__getFloatAttribute(xmlReader.attributes(), 'distalSynInitialPerm')
            node.distalSynConnectedPerm = self.__getFloatAttribute(xmlReader.attributes(), 'distalSynConnectedPerm')
            node.distalSynPermIncrement = self.__getFloatAttribute(xmlReader.attributes(), 'distalSynPermIncrement')
            node.distalSynPermDecrement = self.__getFloatAttribute(xmlReader.attributes(), 'distalSynPermDecrement')
            node.minThreshold = self.__getIntegerAttribute(xmlReader.attributes(), 'minThreshold')
            node.activationThreshold = self.__getIntegerAttribute(xmlReader.attributes(), 'activationThreshold')
            node.maxNumNewSynapses = self.__getIntegerAttribute(xmlReader.attributes(), 'maxNumNewSynapses')
            node.tpSeed = self.__getIntegerAttribute(xmlReader.attributes(), 'tpSeed')
        elif type == 'Sensor':
            dataSourceType = self.__getStringAttribute(xmlReader.attributes(), 'dataSourceType')
            if dataSourceType == "File":
                node.dataSourceType = DataSourceType.file
                node.fileName = self.__getStringAttribute(xmlReader.attributes(), 'fileName')
            elif dataSourceType == "Database":
                node.dataSourceType = DataSourceType.database
                node.databaseConnectionString = self.__getStringAttribute(xmlReader.attributes(), 'databaseConnectionString')
                node.databaseTable = self.__getStringAttribute(xmlReader.attributes(), 'databaseTable')
            node.predictionsMethod = self.__getStringAttribute(xmlReader.attributes(), 'predictionsMethod')
            if node.predictionsMethod == PredictionsMethod.classification:
                node.enableClassificationLearning = self.__getBooleanAttribute(xmlReader.attributes(), 'enableClassificationLearning')
                node.enableClassificationInference = self.__getBooleanAttribute(xmlReader.attributes(), 'enableClassificationInference')

            # If this is still not the end of the element, the node has encodings
            token = xmlReader.readNext()
            if not xmlReader.isEndElement():
                while xmlReader.readNextStartElement():
                    encoding = self.__readEncoding(xmlReader)
                    node.encodings.append(encoding)

        token = xmlReader.readNext()

        return node

    def __readEncoding(self, xmlReader):

        # Create an encoding from the parameters
        encoding = Encoding()
        encoding.dataSourceFieldName = self.__getStringAttribute(xmlReader.attributes(), 'dataSourceFieldName')
        encoding.dataSourceFieldDataType = self.__getStringAttribute(xmlReader.attributes(), 'dataSourceFieldDataType')
        encoding.enableInference = self.__getBooleanAttribute(xmlReader.attributes(), 'enableInference')
        encoding.encoderModule = self.__getStringAttribute(xmlReader.attributes(), 'encoderModule')
        encoding.encoderClass = self.__getStringAttribute(xmlReader.attributes(), 'encoderClass')
        encoding.encoderParams = self.__getStringAttribute(xmlReader.attributes(), 'encoderParams')
        encoding.encoderFieldName = self.__getStringAttribute(xmlReader.attributes(), 'encoderFieldName')
        encoding.encoderFieldDataType = self.__getStringAttribute(xmlReader.attributes(), 'encoderFieldDataType')
        token = xmlReader.readNext()

        return encoding

    def __readLink(self, xmlReader):

        # Read the link parameters
        outNodeName = self.__getStringAttribute(xmlReader.attributes(), 'outNode')
        inNodeName = self.__getStringAttribute(xmlReader.attributes(), 'inNode')
        token = xmlReader.readNext()

        # Find the output node instance
        outNode = None
        for node in self.network.nodes:
            if node.name == outNodeName:
                outNode = node
                break

        # Find the input node instance
        inNode = None
        for node in self.network.nodes:
            if node.name == inNodeName:
                inNode = node
                break

        # Create a link from the parameters
        link = Link()
        link.outNode = outNode
        link.inNode = inNode

        return link

    def save(self, fileName):
        """
        Saves the content from this Project instance to an XML file.
        """

        self.fileName = fileName
        file = QtCore.QFile(self.fileName)
        file.open(QtCore.QIODevice.WriteOnly)
        xmlWriter = QtCore.QXmlStreamWriter(file)
        xmlWriter.setAutoFormatting(True)
        xmlWriter.writeStartDocument()
        xmlWriter.writeStartElement('Project')

        xmlWriter.writeStartElement('MetaData')
        xmlWriter.writeAttribute('name', self.name)
        xmlWriter.writeAttribute('author', self.author)
        xmlWriter.writeAttribute('description', self.description)
        xmlWriter.writeEndElement()

        xmlWriter.writeStartElement('Net')
        for node in self.network.nodes:
            self.__writeNode(node, xmlWriter)
        for link in self.network.links:
            self.__writeLink(link, xmlWriter)
        xmlWriter.writeEndElement()

        xmlWriter.writeEndElement()
        xmlWriter.writeEndDocument()
        file.close()

    def __writeNode(self, node, xmlWriter):

        # Write common parameters
        xmlWriter.writeStartElement('Node')
        xmlWriter.writeAttribute('name', node.name)

        # Write specific parameters according to the node type
        if node.type == NodeType.region:
            xmlWriter.writeAttribute('type', 'Region')
            xmlWriter.writeAttribute('width', str(node.width))
            xmlWriter.writeAttribute('height', str(node.height))
            xmlWriter.writeAttribute('enableSpatialLearning', str(node.enableSpatialLearning))
            xmlWriter.writeAttribute('potentialRadius', str(node.potentialRadius))
            xmlWriter.writeAttribute('potentialPct', str(node.potentialPct))
            xmlWriter.writeAttribute('globalInhibition', str(node.globalInhibition))
            xmlWriter.writeAttribute('localAreaDensity', str(node.localAreaDensity))
            xmlWriter.writeAttribute('numActiveColumnsPerInhArea', str(node.numActiveColumnsPerInhArea))
            xmlWriter.writeAttribute('stimulusThreshold', str(node.stimulusThreshold))
            xmlWriter.writeAttribute('proximalSynConnectedPerm', str(node.proximalSynConnectedPerm))
            xmlWriter.writeAttribute('proximalSynPermIncrement', str(node.proximalSynPermIncrement))
            xmlWriter.writeAttribute('proximalSynPermDecrement', str(node.proximalSynPermDecrement))
            xmlWriter.writeAttribute('minPctOverlapDutyCycle', str(node.minPctOverlapDutyCycle))
            xmlWriter.writeAttribute('minPctActiveDutyCycle', str(node.minPctActiveDutyCycle))
            xmlWriter.writeAttribute('dutyCyclePeriod', str(node.dutyCyclePeriod))
            xmlWriter.writeAttribute('maxBoost', str(node.maxBoost))
            xmlWriter.writeAttribute('spSeed', str(node.spSeed))
            xmlWriter.writeAttribute('enableTemporalLearning', str(node.enableTemporalLearning))
            xmlWriter.writeAttribute('numCellsPerColumn', str(node.numCellsPerColumn))
            xmlWriter.writeAttribute('distalSynInitialPerm', str(node.distalSynInitialPerm))
            xmlWriter.writeAttribute('distalSynConnectedPerm', str(node.distalSynConnectedPerm))
            xmlWriter.writeAttribute('distalSynPermIncrement', str(node.distalSynPermIncrement))
            xmlWriter.writeAttribute('distalSynPermDecrement', str(node.distalSynPermDecrement))
            xmlWriter.writeAttribute('minThreshold', str(node.minThreshold))
            xmlWriter.writeAttribute('activationThreshold', str(node.activationThreshold))
            xmlWriter.writeAttribute('maxNumNewSynapses', str(node.maxNumNewSynapses))
            xmlWriter.writeAttribute('tpSeed', str(node.tpSeed))
        elif node.type == NodeType.sensor:
            xmlWriter.writeAttribute('type', 'Sensor')
            xmlWriter.writeAttribute('width', str(node.width))
            xmlWriter.writeAttribute('height', str(node.height))
            if node.dataSourceType == DataSourceType.file:
                xmlWriter.writeAttribute('dataSourceType', "File")
                xmlWriter.writeAttribute('fileName', node.fileName)
            elif node.dataSourceType == DataSourceType.database:
                xmlWriter.writeAttribute('dataSourceType', "Database")
                xmlWriter.writeAttribute('databaseConnectionString', node.databaseConnectionString)
                xmlWriter.writeAttribute('databaseTable', node.databaseTable)
            xmlWriter.writeAttribute('predictionsMethod', node.predictionsMethod)
            if node.predictionsMethod == PredictionsMethod.classification:
                xmlWriter.writeAttribute('enableClassificationLearning', str(node.enableClassificationLearning))
                xmlWriter.writeAttribute('enableClassificationInference', str(node.enableClassificationInference))

            # Traverse all encodings
            for encoding in node.encodings:
                self.__writeEncoding(encoding, xmlWriter)

        xmlWriter.writeEndElement()

    def __writeEncoding(self, encoding, xmlWriter):

        # Write the encoding parameters
        xmlWriter.writeStartElement('Encoding')
        xmlWriter.writeAttribute('dataSourceFieldName', encoding.dataSourceFieldName)
        xmlWriter.writeAttribute('dataSourceFieldDataType', encoding.dataSourceFieldDataType)
        xmlWriter.writeAttribute('enableInference', str(encoding.enableInference))
        xmlWriter.writeAttribute('encoderModule', encoding.encoderModule)
        xmlWriter.writeAttribute('encoderClass', encoding.encoderClass)
        xmlWriter.writeAttribute('encoderParams', encoding.encoderParams)
        xmlWriter.writeAttribute('encoderFieldName', encoding.encoderFieldName)
        xmlWriter.writeAttribute('encoderFieldDataType', encoding.encoderFieldDataType)
        xmlWriter.writeEndElement()

    def __writeLink(self, link, xmlWriter):

        # Write the link parameters
        xmlWriter.writeStartElement('Link')
        xmlWriter.writeAttribute('outNode', link.outNode.name)
        xmlWriter.writeAttribute('inNode', link.inNode.name)
        xmlWriter.writeEndElement()

    #endregion
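A minimal round-trip sketch for the Project class above (not from the original project; the import path and the .nuproj file name are assumptions):

# Round-trip sketch: save a fresh project and load it back.
# Assumptions: nupic_studio is importable and this class lives in a module
# named nupic_studio.project; the path '/tmp/demo.nuproj' is hypothetical.
from nupic_studio.project import Project

project = Project()
project.new()                      # network with a single "TopRegion"
project.name = "Demo"
project.author = "Jane Doe"
project.save('/tmp/demo.nuproj')   # writes <Project><MetaData/><Net>...</Net></Project>

clone = Project()
clone.open('/tmp/demo.nuproj')
print(clone.name, len(clone.network.nodes))  # -> Demo 1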
gpl-2.0
palash1992/GEM
gem/embedding/hope.py
disp_avlbl = True
import os
if 'DISPLAY' not in os.environ:
    disp_avlbl = False
    import matplotlib
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

import networkx as nx
import numpy as np
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as lg
from time import time

import sys
sys.path.append('./')
# Make the directory containing this file importable.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from .static_graph_embedding import StaticGraphEmbedding
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz


class HOPE(StaticGraphEmbedding):

    def __init__(self, *hyper_dict, **kwargs):
        ''' Initialize the HOPE class

        Args:
            d: dimension of the embedding
            beta: higher order coefficient
        '''
        hyper_params = {
            'method_name': 'hope_gsvd'
        }
        hyper_params.update(kwargs)
        for key in hyper_params.keys():
            self.__setattr__('_%s' % key, hyper_params[key])
        for dictionary in hyper_dict:
            for key in dictionary:
                self.__setattr__('_%s' % key, dictionary[key])

    def get_method_name(self):
        return self._method_name

    def get_method_summary(self):
        return '%s_%d' % (self._method_name, self._d)

    def learn_embedding(self, graph=None, edge_f=None,
                        is_weighted=False, no_python=False):
        if not graph and not edge_f:
            raise Exception('graph/edge_f needed')
        if not graph:
            graph = graph_util.loadGraphFromEdgeListTxt(edge_f)

        t1 = time()
        # A = nx.to_scipy_sparse_matrix(graph)
        # I = sp.eye(graph.number_of_nodes())
        # M_g = I - self._beta*A
        # M_l = self._beta*A
        A = nx.to_numpy_matrix(graph)
        M_g = np.eye(len(graph.nodes)) - self._beta * A
        M_l = self._beta * A
        # Katz proximity: S = (I - beta*A)^-1 (beta*A)
        S = np.dot(np.linalg.inv(M_g), M_l)

        u, s, vt = lg.svds(S, k=self._d // 2)
        X1 = np.dot(u, np.diag(np.sqrt(s)))
        X2 = np.dot(vt.T, np.diag(np.sqrt(s)))
        t2 = time()
        self._X = np.concatenate((X1, X2), axis=1)

        p_d_p_t = np.dot(u, np.dot(np.diag(s), vt))
        eig_err = np.linalg.norm(p_d_p_t - S)
        print('SVD error (low rank): %f' % eig_err)
        return self._X, (t2 - t1)

    def get_embedding(self):
        return self._X

    def get_edge_weight(self, i, j):
        return np.dot(self._X[i, :self._d // 2], self._X[j, self._d // 2:])

    def get_reconstructed_adj(self, X=None, node_l=None):
        if X is not None:
            node_num = X.shape[0]
            self._X = X
        else:
            node_num = self._node_num
        adj_mtx_r = np.zeros((node_num, node_num))
        for v_i in range(node_num):
            for v_j in range(node_num):
                if v_i == v_j:
                    continue
                adj_mtx_r[v_i, v_j] = self.get_edge_weight(v_i, v_j)
        return adj_mtx_r


if __name__ == '__main__':
    # load Zachary's Karate graph
    edge_f = 'data/karate.edgelist'
    G = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=False)
    G = G.to_directed()
    res_pre = 'results/testKarate'
    graph_util.print_graph_stats(G)
    t1 = time()
    # Keyword arguments: the constructor iterates positional arguments as
    # dictionaries, so bare ints would raise a TypeError.
    embedding = HOPE(d=4, beta=0.01)
    embedding.learn_embedding(graph=G, edge_f=None,
                              is_weighted=True, no_python=True)
    print('HOPE:\n\tTraining time: %f' % (time() - t1))
    viz.plot_embedding2D(embedding.get_embedding()[:, :2],
                         di_graph=G, node_colors=None)
    plt.show()
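A small numeric sketch of the proximity matrix that learn_embedding() factorizes (not part of GEM; the graph and beta value are illustrative):

# Katz-proximity sketch: S = (I - beta*A)^-1 (beta*A) for a 3-node path graph.
import numpy as np

beta = 0.1
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
S = np.linalg.inv(np.eye(3) - beta * A).dot(beta * A)
# S[i, j] sums all walks from i to j, damped by beta**length. HOPE then takes
# a truncated SVD of S and splits sqrt(singular values) between the source
# half (from u) and the target half (from v) of each node's embedding.
u, s, vt = np.linalg.svd(S)
X1 = u[:, :1] * np.sqrt(s[:1])           # source embedding (d/2 = 1)
X2 = vt.T[:, :1] * np.sqrt(s[:1])        # target embedding
print(np.linalg.norm(X1.dot(X2.T) - S))  # low-rank reconstruction error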
bsd-3-clause
igordcard/RO
openmanoconfig.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: [email protected]
##

"""
Reads the openmanod.cfg file and creates environment variables for the
openmano client.
Call it using command substitution, or copy and paste the output to set up
your shell environment.
It reads the database to look for a single tenant / datacenter.
"""

from __future__ import print_function
from os import environ
from openmanod import load_configuration
#from socket import gethostname
from db_base import db_base_Exception
import nfvo_db
import getopt
import sys


__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
__date__ = "$26-aug-2014 11:09:29$"
__version__ = "0.0.1-r509"
version_date = "Oct 2016"
database_version = "0.16"  # expected database schema version


def usage():
    print("Usage: ", sys.argv[0], "[options]")
    print("  -v|--version: prints current version")
    print("  -c|--config [configuration_file]: loads the configuration file (default: openmanod.cfg)")
    print("  -h|--help: shows this help")
    return


if __name__ == "__main__":
    # Read parameters and configuration file
    try:
        # load parameters and configuration
        opts, args = getopt.getopt(sys.argv[1:], "vhc:", ["config=", "help", "version"])
        config_file = 'openmanod.cfg'

        for o, a in opts:
            if o in ("-v", "--version"):
                print("openmanoconfig.py version " + __version__ + ' ' + version_date)
                print("(c) Copyright Telefonica")
                exit()
            elif o in ("-h", "--help"):
                usage()
                exit()
            elif o in ("-c", "--config"):
                config_file = a
            else:
                assert False, "Unhandled option"

        global_config = load_configuration(config_file)
        if global_config["http_host"] == "0.0.0.0":
            global_config["http_host"] = "localhost"  # gethostname()
        environ["OPENMANO_HOST"] = global_config["http_host"]
        print("export OPENMANO_HOST='{}'".format(global_config["http_host"]))
        environ["OPENMANO_PORT"] = str(global_config["http_port"])
        print("export OPENMANO_PORT={}".format(global_config["http_port"]))

        mydb = nfvo_db.nfvo_db()
        mydb.connect(global_config['db_host'], global_config['db_user'],
                     global_config['db_passwd'], global_config['db_name'])
        try:
            tenants = mydb.get_rows(FROM="nfvo_tenants")
            if not tenants:
                print("#No tenant found", file=sys.stderr)
            elif len(tenants) > 1:
                print("#Found several tenants export OPENMANO_TENANT=", file=sys.stderr, end="")
                for tenant in tenants:
                    print(" '{}'".format(tenant["name"]), file=sys.stderr, end="")
                print("")
            else:
                environ["OPENMANO_TENANT"] = tenants[0]["name"]
                print("export OPENMANO_TENANT='{}'".format(tenants[0]["name"]))

            dcs = mydb.get_rows(FROM="datacenters")
            if not dcs:
                print("#No datacenter found", file=sys.stderr)
            elif len(dcs) > 1:
                print("#Found several datacenters export OPENMANO_DATACENTER=", file=sys.stderr, end="")
                for dc in dcs:
                    print(" '{}'".format(dc["name"]), file=sys.stderr, end="")
                print("")
            else:
                environ["OPENMANO_DATACENTER"] = dcs[0]["name"]
                print("export OPENMANO_DATACENTER='{}'".format(dcs[0]["name"]))
        except db_base_Exception as e:
            print("#DATABASE is not a MANO one or it is a '0.0' version. Try to upgrade to version '{}' with "
                  "'./database_utils/migrate_mano_db.sh'".format(database_version), file=sys.stderr)
            exit(-1)

    except db_base_Exception as e:
        print("#" + str(e), file=sys.stderr)
        exit(-1)
    except SystemExit:
        pass
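Typical usage is to evaluate the script's stdout so the exported variables land in the calling shell (a sketch; the configuration path and the printed values below are illustrative, not taken from the source):

    $ ./openmanoconfig.py -c /etc/osm/openmanod.cfg
    export OPENMANO_HOST='localhost'
    export OPENMANO_PORT=9090
    export OPENMANO_TENANT='mytenant'
    $ eval "$(./openmanoconfig.py -c /etc/osm/openmanod.cfg)"

Diagnostics such as "#No tenant found" go to stderr, so they are not evaluated by the shell.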
apache-2.0
UiL-OTS-labs/iSpector
gui/ispectorgui.py
#!/usr/bin/env python

#from log import *
from log.eyedata import *
from log.eyeexperiment import *
from log.parseeyefile import *

from PyQt5 import QtGui, QtWidgets
from PyQt5 import QtCore

from . import inspecteyedataview
from gui import datamodel
from . import statusmessage as sm

import sys
import os
import os.path as p

from . import ievent
from . import statusbox
import utils.configfile
import iSpectorVersion
from . import fixationeditor

LOGO = "iSpectorLogo.svg"

SMOOTH_WINDOW_CHOICES = [str(i) for i in range(3, 21, 2)]


class NoSuchString(Exception):
    def __init__(self, message):
        super(NoSuchString, self).__init__(message)


def comboSelectString(combo, string):
    """
    Selects the string if available; does nothing when the string is
    already selected.
    """
    if combo.currentText() == string:
        return
    index = combo.findText(string)
    if index < 0:
        raise NoSuchString("The string \"" + string + "\" is not available")
    else:
        combo.setCurrentIndex(index)


class Controller:
    """
    The controller keeps track of whether the user enters valid input inside
    the gui. If the input is valid, the controller updates the model.
    """

    def __init__(self, model, view):
        """
        @param Model the model that contains all parameters for successfully
        parsing the input files and the input files themselves.
        """
        assert (model and view)
        self.model = model
        self.view = view
        self._initModel()

    def _initModel(self):
        """ Fixes the model initially """
        self.model[self.model.STATUS] = "ready"
        # Only select real files and create an absolute path
        self.model.set_files(
            [p.abspath(i) for i in self.model.files() if p.isfile(i)]
        )
        self.model.set_files(sorted(set(self.model.files())))
        # create selected file in the model namespace
        if self.model.files():
            # set the first file as selected
            self.model.selected.append(self.model.files()[0])
        if self.model.output_dir():
            self.updateDirectory(self.model.output_dir())
        if self.model.stimulus_dir():
            self.updateStimDir(self.model.stimulus_dir())

    def updateAction(self, string):
        self.model[self.model.ACTION] = string

    def updateStatus(self, string):
        s = str(string)
        self.model[self.model.STATUS] = s

    def updateEye(self, string):
        if string == "left":
            self.model[self.model.EXTRACT_LEFT] = True
            self.model[self.model.EXTRACT_RIGHT] = False
        elif string == "right":
            self.model[self.model.EXTRACT_LEFT] = False
            self.model[self.model.EXTRACT_RIGHT] = True
        elif string == "both":
            # if both are false the default is to select both
            self.model[self.model.EXTRACT_LEFT] = False
            self.model[self.model.EXTRACT_RIGHT] = False
        else:
            raise RuntimeError("Invalid string in updateEye")

    def updateSmooth(self, state):
        if state == QtCore.Qt.Checked:
            if self.model[self.model.SMOOTH]:
                # everything is already alright
                return
        elif state == QtCore.Qt.Unchecked:
            if not self.model[self.model.SMOOTH]:
                # everything is ok
                return
        else:
            raise ValueError("state must be either QtCore.Qt.Checked or Unchecked")
        self.model[self.model.SMOOTH] = not self.model[self.model.SMOOTH]

    def updateSmoothWindowSize(self, string):
        value = int(string)
        if value % 2 == 0 or value < 3:
            raise ValueError("Value must be odd and larger than 2")
        self.model[self.model.SMOOTHWIN] = value

    def updateSmoothOrder(self, string):
        value = int(string)
        if value < 1:
            raise ValueError("Smooth order must be larger than 0")
        self.model[self.model.SMOOTHORDER] = value

    def updateThreshold(self, string):
        if string == "mean":
            self.model[self.model.THRESHOLD] = "mean"
        elif string == "median":
            self.model[self.model.THRESHOLD] = "median"
        else:
            raise ValueError(("The method for determining the threshold must be"
                              " either mean or median"))

    def updateNThreshold(self, real):
        """
        Set the number of times the mean or median must be taken to get
        the threshold.
        @param real number larger than 0
        """
        value = float(real)
        if value <= 0.0:
            return
        self.model[self.model.NTHRESHOLD] = value

    def updateFiles(self, filenamelist):
        """ Set filenamelist as the new selected files. """
        filenamelist = [str(i) for i in filenamelist]
        for i in filenamelist:
            # remove possible duplicates
            if not (i in self.model.files()):
                self.model.files().append(i)
        self.model.files().sort()

    def updateDefaultOpenDir(self, d):
        """ Store the default open directory """
        if p.isdir(d):
            dirs = self.model.set_file_dir(d)

    def updateDirectory(self, dirname):
        """ Updates the output directory to be dirname """
        dirname = str(dirname)
        if p.isdir(dirname):
            self.model.set_output_dir(p.abspath(dirname))
        else:
            self.model.set_output_dir(p.abspath("."))

    def updateStimDir(self, dirname):
        """ Updates the stimulus directory to be dirname """
        dirname = str(dirname)
        if p.isdir(dirname):
            self.model.set_stimulus_dir(p.abspath(dirname))
        else:
            self.model.set_stimulus_dir(p.abspath("."))

    def updateSelected(self, flist):
        """ Sets the new selected items in the file list. """
        flist = sorted([str(i) for i in flist])
        if flist != self.model.selected:
            self.model.selected = flist

    def removeSelected(self, flist):
        """ Remove items selected for an action """
        flist = [str(i) for i in flist]
        removeset = set(flist)
        oldset = set(self.model.files())
        newset = oldset - removeset
        self.model.selected = []
        self.model.set_files(list(sorted(newset)))


class DirGroup(QtWidgets.QGroupBox):
    """
    The dir group handles all directories that are useful for the program.
    """

    def __init__(self, controller, model, mainwindow):
        super(DirGroup, self).__init__("Directories")
        self.controller = controller
        self.MODEL = model
        self.grid = QtWidgets.QGridLayout()
        self.mainwindow = mainwindow
        self._init()

    def _init(self):
        verticalbox = QtWidgets.QVBoxLayout()
        self.setLayout(verticalbox)

        # Adding widgets to select the output directory
        self.outdirlabel = QtWidgets.QLabel("Output directory:")
        self.outdiropenicon = QtGui.QIcon.fromTheme("folder")
        self.outdirbutton = QtWidgets.QPushButton(self.outdiropenicon, "open")
        self.outdirbox = QtWidgets.QHBoxLayout()

        # Setting tooltips and adding the directory widgets to the directory HBox
        self.outdirbutton.setToolTip("Select directory to write the output to.")
        self.outdirbox.addWidget(self.outdirlabel, 1, alignment=QtCore.Qt.AlignLeft)
        self.outdirbox.addWidget(self.outdirbutton)

        # Adding widgets to select the stimulus directory
        self.stimdirlabel = QtWidgets.QLabel("Stimulus directory:")
        self.stimdiropenicon = QtGui.QIcon.fromTheme("folder")
        self.stimdirbutton = QtWidgets.QPushButton(self.stimdiropenicon, "open")
        self.stimdirbox = QtWidgets.QHBoxLayout()

        # Setting tooltips and adding the directory widgets to the directory HBox
        self.stimdirbutton.setToolTip("Select directory to search for stimuli.")
        self.stimdirbox.addWidget(self.stimdirlabel, 1, alignment=QtCore.Qt.AlignLeft)
        self.stimdirbox.addWidget(self.stimdirbutton)

        # Connect signals
        self.outdirbutton.clicked.connect(self._openOutDir)
        self.stimdirbutton.clicked.connect(self._openStimDir)

        verticalbox.addLayout(self.outdirbox)
        verticalbox.addLayout(self.stimdirbox)
        self.setLayout(verticalbox)

    def _setOutDirlabel(self, dirname):
        name = "Output directory:\n \"{0:s}\"".format(dirname)
        self.outdirlabel.setText(name)

    def _setStimDirlabel(self, dirname):
        name = "Stimulus directory:\n \"{0:s}\"".format(dirname)
        self.stimdirlabel.setText(name)

    def updateFromModel(self):
        """ Examines the model. """
        self._setOutDirlabel(self.MODEL.output_dir())
        self._setStimDirlabel(self.MODEL.stimulus_dir())

    def _openOutDir(self):
        """ Shows the file dialog to choose a new dir. """
        d = self.MODEL.output_dir()
        folder = QtWidgets.QFileDialog.getExistingDirectory(
            caption="Select output directory",
            directory=d
        )
        if folder:
            self.controller.updateDirectory(folder)
            self.updateFromModel()

    def _openStimDir(self):
        """ Shows the file dialog to choose a new dir. """
        d = self.MODEL.stimulus_dir()
        folder = QtWidgets.QFileDialog.getExistingDirectory(
            caption="Select stimulus directory",
            directory=d
        )
        if folder:
            self.controller.updateStimDir(folder)
            self.updateFromModel()


class OptionGroup(QtWidgets.QGroupBox):
    """
    Option group contains mainly the parameters to control the detection of
    fixations and saccades. It also sets the main action of iSpector: either
    inspect the data or extract it for Fixation.
    """

    actiontip = ("Select <b>inspect</b> to look at the file, and\n"
                 "<b>extract</b> to create output for fixation.")
    smoothtip = ("Check to smooth the data with a Savitzky-Golay filter.\n"
                 "Note: use the same smoothing values over an entire experiment,\n"
                 "because the smoothing values have an influence on the duration\n"
                 "of saccades and fixations.")
    eyetip = "Select the eye to inspect/extract."
    smoothwintip = ("Select a value for the window size of the smoothing filter;\n"
                    "a bigger value means stronger smoothing.\n")
    smoothordertip = ("Select the polynomial order for the smoothing function;\n"
                      "using order=1 means you are using a moving average.")
    thresholdtip = ("Use mean or median to set the threshold for determining the\n"
                    "difference between fixations and saccades.")
    nthresholdtip = ("Enter a real number. This number is used to multiply with the\n"
                     "threshold to find the final value over the data. So the\n"
                     "final threshold = nthreshold * <mean|median>.")

    def __init__(self, controller, model, mainwindow):
        super(OptionGroup, self).__init__("Options")
        self.controller = controller
        self.MODEL = model
        self.grid = QtWidgets.QGridLayout()
        self.mainwindow = mainwindow
        self._init()

    def _handle(self, arg1=None):
        """ Calls the right event handler and updates the view """
        self.handlers[self.sender()](arg1)
        self.mainwindow.updateFromModel()

    def handleAction(self, index):
        string = self.actioncombo.itemText(index)
        self.controller.updateAction(string)

    def handleEye(self, index):
        string = self.eyecombo.itemText(index)
        self.controller.updateEye(string)

    def handleSmooth(self, value):
        self.controller.updateSmooth(value)

    def handleSmoothOrder(self, index):
        string = self.ordercombo.itemText(index)
        self.controller.updateSmoothOrder(string)

    def handleSmoothWindow(self, index):
        string = self.windowcombo.itemText(index)
        self.controller.updateSmoothWindowSize(string)

    def handleThreshold(self, index):
        string = self.thresholdcombo.itemText(index)
        self.controller.updateThreshold(string)

    def handleNThreshold(self, event):
        string = self.nthresholdentry.text()
        self.controller.updateNThreshold(string)

    def _init(self):
        """
        Places all Qt widgets in a grid and puts the grid inside the
        group widget.
        """
        self.setFlat(False)
        self.setLayout(self.grid)
        self._addLabel("Action:", 0, 0)
        self._addLabel("Inspect/Extract eye:", 1, 0)
        self._addLabel("Smooth:", 2, 0)
        self._addLabel("Smoothwindow:", 3, 0)
        self._addLabel("Smoothorder:", 4, 0)
        self._addLabel("Threshold:", 5, 0)
        self._addLabel("NThreshold:", 6, 0)

        # A combobox that sets the main action of the program.
        combo = QtWidgets.QComboBox()
        combo.setToolTip(self.actiontip)
        combo.addItems(MainGuiModel.VALID_ACTIONS)
        combo.activated.connect(self._handle)
        self.grid.addWidget(combo, 0, 1)
        self.actioncombo = combo

        # Select the eye(s) to inspect or extract.
        combo = QtWidgets.QComboBox()
        combo.setToolTip(self.eyetip)
        combo.addItems(["left", "right", "both"])
        combo.activated.connect(self._handle)
        self.grid.addWidget(combo, 1, 1)
        self.eyecombo = combo

        # Allow the user to select smoothing via this checkbox.
        checkbox = QtWidgets.QCheckBox()
        checkbox.setToolTip(self.smoothtip)
        checkbox.stateChanged.connect(self._handle)
        self.grid.addWidget(checkbox, 2, 1)
        self.smoothcheckbox = checkbox

        # Allow the user to select a smoothing window size;
        # all window sizes are valid.
        combo = QtWidgets.QComboBox()
        combo.setToolTip(self.smoothwintip)
        combo.addItems(SMOOTH_WINDOW_CHOICES)
        combo.activated.connect(self._handle)
        self.grid.addWidget(combo, 3, 1)
        self.windowcombo = combo

        # Let the user select a valid smoothing order
        # for the Savitzky-Golay filter.
        combo = QtWidgets.QComboBox()
        combo.setToolTip(self.smoothordertip)
        combo.addItems(["1", "2", "3", "4", "5"])
        combo.activated.connect(self._handle)
        self.grid.addWidget(combo, 4, 1)
        self.ordercombo = combo

        # Let the user select the method to set the base threshold.
        combo = QtWidgets.QComboBox()
        combo.setToolTip(self.thresholdtip)
        combo.addItems(["median", "mean"])
        combo.activated.connect(self._handle)
        self.grid.addWidget(combo, 5, 1)
        self.thresholdcombo = combo

        # Allow the user to enter a factor to select the final threshold.
        # So the final threshold = base threshold * the value they enter here.
        entry = QtWidgets.QLineEdit()
        validator = QtGui.QDoubleValidator()
        validator.setNotation(QtGui.QDoubleValidator.StandardNotation)
        validator.setRange(0, 99, 4)
        entry.setValidator(validator)
        entry.setText(str(self.MODEL[self.MODEL.NTHRESHOLD]))
        entry.setToolTip(self.nthresholdtip)
        entry.editingFinished.connect(self._handle)
        self.grid.addWidget(entry, 6, 1)
        self.nthresholdentry = entry

        # When an event happens this class maps the sender (the key)
        # to the handler (the value) of the next dict; the handler
        # will handle the event.
        self.handlers = {
            self.actioncombo: self.handleAction,
            self.eyecombo: self.handleEye,
            self.smoothcheckbox: self.handleSmooth,
            self.windowcombo: self.handleSmoothWindow,
            self.ordercombo: self.handleSmoothOrder,
            self.thresholdcombo: self.handleThreshold,
            self.nthresholdentry: self.handleNThreshold
        }

    def _addLabel(self, string, row, column, rowspan=1, heightspan=1):
        """ Utility to add labels to the layout """
        label = QtWidgets.QLabel(string)
        self.grid.addWidget(label, row, column, rowspan, heightspan)
        label.show()

    def updateFromModel(self):
        """
        Sets the option combos to the values in the model.
        """
        comboSelectString(self.actioncombo, self.MODEL[self.MODEL.ACTION])

        if ((self.MODEL[self.MODEL.EXTRACT_LEFT] and
                self.MODEL[self.MODEL.EXTRACT_RIGHT]) or
                (not self.MODEL[self.MODEL.EXTRACT_LEFT] and
                 not self.MODEL[self.MODEL.EXTRACT_RIGHT])):
            comboSelectString(self.eyecombo, "both")
        elif self.MODEL[self.MODEL.EXTRACT_LEFT]:
            comboSelectString(self.eyecombo, "left")
        else:
            comboSelectString(self.eyecombo, "right")

        if self.MODEL[self.MODEL.SMOOTH]:
            self.smoothcheckbox.setCheckState(QtCore.Qt.Checked)
            self.windowcombo.setEnabled(True)
            self.ordercombo.setEnabled(True)
            try:
                comboSelectString(self.windowcombo, str(self.MODEL[self.MODEL.SMOOTHWIN]))
            except NoSuchString as e:
                sys.stderr.write(str(e) + "\n")
            try:
                comboSelectString(self.ordercombo, str(self.MODEL[self.MODEL.SMOOTHORDER]))
            except NoSuchString as e:
                sys.stderr.write(str(e) + "\n")
        else:
            self.smoothcheckbox.setCheckState(QtCore.Qt.Unchecked)
            self.windowcombo.setEnabled(False)
            self.ordercombo.setEnabled(False)

        comboSelectString(self.thresholdcombo, self.MODEL[self.MODEL.THRESHOLD])
        self.nthresholdentry.setText(str(self.MODEL[self.MODEL.NTHRESHOLD]))


class FileEntry(QtWidgets.QListWidgetItem):
    """
    FileEntry can be cast to a string. It displays the filename, but keeps
    the directory in mind.
    """

    def __init__(self, directory, fname, parent):
        super(FileEntry, self).__init__(fname, parent)
        self.directory = directory
        self.fname = fname
        self.setToolTip(str(self))

    def __str__(self):
        """ Create an absolute pathname. """
        return p.join(self.directory, self.fname)


class FileEntryList(QtWidgets.QListWidget):
    """
    Handles a delete keypress to remove one entry from the model and view.
    """

    def __init__(self, FileView, parent=None):
        super(FileEntryList, self).__init__(parent)
        self.view = FileView

    def keyPressEvent(self, e):
        if e.key() == QtCore.Qt.Key_Delete:
            self.view.removeSelected()


class InputOutput(QtWidgets.QVBoxLayout):
    """
    InputOutput helps to display the files used as input for the fixation
    detection algorithms, either to display them or to extract data for
    Fixation. It also allows setting the output directory.
    """

    FILTERS = "EyeData (*.csv *.asc);;all (*)"
    EYE_FILT = "EyeData"

    def __init__(self, controller, model):
        """ Initializes model, controller and finally the gui elements """
        super(InputOutput, self).__init__()
        self.MODEL = model
        self.controller = controller
        self.files = []  # the input files
        self._init()

    def _init(self):
        """ Initializes the gui elements """
        label = QtWidgets.QLabel("Input files:")
        self.addWidget(label)
        self.fileviewwidget = FileEntryList(self)  # QtWidgets.QListWidget()
        self.fileviewwidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.fileviewwidget.itemSelectionChanged.connect(self.onSelection)
        self.addWidget(self.fileviewwidget)

    def onSelection(self):
        """ Called when the selection changes. """
        items = self.fileviewwidget.selectedItems()
        self.controller.updateSelected(items)
        # don't update, causes infinite recursion

    def _openFiles(self):
        """
        This opens files, and updates the data model.
        """
        d = self.MODEL.file_dir()
        l, _filt = QtWidgets.QFileDialog.getOpenFileNames(
            caption="Select input file(s)",
            directory=d,
            filter=self.FILTERS,
            initialFilter=self.EYE_FILT
        )
        if l:
            self.controller.updateFiles(l)
            path = str(l[0])
            self.controller.updateDefaultOpenDir(p.dirname(path))
        self.updateFromModel()

    def removeSelected(self):
        """ Delete the selected items """
        l = self.fileviewwidget.selectedItems()
        self.controller.removeSelected(l)
        self.updateFromModel()

    def updateFromModel(self):
        """ Add filenames to the view """
        modelset = set(self.MODEL.files())
        currentset = set([])
        selected = self.MODEL.selected
        itemlist = self.fileviewwidget.findItems("*", QtCore.Qt.MatchWildcard)
        for i in itemlist:
            currentset.add(str(i))
        currentset = currentset & modelset
        self.fileviewwidget.clear()
        for i in self.MODEL.files():
            directory, filename = p.split(i)
            item = FileEntry(directory, filename, self.fileviewwidget)
            if i in selected:
                item.setSelected(True)
            else:
                item.setSelected(False)


##
# The main window of iSpector
#
# This is the main window. It is essentially no longer a wrapper around the
# commandline arguments of examine_csv.
#
# \note iSpector actually started as a terminal program instead of a gui;
# some things just keep expanding :D!!
class ISpectorGui(QtWidgets.QMainWindow):

    ## window title, can still be improved
    _WINDOW_TITLE = iSpectorVersion.getVersion() +\
        " (if it starts with eye it \"must\" be good)"

    ##
    # Construct the main window.
    def __init__(self, model):
        """ Inits the main window """
        super(ISpectorGui, self).__init__()
        ## the controller from a MVC gui implementation
        self.controller = Controller(model, self)
        ## the model from a MVC gui implementation
        self.MODEL = model
        # init Qt related stuff
        self._init()

    ##
    # Add all gui stuff to the main window.
    def _init(self):
        # Set main window appearance.
        self.setWindowTitle(self._WINDOW_TITLE)
        self.setWindowIcon(QtGui.QIcon(LOGO))

        # Adding main widget to the window
        centralwidget = QtWidgets.QWidget(self)
        self.setCentralWidget(centralwidget)

        ## a grid to layout all other widgets.
        self.grid = QtWidgets.QGridLayout()
        self.grid.setColumnStretch(1, 1)
        self.grid.setColumnStretch(0, 10)
        centralwidget.setLayout(self.grid)

        # Make the options
        ## an OptionGroup to configure "global" parameters.
        self.options = OptionGroup(self.controller, self.MODEL, self)
        self.options.setFlat(False)
        self.grid.addWidget(self.options, 0, 1)

        # Create the directory widget
        self.dirs = DirGroup(self.controller, self.MODEL, self)
        self.dirs.setFlat(False)
        self.grid.addWidget(self.dirs, 1, 1)

        ## The file selector
        self.files = InputOutput(self.controller, self.MODEL)
        self.grid.addLayout(self.files, 0, 0, 2, 1,
                            QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)

        ## The action button to start the iSpector action.
        self.actionbutton = QtWidgets.QPushButton("")
        self.actionbutton.clicked.connect(self.doAction)
        self.grid.addWidget(self.actionbutton, 3, 1, alignment=QtCore.Qt.AlignRight)

        ## The box wherein status messages are displayed.
        self.statusbox = statusbox.StatusBox()
        templayout = QtWidgets.QVBoxLayout()
        templabel = QtWidgets.QLabel("Status messages: ")
        templayout.addWidget(templabel)
        templayout.addWidget(self.statusbox)
        self.grid.addLayout(templayout, 2, 0, 1, 2)

        # create exit handling and keyboard shortcuts.
        icon = QtGui.QIcon.fromTheme("window-close")
        exitAction = QtWidgets.QAction(icon, '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(QtWidgets.qApp.quit)

        icon = QtGui.QIcon.fromTheme("document-open")
        openAction = QtWidgets.QAction(icon, '&Open', self)
        openAction.setShortcut('Ctrl+O')
        openAction.setStatusTip('Open file(s)')
        openAction.triggered.connect(self.files._openFiles)

        # add menubar
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAction)
        fileMenu.addAction(openAction)

        # Update the entire window before showing it.
        self.updateFromModel()
        self.show()

    ##
    # Catch events and check whether it is a status event.
    #
    # The event method is overridden to capture custom user events,
    # for example if a user reports a status.
    #
    def event(self, e):
        if e.type() == ievent.StatusEvent.my_user_event_type:
            status = e.get_status()
            self.statusbox.addMessage(status)
            return True
        return super(ISpectorGui, self).event(e)

    ##
    # returns a tuple of the data model and the controller
    def getModel(self):
        return self.MODEL, self.controller

    ##
    # Show a status message in the iSpector main gui.
    #
    # This function shows a status message in the iSpector gui;
    # since it posts an event it should be thread safe.
    #
    # \param status one of StatusMessage.ok, error or warning.
    # \param message a string with information to display.
    def reportStatus(self, status, message):
        status = sm.StatusMessage(status, message)
        event = ievent.StatusEvent(status)
        QtWidgets.QApplication.postEvent(self, event)

    ##
    # Read the model and update the view.
    def updateFromModel(self):
        self.statusBar().showMessage(self.MODEL[self.MODEL.STATUS])
        self.options.updateFromModel()
        self.files.updateFromModel()
        self.dirs.updateFromModel()
        actiontxt = self.MODEL[self.MODEL.ACTION]
        self.actionbutton.setText(actiontxt)

    ##
    # Examines all selected files.
    def examine(self, filelist):
        filelist = []
        if len(self.MODEL.selected) > 0:
            filelist = self.MODEL.selected
        else:
            filelist = self.MODEL.files()
        model = datamodel.ExamineDataModel(filelist, self)
        controller = datamodel.ExamineDataController(model)
        examinewidget = inspecteyedataview.InspectEyeDataView(
            model, controller, self
        )
        if examinewidget.hasValidData():
            examinewidget.show()
        else:
            self.reportStatus(sm.StatusMessage.error,
                              "Unable to load valid data for inspection.")

    def editFixations(self, filelist):
        filelist = []
        if len(self.MODEL.selected) > 0:
            filelist = self.MODEL.selected
        else:
            filelist = self.MODEL.files()
        model = fixationeditor.FixationEditModel(
            filelist, self, fixationeditor.SHOW_ALL
        )
        controller = fixationeditor.FixationEditController(model)
        fix_edit_widget = fixationeditor.FixationEditView(model, controller, self)
        if fix_edit_widget.hasValidData():
            fix_edit_widget.show()
        else:
            self.reportStatus(sm.StatusMessage.error,
                              "Unable to load valid data for inspection.")

    def _createOutputFilename(self, experiment, fname, outdir):
        """
        Create a suitable absolute output pathname based on the input of the
        experiment or the output.
        """
        # create output filename
        expname = experiment.getFixationName()  # take a Fixation compatible output name
        tempdir, shortfname = p.split(fname)
        odir = ""
        if outdir:
            # use the specified output dir
            odir = outdir
        else:
            # use the origin file dir (put in- and output alongside each other)
            odir = tempdir
        # absolute path to the new filename
        absoutput = p.join(odir, expname)

        # Warn the user if the file already exists.
        if p.exists(absoutput):
            msg = ("The file \"" + absoutput + "\" already exists.\n"
                   "Do you want to overwrite it?")
            dlg = QtWidgets.QMessageBox(
                QtWidgets.QMessageBox.Warning,
                "File exists",
                msg,
                QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
            )
            if dlg.exec_() == QtWidgets.QMessageBox.Cancel:
                return None
        return absoutput

    def extractForFixation(self, filelist, outdir=""):
        for fname in filelist:
            # inform the user
            msg = "processing file: \"" + fname + "\""
            self.reportStatus(sm.StatusMessage.ok, msg)
            self.statusBar().showMessage(msg)

            pr = parseEyeFile(fname)
            entries = pr.getEntries()
            if not entries:
                # add error info to gui
                dlg = QtWidgets.QMessageBox(
                    QtWidgets.QMessageBox.Critical,
                    "Parse Error",
                    "Unable to parse \"" + fname + "\"",
                    QtWidgets.QMessageBox.Ok
                )
                dlg.exec_()
                continue

            entries = list(LogEntry.removeEyeEvents(entries))

            # Optionally filter right or left gaze from the experiment
            if (self.MODEL[self.MODEL.EXTRACT_RIGHT] and
                    self.MODEL[self.MODEL.EXTRACT_LEFT]) or \
                    (not self.MODEL[self.MODEL.EXTRACT_LEFT] and
                     not self.MODEL[self.MODEL.EXTRACT_RIGHT]):
                # If both are specified or if none are specified extract both eyes
                pass
            elif self.MODEL[self.MODEL.EXTRACT_LEFT]:
                entries = list(LogEntry.removeRightGaze(entries))
            elif self.MODEL[self.MODEL.EXTRACT_RIGHT]:
                entries = list(LogEntry.removeLeftGaze(entries))
            else:
                raise RuntimeError("The control flow shouldn't get here.")

            experiment = EyeExperiment(entries)
            assert experiment

            # Obtain the filename
            absoutput = self._createOutputFilename(experiment, fname, outdir)
            if not absoutput:
                continue

            # Determine our own fixations and saccades.
            for t in experiment.trials:
                if t.containsGazeData():
                    thres = self.MODEL[self.MODEL.THRESHOLD]
                    nthres = self.MODEL[self.MODEL.NTHRESHOLD]
                    smooth = self.MODEL[self.MODEL.SMOOTH]
                    winsz = self.MODEL[self.MODEL.SMOOTHWIN]
                    order = self.MODEL[self.MODEL.SMOOTHORDER]
                    eyedata = EyeData(thres, nthres, smooth, winsz, order)
                    eyedata.processTrial(t, True)
                    lfixes, rfixes = eyedata.getFixations()
                    rsacs, lsacs = eyedata.getSaccades()
                    entries.extend(lfixes)
                    entries.extend(rfixes)
                    entries.extend(lsacs)
                    entries.extend(rsacs)

            # finally save the output.
            saveForFixation(entries, absoutput)

        self.controller.updateStatus("Finished")
        self.updateFromModel()

    def doAction(self):
        """
        Runs the action, either to inspect or extract the selected items.
        """
        files = self.MODEL.selected
        if not files:
            # if no files are selected ask whether all files in the
            # listview should be processed.
            self.reportStatus(sm.StatusMessage.warning,
                              "No files selected, running action on all files")
            files = self.MODEL.files()
            if not files:
                return
        if self.MODEL[self.MODEL.ACTION] == self.MODEL.EXTRACT:
            d = self.MODEL.output_dir()
            self.extractForFixation(files, d)
        elif self.MODEL[self.MODEL.ACTION] == self.MODEL.INSPECT:
            self.examine(files)
        elif self.MODEL[self.MODEL.ACTION] == self.MODEL.EDIT_FIXATIONS:
            self.editFixations(files)
        else:
            raise RuntimeError("Invalid key in self.MODEL[self.MODEL.ACTION]")

    ##
    # Save the config file and exit
    def closeEvent(self, event):
        self.MODEL.configfile.write()
        super(ISpectorGui, self).closeEvent(event)


class MainGuiModel(dict):
    """ The model of the parameters of iSpector """

    SMOOTH = "smooth"                ##< bool
    SMOOTHWIN = "smoothwin"          ##< int
    SMOOTHORDER = "smoothorder"      ##< int
    THRESHOLD = "threshold"          ##< string
    NTHRESHOLD = "nthreshold"        ##< float
    DRAWSACCADES = "drawsaccades"    ##< bool
    COMPARE = "compare"              ##< bool
    ACTION = "action"                ##< string
    EXTRACT_LEFT = "extract-left"    ##< bool
    EXTRACT_RIGHT = "extract-right"  ##< bool
    #DIRS = "dirs"                   ##< dict
    #FILES = "files"                 ##< list[string]
    #SELECTED = "selected"           ##< list[string]
    STATUS = "status"                ##< string

    # strings related to a certain action
    ## string used for inspecting
    INSPECT = "inspect"
    ## string used for extracting
    EXTRACT = "extract"
    ## string used for the edit fixations action
    EDIT_FIXATIONS = "edit fixations"

    ## a list of strings with valid actions that iSpector can do.
    VALID_ACTIONS = [
        INSPECT,
        EXTRACT,
        EDIT_FIXATIONS
    ]

    def __init__(self, cmdargs):
        """
        First we set the values from the configuration file, then we
        set/overwrite them from the command line arguments.
        """
        ##
        # Configurations that are stored between runs
        self.configfile = utils.configfile.ConfigFile()
        self.configfile.parse()

        ##
        # If there were missing items in the config file this is set to False.
        self.read_config = self._check_config()

        if cmdargs.stim_dir:
            self.set_stimulus_dir(cmdargs.stim_dir)
        if cmdargs.output_dir:
            self.set_output_dir(cmdargs.output_dir)
        if cmdargs.files:
            self.set_files(cmdargs.files)

        ##
        # The files that are selected.
        self.selected = []

        self[self.SMOOTH] = cmdargs.smooth
        self[self.SMOOTHWIN] = cmdargs.swin
        self[self.SMOOTHORDER] = cmdargs.sorder
        self[self.THRESHOLD] = cmdargs.threshold
        self[self.NTHRESHOLD] = cmdargs.nthres
        self[self.DRAWSACCADES] = cmdargs.draw_saccades
        self[self.COMPARE] = cmdargs.compare
        self[self.ACTION] = cmdargs.action
        self[self.EXTRACT_LEFT] = cmdargs.extract_left
        self[self.EXTRACT_RIGHT] = cmdargs.extract_right
        self[self.STATUS] = "ready"

    def readConfig(self):
        """Determine whether iSpector was able to read/parse the config file."""
        return self.read_config

    def config_dirs(self):
        """Obtain the map with common directories."""
        return self.configfile[utils.configfile.DIR]

    def stimulus_dir(self):
        """Get the stimulus directory."""
        return self.config_dirs()[utils.configfile.STIMDIR]

    def set_stimulus_dir(self, stimdir):
        """Set the stimulus directory."""
        self.config_dirs()[utils.configfile.STIMDIR] = stimdir

    def output_dir(self):
        """Get the output directory."""
        return self.config_dirs()[utils.configfile.OUTPUTDIR]

    def set_output_dir(self, outputdir):
        """Set the output directory."""
        self.config_dirs()[utils.configfile.OUTPUTDIR] = outputdir

    def file_dir(self):
        """Get the directory in which the last file was opened."""
        return self.config_dirs()[utils.configfile.FILEDIR]

    def set_file_dir(self, filedir):
        """Set the directory in which the last file was opened."""
        self.config_dirs()[utils.configfile.FILEDIR] = filedir

    def set_files(self, files):
        """Set the files on which iSpector operates."""
        self.configfile[utils.configfile.FILE_HIST] = list(files)

    def files(self):
        """Obtain the list of files on which iSpector operates."""
        return self.configfile[utils.configfile.FILE_HIST]

    def _check_config(self):
        """Check the config file to see whether the expected members are present.

        This function can fail if a necessary element of the expected data
        members isn't present. If it isn't, then this function should
        initialize it with a default value.

        @return This function returns True when there were no expected data
        members missing.
        """
        missing = False
        if utils.configfile.DIR not in self.configfile:
            self.configfile[utils.configfile.DIR] = utils.configfile.ConfigDir()
            missing = True
        if utils.configfile.FILE_HIST not in self.configfile:
            self.configfile[utils.configfile.FILE_HIST] = list()
            missing = True
        return not missing
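A minimal launch sketch for the MVC classes above (not part of the original file; the argparse flag names are hypothetical, chosen only to match the cmdargs attributes MainGuiModel reads):

# Launch sketch, assuming it runs where the iSpector packages are importable.
import argparse
from PyQt5 import QtWidgets

parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='*')
parser.add_argument('--stim-dir', dest='stim_dir', default='')
parser.add_argument('--output-dir', dest='output_dir', default='')
parser.add_argument('--smooth', action='store_true')
parser.add_argument('--swin', type=int, default=7)
parser.add_argument('--sorder', type=int, default=2)
parser.add_argument('--threshold', default='median', choices=['mean', 'median'])
parser.add_argument('--nthres', type=float, default=1.0)
parser.add_argument('--draw-saccades', dest='draw_saccades', action='store_true')
parser.add_argument('--compare', action='store_true')
parser.add_argument('--action', default=MainGuiModel.INSPECT,
                    choices=MainGuiModel.VALID_ACTIONS)
parser.add_argument('--extract-left', dest='extract_left', action='store_true')
parser.add_argument('--extract-right', dest='extract_right', action='store_true')

app = QtWidgets.QApplication(sys.argv)
gui = ISpectorGui(MainGuiModel(parser.parse_args()))  # builds and shows the window
sys.exit(app.exec_())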
gpl-2.0
medworx/MedUX
medux/plugins/core/__init__.py
#
# Copyright (C) 2013 Christian A. Reiter
#
# This file is part of MedUX.
#
# MedUX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MedUX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MedUX. If not, see <http://www.gnu.org/licenses/>.
#

import warnings

from PyQt4.QtCore import QFileInfo, QDir, QSettings, QObject
from PyQt4.QtGui import QMainWindow, QPrinter, QWidget, QApplication

from medux.extensionsystem.components import Component, implements
from medux.extensionsystem.interfaces import IPlugin
from medux.plugins.core.constants import *
from medux.plugins.core.icontext import IContext, Context
from medux.plugins.core.interfaces import ICore, IOptionsPage
from medux.plugins.core.itemid import ItemId
from medux.plugins.core.mainwindow import MainWindow
from medux.plugins.core.settings import SettingsDatabase
from medux.plugins.core import Constants
from medux.plugins.core.command import Command


class Core(Component):
    """
    The Core class provides central access to all actions, settings etc. that
    should be in a shared state.

    It implements the Borg pattern, so you can just create a new Core() object
    and access it; you will always use the main application's settings. The
    initial instance is created by the core plugin during initialization, so
    you can be sure all properties and class variables (including
    \l mainWindow) are accessible at any time.
    """

    # implement the ICore interface
    implements(ICore)

    #FIXME This object should be in a gui module, not here, as Core has direct access to the GUI
    #FIXME: settingsDatabase object should be Pythonic!!!
    #settingsDatabase = SettingsDatabase(QFileInfo(settings.fileName()).path(), "MedUX")

    mainWindow = None

    def __init__(self):
        """ Initialize Core """
        super().__init__()

    def setMainWindow(self, mainWindow: QMainWindow):
        """Sets the mainWindow property."""
        # Assign to the class attribute: the classmethods below read
        # cls.mainWindow, and all Core instances share the window this way.
        Core.mainWindow = mainWindow

    @classmethod
    def showNewItemDialog(cls, title: str, wizards, defaultLocation="",
                          extraVariables=None):
        """
        :type wizards: list of IWizard
        """
        if not extraVariables:
            extraVariables = {}
        cls.mainWindow.showNewItemDialog(title, wizards, defaultLocation,
                                         extraVariables)

    @staticmethod
    def showOptionsDialog(groupId, pageId, parent=None):
        """
        :type groupId: ItemId
        :type pageId: ItemId
        :type parent: QObject
        """
        return Core.mainWindow.showOptionsDialog(groupId, pageId, parent)

    @classmethod
    def showWarningWithOptions(cls, title: str, text: str, details="",
                               settingsCategoryId=None, settingsId=None,
                               parent=None):
        """
        :type settingsId: ItemId
        :type settingsCategoryId: ItemId
        """
        if settingsCategoryId is None:
            settingsCategoryId = ItemId()
        if settingsId is None:
            settingsId = ItemId()
        return cls.mainWindow.showWarningWithOptions(
            title, text, details, settingsCategoryId, settingsId, parent)

    @classmethod
    def settings(cls, scope: QSettings.Scope) -> QSettings:
        return cls.mainWindow.settings(scope)

    @classmethod
    def settingsDatabase(cls) -> SettingsDatabase:
        return cls.mainWindow.settingsDatabase

    @classmethod
    def printer(cls) -> QPrinter:
        return cls.mainWindow.printer()

    @classmethod
    def userInterfaceLanguage(cls) -> str:
        #FIXME: this probably does NOT work.
        return QApplication.qtc_locale

    @classmethod
    def userResourcePath(cls) -> str:
        configDir = QFileInfo(cls.settings(QSettings.UserScope).fileName()).path()
        urp = configDir + "/medux"
        # TODO: make this more pythonic
        fi = QFileInfo(urp + "/")
        if not fi.exists():
            dir = QDir()
            if not dir.mkpath(urp):
                warnings.warn("could not create {}".format(urp))
        return urp

    @classmethod
    def versionString(cls):
        ideVersionDescription = " ({})".format(Constants.IDE_VERSION_DESCRIPTION_STR)
        return "MedUX {}{}".format(Constants.IDE_VERSION_LONG, ideVersionDescription)

    @classmethod
    def statusBar(cls):
        return cls.mainWindow.statusBar()

    @classmethod
    def raiseWindow(cls, widget: QWidget):
        if not widget:
            return
        window = widget.window()
        """ :type: QWidget """
        if window is cls.mainWindow:
            cls.mainWindow.raiseWindow()
        else:
            window.raise_()
            window.activateWindow()

    @classmethod
    def updateAdditionalContexts(cls, remove: Context, add: Context):
        cls.mainWindow.updateAdditionalContexts(remove, add)

    @classmethod
    def addContextObject(cls, context: IContext):
        cls.mainWindow.addContextObject(context)

    @classmethod
    def removeContextObject(cls, context: IContext):
        cls.mainWindow.removeContextObject(context)

    @classmethod
    def saveSettings(cls):
        cls.saveSettingsRequested.emit()
        Core.settings(QSettings.SystemScope).sync()
        Core.settings(QSettings.UserScope).sync()


class CorePlugin(Component):

    implements(IPlugin)

    def __init__(self, *args, **kwargs):
        """Creates a MainWindow"""
        super().__init__(*args, **kwargs)
        self.mainWindow = MainWindow()

    def initialize(self, **kwargs):
        pass

    def extensionsInitialized(self):
        pass

    def delayedInitialize(self):
        return False

    def aboutToShutdown(self):
        pass

    def shutDown(self):
        pass
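A brief usage sketch (not part of MedUX; it assumes a running QApplication and that CorePlugin can construct its MainWindow outside the extension system):

# Usage sketch: Core shares state through class attributes, so the
# classmethods see the window registered by any caller.
app = QApplication([])
plugin = CorePlugin()                    # builds the MainWindow
Core().setMainWindow(plugin.mainWindow)  # registers it on the Core class
print(Core.versionString())              # e.g. "MedUX <version> (<description>)"
Core.statusBar().showMessage("ready")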
gpl-3.0
MeGotsThis/BotGotsThis
tests/unittest/test_messagingqueue.py
import itertools
import math
import unittest

from bot.coroutine.connection import ConnectionHandler
from bot.data import Channel, MessagingQueue
from bot.data._messaging_queue import ChatMessage, WhisperMessage
from datetime import datetime, timedelta
from unittest.mock import call, patch


class BaseTestMessagingQueue(unittest.TestCase):
    def setUp(self):
        self.queue = MessagingQueue()
        connection = ConnectionHandler('Twitch.TV', 'irc.twitch.tv', 6667)
        self.bgt_channel = Channel('botgotsthis', connection, -math.inf)
        self.bgt_channel.isMod = False
        self.mgt_channel = Channel('megotsthis', connection, -math.inf)
        self.mgt_channel.isMod = True
        self.mbt_channel = Channel('mebotsthis', connection, -math.inf)
        self.mbt_channel.isMod = False


class TestMessagingQueue(BaseTestMessagingQueue):
    @patch('bot.config', autospec=True)
    def test_ismod_own_channel(self, mock_config):
        mock_config.botnick = 'botgotsthis'
        self.assertIs(MessagingQueue._isMod(self.bgt_channel), True)

    @patch('bot.config', autospec=True)
    def test_ismod_mod_in_channel(self, mock_config):
        mock_config.botnick = 'botgotsthis'
        self.assertIs(MessagingQueue._isMod(self.mgt_channel), True)

    @patch('bot.config', autospec=True)
    def test_ismod_mod_in_channel_and_owner(self, mock_config):
        mock_config.botnick = 'megotsthis'
        self.assertIs(MessagingQueue._isMod(self.mgt_channel), True)

    @patch('bot.config', autospec=True)
    def test_ismod_not_own_(self, mock_config):
        mock_config.botnick = 'botgotsthis'
        self.assertIs(MessagingQueue._isMod(self.mbt_channel), False)

    def test_sendChat_channel_none(self):
        self.assertRaises(TypeError, self.queue.sendChat, None, '')

    def test_sendChat_channel_int(self):
        self.assertRaises(TypeError, self.queue.sendChat, 1, '')

    def test_sendChat_none(self):
        self.assertRaises(
            TypeError, self.queue.sendChat, self.bgt_channel, None)

    def test_sendChat_int(self):
        self.assertRaises(TypeError, self.queue.sendChat, self.bgt_channel, 1)

    def test_sendChat_str(self):
        self.queue.sendChat(self.bgt_channel, 'Hello Kappa !')
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 1)
        self.assertIsInstance(self.queue._chatQueues[1][0], ChatMessage)
        self.assertIs(self.queue._chatQueues[1][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[1][0].message, 'Hello Kappa !')

    def test_sendChat_list_str(self):
        self.queue.sendChat(self.bgt_channel,
                            ['Kappa ', 'Kappa Kappa ', 'Kappa Kappa Kappa '])
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 3)
        self.assertIs(self.queue._chatQueues[1][0].message, 'Kappa ')
        self.assertIs(self.queue._chatQueues[1][1].message, 'Kappa Kappa ')
        self.assertIs(self.queue._chatQueues[1][2].message,
                      'Kappa Kappa Kappa ')

    def test_sendChat_tuple_str(self):
        self.queue.sendChat(self.bgt_channel, ('a', 'b', 'c', 'd'))
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 4)
        self.assertIs(self.queue._chatQueues[1][0].message, 'a')
        self.assertIs(self.queue._chatQueues[1][1].message, 'b')
        self.assertIs(self.queue._chatQueues[1][2].message, 'c')
        self.assertIs(self.queue._chatQueues[1][3].message, 'd')

    def test_sendChat_generator_str(self):
        self.queue.sendChat(self.bgt_channel, (str(i) for i in range(10)))
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 10)
        self.assertEqual(self.queue._chatQueues[1][0].message, '0')
        self.assertEqual(self.queue._chatQueues[1][1].message, '1')
        self.assertEqual(self.queue._chatQueues[1][2].message, '2')
        self.assertEqual(self.queue._chatQueues[1][3].message, '3')
        self.assertEqual(self.queue._chatQueues[1][4].message, '4')
        self.assertEqual(self.queue._chatQueues[1][5].message, '5')
        self.assertEqual(self.queue._chatQueues[1][6].message, '6')
        self.assertEqual(self.queue._chatQueues[1][7].message, '7')
        self.assertEqual(self.queue._chatQueues[1][8].message, '8')
        self.assertEqual(self.queue._chatQueues[1][9].message, '9')

    def test_sendChat_generator_int(self):
        self.assertRaises(
            TypeError, self.queue.sendChat, self.bgt_channel, range(10))
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[1])
        self.assertFalse(self.queue._chatQueues[2])

    def test_sendChat_generator_str_int(self):
        self.assertRaises(TypeError, self.queue.sendChat, self.bgt_channel,
                          itertools.chain((str(i) for i in range(10)),
                                          range(10)))
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[1])
        self.assertFalse(self.queue._chatQueues[2])

    def test_sendChat_multiple_calls(self):
        self.queue.sendChat(self.bgt_channel,
                            'TBTacoLeft TBCheesePull TBTacoRight ')
        self.queue.sendChat(self.mgt_channel, '<3 :)')
        self.queue.sendChat(self.bgt_channel, 'duDudu duDudu duDudu ')
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 3)
        self.assertIs(self.queue._chatQueues[1][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[1][0].message,
                      'TBTacoLeft TBCheesePull TBTacoRight ')
        self.assertIs(self.queue._chatQueues[1][1].channel, self.mgt_channel)
        self.assertIs(self.queue._chatQueues[1][1].message, '<3 :)')
        self.assertIs(self.queue._chatQueues[1][2].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[1][2].message,
                      'duDudu duDudu duDudu ')

    def test_sendChat_highest_priority(self):
        self.queue.sendChat(self.bgt_channel, 'KevinTurtle', 0)
        self.assertFalse(self.queue._chatQueues[1])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[0]), 1)
        self.assertIsInstance(self.queue._chatQueues[0][0], ChatMessage)
        self.assertIs(self.queue._chatQueues[0][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[0][0].message, 'KevinTurtle')

    def test_sendChat_lowest_priority(self):
        self.queue.sendChat(self.bgt_channel, 'SwiftRage', -1)
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[1])
        self.assertEqual(len(self.queue._chatQueues[2]), 1)
        self.assertIsInstance(self.queue._chatQueues[2][0], ChatMessage)
        self.assertIs(self.queue._chatQueues[2][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[2][0].message, 'SwiftRage')

    def test_sendChat_priority_out_of_range_positive(self):
        self.assertRaises(ValueError, self.queue.sendChat, self.bgt_channel,
                          'SwiftRage', len(self.queue._chatQueues))
        self.assertFalse(any(self.queue._chatQueues))

    def test_sendChat_priority_out_of_range_negative(self):
        self.assertRaises(ValueError, self.queue.sendChat, self.bgt_channel,
                          'SwiftRage', -len(self.queue._chatQueues) - 1)
        self.assertFalse(any(self.queue._chatQueues))

    def test_sendChat_priority_multiple_calls(self):
        self.queue.sendChat(self.bgt_channel, ':)', 1)
        self.queue.sendChat(self.mgt_channel, ';)', 1)
        self.queue.sendChat(self.bgt_channel, ':/', 0)
        self.queue.sendChat(self.mgt_channel, ':(', 2)
        self.assertEqual(len(self.queue._chatQueues[0]), 1)
        self.assertEqual(len(self.queue._chatQueues[1]), 2)
        self.assertEqual(len(self.queue._chatQueues[2]), 1)
        self.assertIs(self.queue._chatQueues[0][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[0][0].message, ':/')
        self.assertIs(self.queue._chatQueues[1][0].channel, self.bgt_channel)
        self.assertIs(self.queue._chatQueues[1][0].message, ':)')
        self.assertIs(self.queue._chatQueues[1][1].channel, self.mgt_channel)
        self.assertIs(self.queue._chatQueues[1][1].message, ';)')
        self.assertIs(self.queue._chatQueues[2][0].channel, self.mgt_channel)
        self.assertIs(self.queue._chatQueues[2][0].message, ':(')

    def test_sendChat_disallowed_commands(self):
        self.queue.sendChat(self.bgt_channel, '.disconnect')
        self.queue.sendChat(self.bgt_channel, '/disconnect')
        self.queue.sendChat(self.bgt_channel, '.ignore megotsthis')
        self.queue.sendChat(self.bgt_channel, '/ignore botgotsthis')
        self.assertFalse(any(self.queue._chatQueues))

    def test_sendChat_allow_disallowed_commands(self):
        self.queue.sendChat(self.bgt_channel, '.disconnect', bypass=True)
        self.queue.sendChat(self.bgt_channel, '/disconnect', bypass=True)
        self.queue.sendChat(self.bgt_channel, '.ignore megotsthis',
                            bypass=True)
        self.queue.sendChat(self.bgt_channel, '/ignore botgotsthis',
                            bypass=True)
        self.assertFalse(self.queue._chatQueues[0])
        self.assertFalse(self.queue._chatQueues[2])
        self.assertEqual(len(self.queue._chatQueues[1]), 4)

    @patch.object(MessagingQueue, 'sendWhisper', autospec=True)
    def test_sendChat_whisper(self, mock_sendWhisper):
        self.queue.sendChat(self.bgt_channel, '.w botgotsthis Kappa')
        self.assertFalse(any(self.queue._chatQueues))
        mock_sendWhisper.assert_called_once_with(
            self.queue, 'botgotsthis', ['Kappa'])

    @patch.object(MessagingQueue, 'sendWhisper', autospec=True)
    def test_sendChat_whispers(self, mock_sendWhisper):
        self.queue.sendChat(self.bgt_channel, ['.w botgotsthis Kappa',
                                               '.w megotsthis KappaPride',
                                               '.w mebotsthis KappaClaus'])
        self.assertFalse(any(self.queue._chatQueues))
        mock_sendWhisper.assert_has_calls(
            [call(self.queue, 'botgotsthis', ['Kappa']),
             call(self.queue, 'megotsthis', ['KappaPride']),
             call(self.queue, 'mebotsthis', ['KappaClaus'])])

    @patch.object(MessagingQueue, 'sendWhisper', autospec=True)
    def test_sendChat_whispers_multi(self, mock_sendWhisper):
        self.queue.sendChat(self.bgt_channel, ['.w botgotsthis Kappa',
                                               '.w megotsthis KappaPride',
                                               '.w mebotsthis KappaClaus',
                                               '.w botgotsthis FrankerZ',
                                               '.w megotsthis RalpherZ',
                                               '.w mebotsthis ChefFrank'], 0)
        self.assertFalse(any(self.queue._chatQueues))
        mock_sendWhisper.assert_has_calls(
            [call(self.queue, 'botgotsthis', ['Kappa', 'FrankerZ']),
             call(self.queue, 'megotsthis', ['KappaPride', 'RalpherZ']),
             call(self.queue, 'mebotsthis', ['KappaClaus', 'ChefFrank'])])

    def test_sendWhisper_None(self):
        self.assertRaises(TypeError, self.queue.sendWhisper, None, 'PogChamp')

    def test_sendWhisper_str_None(self):
        self.assertRaises(TypeError, self.queue.sendWhisper, 'botgotsthis',
                          None)

    def test_sendWhisper_str(self):
        self.queue.sendWhisper('botgotsthis', 'PogChamp')
        self.assertEqual(len(self.queue._whisperQueue), 1)
        self.assertEqual(self.queue._whisperQueue[0].nick, 'botgotsthis')
        self.assertEqual(self.queue._whisperQueue[0].message, 'PogChamp')

    def test_sendWhisper_list(self):
        self.queue.sendWhisper('botgotsthis', ['PogChamp', 'KevinTurtle'])
        self.assertEqual(len(self.queue._whisperQueue), 2)
        self.assertEqual(self.queue._whisperQueue[0].nick, 'botgotsthis')
        self.assertEqual(self.queue._whisperQueue[0].message, 'PogChamp')
        self.assertEqual(self.queue._whisperQueue[1].nick, 'botgotsthis')
        self.assertEqual(self.queue._whisperQueue[1].message, 'KevinTurtle')

    def test_sendWhisper_tuple(self):
        self.queue.sendWhisper('botgotsthis', ('PogChamp', 'KevinTurtle'))
        self.assertEqual(len(self.queue._whisperQueue), 2)
        self.assertEqual(self.queue._whisperQueue[0].message, 'PogChamp')
        self.assertEqual(self.queue._whisperQueue[1].message, 'KevinTurtle')

    def test_sendWhisper_generator(self):
        self.queue.sendWhisper('botgotsthis', (str(i) for i in range(3)))
        self.assertEqual(len(self.queue._whisperQueue), 3)
        self.assertEqual(self.queue._whisperQueue[0].message, '0')
        self.assertEqual(self.queue._whisperQueue[1].message, '1')
        self.assertEqual(self.queue._whisperQueue[2].message, '2')

    def test_sendWhisper_multiple_calls(self):
        self.queue.sendWhisper('botgotsthis', 'PraiseIt')
        self.queue.sendWhisper('megotsthis', 'bleedPurple')
        self.assertEqual(len(self.queue._whisperQueue), 2)
        self.assertEqual(self.queue._whisperQueue[0].nick, 'botgotsthis')
        self.assertEqual(self.queue._whisperQueue[0].message, 'PraiseIt')
        self.assertEqual(self.queue._whisperQueue[1].nick, 'megotsthis')
        self.assertEqual(self.queue._whisperQueue[1].message, 'bleedPurple')

    @patch('bot.utils.now', autospec=True)
    @patch('bot.config', autospec=True)
    def test_cleanOldTimestamps(self, mock_config, mock_now):
        # Setup
        now = datetime(2000, 1, 1, 0, 0, 0)
        mock_config.messageSpan = 10
        mock_config.whiperSpan = 10
        mock_now.return_value = now
        self.queue._chatSent.extend(now + i * timedelta(seconds=1)
                                    for i in range(-20, 11))
        self.queue._whisperSent.extend(now + i * timedelta(seconds=1.5)
                                       for i in range(-10, 11))
        # Call
        self.queue.cleanOldTimestamps()
        # Check
        self.assertCountEqual(
            self.queue._chatSent,
            [now + i * timedelta(seconds=1) for i in range(-10, 11)])
        self.assertCountEqual(
            self.queue._whisperSent,
            [now + i * timedelta(seconds=1.5) for i in range(-6, 11)])

    def test_clearChat_empty(self):
        self.assertFalse(any(self.queue._chatQueues))
        self.queue.clearChat(self.bgt_channel)
        self.assertFalse(any(self.queue._chatQueues))

    def test_clearChat_single(self):
        self.queue._chatQueues[1].append(
            ChatMessage(self.bgt_channel, 'Kappa'))
        self.queue.clearChat(self.bgt_channel)
        self.assertFalse(any(self.queue._chatQueues))

    def test_clearChat_multiple(self):
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, '0'))
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, '1'))
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, '2'))
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, '3'))
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, '4'))
        self.queue.clearChat(self.bgt_channel)
        self.assertFalse(any(self.queue._chatQueues))

    def test_clearChat_mixing(self):
        self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'a'))
        self.queue._chatQueues[0].append(ChatMessage(self.mgt_channel, 'b'))
        self.queue._chatQueues[0].append(ChatMessage(self.mbt_channel, 'c'))
        self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'd'))
        self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'e'))
        self.queue._chatQueues[1].append(ChatMessage(self.mgt_channel, 'f'))
        self.queue._chatQueues[1].append(ChatMessage(self.mbt_channel, 'g'))
        self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'h'))
        self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'i'))
        self.queue._chatQueues[1].append(ChatMessage(self.mbt_channel, 'j'))
        self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'k'))
        self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'l'))
self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'm')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'n')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'o')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'p')) self.queue.clearChat(self.bgt_channel) self.assertEqual(len(self.queue._chatQueues[0]), 2) self.assertEqual(len(self.queue._chatQueues[1]), 3) self.assertFalse(self.queue._chatQueues[2]) self.assertIs(self.queue._chatQueues[0][0].channel, self.mgt_channel) self.assertIs(self.queue._chatQueues[0][1].channel, self.mbt_channel) self.assertIs(self.queue._chatQueues[1][0].channel, self.mgt_channel) self.assertIs(self.queue._chatQueues[1][1].channel, self.mbt_channel) self.assertIs(self.queue._chatQueues[1][2].channel, self.mbt_channel) def test_clearAllChat(self): self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'a')) self.queue._chatQueues[0].append(ChatMessage(self.mgt_channel, 'b')) self.queue._chatQueues[0].append(ChatMessage(self.mbt_channel, 'c')) self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'd')) self.queue._chatQueues[0].append(ChatMessage(self.bgt_channel, 'e')) self.queue._chatQueues[1].append(ChatMessage(self.mgt_channel, 'f')) self.queue._chatQueues[1].append(ChatMessage(self.mbt_channel, 'g')) self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'h')) self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'i')) self.queue._chatQueues[1].append(ChatMessage(self.mbt_channel, 'j')) self.queue._chatQueues[1].append(ChatMessage(self.bgt_channel, 'k')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'l')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'm')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'n')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'o')) self.queue._chatQueues[2].append(ChatMessage(self.bgt_channel, 'p')) self.queue.clearAllChat() self.assertFalse(any(self.queue._chatQueues)) @patch('bot.utils.now', autospec=True) @patch.object(MessagingQueue, '_getChatMessage', autospec=True) def test_popChat_None(self, mock_getChatMessage, mock_now): mock_getChatMessage.return_value = None now = datetime(2000, 1, 1, 0, 0, 0) mock_now.return_value = now self.assertIsNone(self.queue.popChat()) self.assertFalse(self.queue._chatSent) @patch('bot.utils.now', autospec=True) @patch.object(MessagingQueue, '_getChatMessage', autospec=True) def test_popChat(self, mock_getChatMessage, mock_now): msg = ChatMessage(self.bgt_channel, 'TBTacoLeft TBCheesePull TBTacoRight') mock_getChatMessage.return_value = msg now = datetime(2000, 1, 1, 0, 0, 0) mock_now.return_value = now self.assertIs(self.queue.popChat(), msg) self.assertEqual(self.queue._chatSent, [now]) @patch('bot.utils.now', autospec=True) @patch.object(MessagingQueue, '_getChatMessage', autospec=True) def test_popWhisper_None(self, mock_getChatMessage, mock_now): mock_getChatMessage.return_value = None now = datetime(2000, 1, 1, 0, 0, 0) mock_now.return_value = now self.assertIsNone(self.queue.popWhisper()) self.assertFalse(self.queue._whisperSent) @patch('bot.config', autospec=True) @patch('bot.utils.now', autospec=True) def test_popWhisper(self, mock_now, mock_config): mock_config.whiperLimit = 5 msg = WhisperMessage('botgotsthis', 'TBTacoLeft TBCheesePull TBTacoRight') self.queue._whisperQueue.append(msg) now = datetime(2000, 1, 1, 0, 0, 0) mock_now.return_value = now self.assertIs(self.queue.popWhisper(), msg) 
self.assertEqual(self.queue._whisperSent, [now]) @patch('bot.config', autospec=True) @patch('bot.utils.now', autospec=True) def test_popWhisper_full(self, mock_now, mock_config): mock_config.whiperLimit = 5 msg = WhisperMessage('botgotsthis', 'TBTacoLeft TBCheesePull TBTacoRight') self.queue._whisperQueue.append(msg) now = datetime(2000, 1, 1, 0, 0, 0) mock_now.return_value = now self.queue._whisperSent.extend(now for _ in range(5)) self.assertIsNone(self.queue.popWhisper()) self.assertEqual(self.queue._whisperSent, [now for _ in range(5)]) class TestMessagingQueueGetChatMessage(BaseTestMessagingQueue): def setUp(self): super().setUp() patcher = patch('bot.config', autospec=True) self.addCleanup(patcher.stop) self.mock_config = patcher.start() self.mock_config.botnick = 'botgotsthis' self.mock_config.modLimit = 5 self.mock_config.modSpamLimit = 5 self.mock_config.publicLimit = 2 self.mock_config.publicDelay = 1 self.mock_config.messageSpan = 10000 patcher = patch('bot.utils.now', autospec=True) self.addCleanup(patcher.stop) self.mock_now = patcher.start() self.now = datetime(2000, 1, 1, 0, 0, 0) self.mock_now.return_value = self.now def test_empty(self): self.assertIsNone(self.queue._getChatMessage(self.now)) self.assertFalse(self.queue._lowQueueRecent) self.assertFalse(self.queue._publicTime) def test_full(self): self.queue._chatSent.extend(self.now for i in range(5)) self.queue._whisperSent.extend(self.now for i in range(5)) self.queue._chatQueues[0].append( ChatMessage(self.bgt_channel, 'PogChamp')) self.queue._chatQueues[1].append( ChatMessage(self.mgt_channel, 'Kreygasm')) self.queue._chatQueues[2].append( ChatMessage(self.mbt_channel, 'Kappa')) self.assertIsNone(self.queue._getChatMessage(self.now)) self.assertFalse(self.queue._lowQueueRecent) self.assertFalse(self.queue._publicTime) def test_single_mod(self): msg = ChatMessage(self.mgt_channel, 'Kreygasm') self.queue._chatQueues[0].append(msg) self.assertIs(self.queue._getChatMessage(self.now), msg) self.assertFalse(self.queue._lowQueueRecent) self.assertEqual(self.queue._publicTime[msg.channel.channel], self.queue._publicTime.default_factory()) self.assertIsNone(self.queue._getChatMessage(self.now)) def test_single_notmod(self): msg = ChatMessage(self.mbt_channel, 'BionicBunion') self.queue._chatQueues[0].append(msg) self.assertIs(self.queue._getChatMessage(self.now), msg) self.assertFalse(self.queue._lowQueueRecent) self.assertEqual(self.queue._publicTime[msg.channel.channel], self.now) def test_double(self): msg1 = ChatMessage(self.bgt_channel, 'PJSalt') msg2 = ChatMessage(self.mgt_channel, 'PJSugar') self.queue._chatQueues[0].append(msg1) self.queue._chatQueues[0].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) self.assertIs(self.queue._getChatMessage(self.now), msg2) def test_lowest_priority(self): msg = ChatMessage(self.bgt_channel, 'ResidentSleeper') self.queue._chatQueues[-1].append(msg) self.assertIs(self.queue._getChatMessage(self.now), msg) self.assertEqual(self.queue._publicTime[msg.channel.channel], self.queue._publicTime.default_factory()) self.assertIn(msg.channel.channel, self.queue._lowQueueRecent) def test_lowest_priority_nonmod(self): msg = ChatMessage(self.mbt_channel, 'OneHand') self.queue._chatQueues[-1].append(msg) self.assertIs(self.queue._getChatMessage(self.now), msg) self.assertFalse(self.queue._lowQueueRecent) self.assertEqual(self.queue._publicTime[msg.channel.channel], self.now) def test_lowest_priority_multiple(self): msgs1 = [ChatMessage(self.bgt_channel, 'KappaRoss') for _ in 
range(2)] msgs2 = [ChatMessage(self.mgt_channel, 'KappaPride') for _ in range(2)] self.queue._chatQueues[-1].append(msgs1[0]) self.queue._chatQueues[-1].extend(msgs2) self.queue._chatQueues[-1].append(msgs1[1]) self.assertIs(self.queue._getChatMessage(self.now), msgs1[0]) self.assertEqual(list(self.queue._lowQueueRecent), [self.bgt_channel.channel]) self.assertIs(self.queue._getChatMessage(self.now), msgs2[0]) self.assertEqual(list(self.queue._lowQueueRecent), [self.bgt_channel.channel, self.mgt_channel.channel]) self.assertIs(self.queue._getChatMessage(self.now), msgs1[1]) self.assertEqual(list(self.queue._lowQueueRecent), [self.mgt_channel.channel, self.bgt_channel.channel]) self.assertIs(self.queue._getChatMessage(self.now), msgs2[1]) self.assertEqual(list(self.queue._lowQueueRecent), [self.bgt_channel.channel, self.mgt_channel.channel]) def test_prioirity_top_two_1(self): msg1 = ChatMessage(self.bgt_channel, 'CorgiDerp') msg2 = ChatMessage(self.bgt_channel, 'OhMyDog') self.queue._chatQueues[0].append(msg1) self.queue._chatQueues[1].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) self.assertIs(self.queue._getChatMessage(self.now), msg2) def test_prioirity_top_two_2(self): msg1 = ChatMessage(self.bgt_channel, 'BudBlast') msg2 = ChatMessage(self.bgt_channel, 'BudStar') self.queue._chatQueues[0].append(msg1) self.queue._chatQueues[2].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) self.assertIs(self.queue._getChatMessage(self.now), msg2) def test_prioirity_top_two_3(self): msg1 = ChatMessage(self.bgt_channel, 'Kippa') msg2 = ChatMessage(self.bgt_channel, 'Keepo') self.queue._chatQueues[1].append(msg1) self.queue._chatQueues[2].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) self.assertIs(self.queue._getChatMessage(self.now), msg2) def test_nonmod_mod(self): msg1 = ChatMessage(self.bgt_channel, 'KAPOW') msg2 = ChatMessage(self.mbt_channel, 'FunRun') self.queue._chatQueues[1].append(msg1) self.queue._chatQueues[1].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) def test_prioirity_nonmod_mod(self): msg1 = ChatMessage(self.bgt_channel, 'KAPOW') msg2 = ChatMessage(self.mbt_channel, 'FunRun') self.queue._chatQueues[0].append(msg1) self.queue._chatQueues[1].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) def test_nonmod_semifull(self): self.queue._chatSent.extend(self.now for i in range(1)) msg1 = ChatMessage(self.bgt_channel, 'KAPOW') msg2 = ChatMessage(self.mbt_channel, 'FunRun') self.queue._chatQueues[1].append(msg1) self.queue._chatQueues[1].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) def test_nonmod_full(self): self.queue._chatSent.extend(self.now for i in range(2)) msg1 = ChatMessage(self.bgt_channel, 'KAPOW') msg2 = ChatMessage(self.mbt_channel, 'FunRun') self.queue._chatQueues[1].append(msg1) self.queue._chatQueues[1].append(msg2) self.assertIs(self.queue._getChatMessage(self.now), msg1) self.assertIsNone(self.queue._getChatMessage(self.now)) self.assertIs(self.queue._chatQueues[1][0], msg2)
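# ---------------------------------------------------------------------------
# Illustration only, not part of the file above: a minimal sketch of the
# priority behaviour the sendChat tests pin down (three priority deques,
# str or iterable-of-str input, ValueError on an out-of-range priority,
# TypeError on non-str items queued nowhere). This is NOT the project's
# actual MessagingQueue; all names here are hypothetical, and the real
# class also filters disallowed commands, redirects '.w'/'/w' messages to
# sendWhisper, and rate-limits popChat/popWhisper, none of which is shown.
from collections import deque, namedtuple

SketchChatMessage = namedtuple('SketchChatMessage', ['channel', 'message'])


class SketchMessagingQueue:
    def __init__(self, num_priorities=3):
        # one FIFO per priority level; index 0 is the highest priority
        self._chatQueues = [deque() for _ in range(num_priorities)]

    def sendChat(self, channel, messages, priority=1):
        if isinstance(messages, str):
            messages = [messages]
        # accept negative priorities the way list indexing does (-1 = lowest)
        if not -len(self._chatQueues) <= priority < len(self._chatQueues):
            raise ValueError('priority out of range')
        messages = list(messages)  # materialise generators before queueing
        if any(not isinstance(m, str) for m in messages):
            raise TypeError('messages must be str')
        for message in messages:
            self._chatQueues[priority].append(
                SketchChatMessage(channel, message))


if __name__ == '__main__':
    q = SketchMessagingQueue()
    q.sendChat('#botgotsthis', 'Kappa')          # default priority 1
    q.sendChat('#botgotsthis', 'SwiftRage', -1)  # lowest priority queue
    assert len(q._chatQueues[1]) == 1 and len(q._chatQueues[2]) == 1
# ---------------------------------------------------------------------------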
gpl-3.0
1,793,848,831,706,144,500
49.851138
79
0.647403
false
linsalrob/CrAPy
pairwise_correlations.py
1
1848
# coding: utf-8
"""
generate all pairwise correlations and print a list of
contig1, contig2, correlation score
"""

import sys
from multiprocessing import Pool

import numpy as np
from scipy.stats.stats import pearsonr

NUM_THREADS = 6


def pairwise_correlation(data, ids, i, j):
    """Return [id_i, id_j, pearson_r, p_value] for one pair of contigs."""
    try:
        pearson, p = pearsonr(data[ids[i]], data[ids[j]])
        if np.isnan(pearson):  # pearsonr yields nan for constant input
            pearson = 0
            p = 0
    except Exception as e:
        sys.stderr.write("There was an error when i: " + str(i) + " and j " +
                         str(j) + " message: " + str(e) + "\n")
        pearson = 0  # report a zero score for pairs that failed
        p = 0
    return [ids[i], ids[j], pearson, p]


if __name__ == '__main__':
    # data_file = "test_data/output.contigs2reads.contigs.100.txt"
    try:
        data_file = sys.argv[1]
        noc = float(sys.argv[2])
    except (IndexError, ValueError):
        sys.exit(sys.argv[0] +
                 " <data file e.g. test_data/output.contigs2reads.contigs.100.txt>" +
                 " <minimum number of occurrences (try 3)>")
    if noc < 3:
        sys.stderr.write("Warning: you chose reads that have at least " +
                         str(noc) +
                         " occurrences, but this might be too low. " +
                         "Continuing anyway\n")

    data = {}
    with open(data_file, 'r') as fin:
        for l in fin:
            p = l.strip().split("\t")
            did = p.pop(0)
            p = list(map(float, p))  # list() so count() works on Python 3
            # only keep contigs with more than noc nonzero counts
            if len(p) - p.count(0) <= noc:
                continue
            data[did] = p

    # now we need to find all pairwise correlation scores
    ids = list(data.keys())  # list() so the keys are indexable on Python 3

    # measure all correlations in a worker pool
    pool = Pool(NUM_THREADS)
    results = [pool.apply_async(pairwise_correlation, [data, ids, i, j])
               for i in range(len(ids)) for j in range(i + 1, len(ids))]
    results = [r.get() for r in results]
    pool.close()
    pool.join()

    for r in results:
        print("\t".join(map(str, r)))
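# ---------------------------------------------------------------------------
# Illustration only, not part of the file above: a hypothetical end-to-end
# run of the script. It builds a tiny tab-separated counts file (contig id
# followed by per-sample read counts) and invokes the script on it; the file
# name and the counts are made up for this sketch.
import subprocess

rows = {
    'contig_1': [5.0, 0.0, 3.0, 9.0, 1.0],
    'contig_2': [4.0, 1.0, 2.0, 8.0, 0.0],
    'contig_3': [0.0, 7.0, 0.0, 1.0, 6.0],  # only 3 nonzero: filtered out
}
with open('toy_counts.tsv', 'w') as out:
    for contig, counts in rows.items():
        out.write(contig + '\t' + '\t'.join(map(str, counts)) + '\n')

# prints one "contig1<TAB>contig2<TAB>pearson<TAB>p-value" line per kept pair
subprocess.run(['python', 'pairwise_correlations.py', 'toy_counts.tsv', '3'],
               check=True)
# ---------------------------------------------------------------------------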
mit
4,348,821,302,040,000,500
25.782609
149
0.569805
false
Ophiuchus1312/enigma2-master
lib/python/Screens/EpgSelection.py
1
57966
from Screen import Screen from Screens.HelpMenu import HelpableScreen from Components.About import about from Components.ActionMap import HelpableActionMap, HelpableNumberActionMap from Components.Button import Button from Components.config import config, configfile, ConfigClock from Components.EpgList import EPGList, EPGBouquetList, TimelineText, EPG_TYPE_SINGLE, EPG_TYPE_SIMILAR, EPG_TYPE_MULTI, EPG_TYPE_ENHANCED, EPG_TYPE_INFOBAR, EPG_TYPE_INFOBARGRAPH, EPG_TYPE_GRAPH, MAX_TIMELINES from Components.Label import Label from Components.Pixmap import Pixmap from Components.Sources.ServiceEvent import ServiceEvent from Components.Sources.Event import Event from Components.Sources.StaticText import StaticText from Components.UsageConfig import preferredTimerPath from Screens.TimerEdit import TimerSanityConflict from Screens.EventView import EventViewEPGSelect, EventViewSimple from Screens.ChoiceBox import ChoiceBox from Screens.MessageBox import MessageBox from Screens.PictureInPicture import PictureInPicture from Screens.Setup import Setup from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN from TimeDateInput import TimeDateInput from enigma import eServiceReference, eTimer, eServiceCenter, ePoint from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT from TimerEntry import TimerEntry, InstantRecordTimerEntry from ServiceReference import ServiceReference from time import localtime, time, strftime, mktime mepg_config_initialized = False # PiPServiceRelation installed? try: from Plugins.SystemPlugins.PiPServiceRelation.plugin import getRelationDict plugin_PiPServiceRelation_installed = True except: plugin_PiPServiceRelation_installed = False class EPGSelection(Screen, HelpableScreen): EMPTY = 0 ADD_TIMER = 1 REMOVE_TIMER = 2 ZAP = 1 def __init__(self, session, service = None, zapFunc = None, eventid = None, bouquetChangeCB=None, serviceChangeCB = None, EPGtype = None, StartBouquet = None, StartRef = None, bouquets = None): Screen.__init__(self, session) HelpableScreen.__init__(self) self.zapFunc = zapFunc self.serviceChangeCB = serviceChangeCB self.bouquets = bouquets graphic = False if EPGtype == 'single': self.type = EPG_TYPE_SINGLE elif EPGtype == 'infobar': self.type = EPG_TYPE_INFOBAR elif EPGtype == 'enhanced': self.type = EPG_TYPE_ENHANCED elif EPGtype == 'graph': self.type = EPG_TYPE_GRAPH if config.epgselection.graph_type_mode.getValue() == "graphics": graphic = True elif EPGtype == 'infobargraph': self.type = EPG_TYPE_INFOBARGRAPH if config.epgselection.infobar_type_mode.getValue() == "graphics": graphic = True elif EPGtype == 'multi': self.type = EPG_TYPE_MULTI else: self.type = EPG_TYPE_SIMILAR if not self.type == EPG_TYPE_SINGLE: self.StartBouquet = StartBouquet self.StartRef = StartRef self.servicelist = None self.longbuttonpressed = False self.ChoiceBoxDialog = None self.ask_time = -1 self.closeRecursive = False self.eventviewDialog = None self.eventviewWasShown = False self.currch = None self.session.pipshown = False if plugin_PiPServiceRelation_installed: self.pipServiceRelation = getRelationDict() else: self.pipServiceRelation = {} self.zapnumberstarted = False self.NumberZapTimer = eTimer() self.NumberZapTimer.callback.append(self.dozumberzap) self.NumberZapField = None self.CurrBouquet = None self.CurrService = None self["number"] = Label() self["number"].hide() self['Service'] = ServiceEvent() self['Event'] = Event() self['lab1'] = Label(_('Please wait while gathering data...')) self.key_green_choice = self.EMPTY self['key_red'] = Button(_('IMDb 
Search')) self['key_green'] = Button(_('Add Timer')) self['key_yellow'] = Button(_('EPG Search')) self['key_blue'] = Button(_('Add AutoTimer')) self['dialogactions'] = HelpableActionMap(self, 'WizardActions', { 'back': (self.closeChoiceBoxDialog, _('Close dialog')), }, -1) self['dialogactions'].csel = self self["dialogactions"].setEnabled(False) self['okactions'] = HelpableActionMap(self, 'OkCancelActions', { 'cancel': (self.closeScreen, _('Exit EPG')), 'OK': (self.OK, _('Zap to channel (setup in menu)')), 'OKLong': (self.OKLong, _('Zap to channel and close (setup in menu)')) }, -1) self['okactions'].csel = self self['colouractions'] = HelpableActionMap(self, 'ColorActions', { 'red': (self.redButtonPressed, _('IMDB search for current event')), 'redlong': (self.redlongButtonPressed, _('Sort EPG List')), 'green': (self.greenButtonPressed, _('Add/Remove timer for current event')), 'yellow': (self.yellowButtonPressed, _('Search for similar events')), 'greenlong': (self.showTimerList, _('Show Timer List')), 'blue': (self.blueButtonPressed, _('Add a auto timer for current event')), 'bluelong': (self.bluelongButtonPressed, _('Show AutoTimer List')) }, -1) self['colouractions'].csel = self self['recordingactions'] = HelpableActionMap(self, 'InfobarInstantRecord', { 'ShortRecord': (self.recButtonPressed, _('Add a record timer for current event')), 'LongRecord': (self.reclongButtonPressed, _('Add a zap timer for current event')) }, -1) self['recordingactions'].csel = self if self.type == EPG_TYPE_SIMILAR: self.currentService = service self.eventid = eventid self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'info': (self.Info, _('Show detailed event info')), 'infolong': (self.InfoLong, _('Show single epg for current channel')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self elif self.type == EPG_TYPE_SINGLE: self.currentService = ServiceReference(service) self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'info': (self.Info, _('Show detailed event info')), 'epg': (self.Info, _('Show detailed event info')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.prevPage, _('Move up a page')), 'right': (self.nextPage, _('Move down a page')), 'up': (self.moveUp, _('Goto previous channel')), 'down': (self.moveDown, _('Goto next channel')) }, -1) self['epgcursoractions'].csel = self elif self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_ENHANCED: if self.type == EPG_TYPE_INFOBAR: self.skinName = 'QuickEPG' self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'nextBouquet': (self.nextBouquet, _('Goto next bouquet')), 'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')), 'nextService': (self.nextPage, _('Move down a page')), 'prevService': (self.prevPage, _('Move up a page')), 'input_date_time': (self.enterDateTime, _('Goto specific data/time')), 'info': (self.Info, _('Show detailed event info')), 'infolong': (self.InfoLong, _('Show single epg for current channel')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.prevService, _('Goto previous channel')), 'right': (self.nextService, _('Goto next channel')), 'up': (self.moveUp, _('Goto previous channel')), 'down': (self.moveDown, _('Goto next channel')) }, -1) self['epgcursoractions'].csel = self elif self.type == EPG_TYPE_ENHANCED: 
self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'nextBouquet': (self.nextBouquet, _('Goto next bouquet')), 'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')), 'nextService': (self.nextService, _('Goto next channel')), 'prevService': (self.prevService, _('Goto previous channel')), 'input_date_time': (self.enterDateTime, _('Goto specific data/time')), 'info': (self.Info, _('Show detailed event info')), 'infolong': (self.InfoLong, _('Show single epg for current channel')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.prevPage, _('Move up a page')), 'right': (self.nextPage, _('Move down a page')), 'up': (self.moveUp, _('Goto previous channel')), 'down': (self.moveDown, _('Goto next channel')) }, -1) self['epgcursoractions'].csel = self self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions', { '0': (self.keyNumberGlobal, _('enter number to jump to channel.')), '1': (self.keyNumberGlobal, _('enter number to jump to channel.')), '2': (self.keyNumberGlobal, _('enter number to jump to channel.')), '3': (self.keyNumberGlobal, _('enter number to jump to channel.')), '4': (self.keyNumberGlobal, _('enter number to jump to channel.')), '5': (self.keyNumberGlobal, _('enter number to jump to channel.')), '6': (self.keyNumberGlobal, _('enter number to jump to channel.')), '7': (self.keyNumberGlobal, _('enter number to jump to channel.')), '8': (self.keyNumberGlobal, _('enter number to jump to channel.')), '9': (self.keyNumberGlobal, _('enter number to jump to channel.')) }, -1) self['input_actions'].csel = self self.list = [] self.servicelist = service self.currentService = self.session.nav.getCurrentlyPlayingServiceOrGroup() elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: if self.type == EPG_TYPE_GRAPH: if not config.epgselection.graph_pig.getValue(): self.skinName = 'GraphicalEPG' else: self.skinName = 'GraphicalEPGPIG' elif self.type == EPG_TYPE_INFOBARGRAPH: self.skinName = 'GraphicalInfoBarEPG' now = time() - int(config.epg.histminutes.getValue()) * 60 if self.type == EPG_TYPE_GRAPH: self.ask_time = self.ask_time = now - now % (int(config.epgselection.graph_roundto.getValue()) * 60) elif self.type == EPG_TYPE_INFOBARGRAPH: self.ask_time = self.ask_time = now - now % (int(config.epgselection.infobar_roundto.getValue()) * 60) self.closeRecursive = False self.bouquetlist_active = False self['bouquetlist'] = EPGBouquetList(graphic=graphic) self['bouquetlist'].hide() self['timeline_text'] = TimelineText(type=self.type,graphic=graphic) self['Event'] = Event() self['primetime'] = Label(_('PRIMETIME')) self['change_bouquet'] = Label(_('CHANGE BOUQUET')) self['jump'] = Label(_('JUMP 24 HOURS')) self['page'] = Label(_('PAGE UP/DOWN')) self.time_lines = [] for x in range(0, MAX_TIMELINES): pm = Pixmap() self.time_lines.append(pm) self['timeline%d' % x] = pm self['timeline_now'] = Pixmap() self.updateTimelineTimer = eTimer() self.updateTimelineTimer.callback.append(self.moveTimeLines) self.updateTimelineTimer.start(60000) self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions', { 'cancel': (self.BouquetlistHide, _('Close bouquet list.')), 'OK': (self.BouquetOK, _('Chnage to bouquet')), }, -1) self['bouquetokactions'].csel = self self["bouquetokactions"].setEnabled(False) self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.leftPressed, _('Goto previous event')), 'right': 
(self.rightPressed, _('Goto next event')), 'up': (self.moveBouquetUp, _('Goto previous channel')), 'down': (self.moveBouquetDown, _('Goto next channel')) }, -1) self['bouquetcursoractions'].csel = self self["bouquetcursoractions"].setEnabled(False) self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.leftPressed, _('Goto previous event')), 'right': (self.rightPressed, _('Goto next event')), 'up': (self.moveUp, _('Goto previous channel')), 'down': (self.moveDown, _('Goto next channel')) }, -1) self['epgcursoractions'].csel = self self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'nextService': (self.nextService, _('Jump forward 24 hours')), 'prevService': (self.prevService, _('Jump back 24 hours')), 'nextBouquet': (self.nextBouquet, _('Goto next bouquet')), 'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')), 'input_date_time': (self.enterDateTime, _('Goto specific data/time')), 'info': (self.Info, _('Show detailed event info')), 'infolong': (self.InfoLong, _('Show single epg for current channel')), 'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')), 'tvlong': (self.togglePIG, _('Toggle Picture In Graphics')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self self['input_actions'] = HelpableNumberActionMap(self, 'NumberActions', { '1': (self.keyNumberGlobal, _('Reduce time scale')), '2': (self.keyNumberGlobal, _('Page up')), '3': (self.keyNumberGlobal, _('Increase time scale')), '4': (self.keyNumberGlobal, _('page left')), '5': (self.keyNumberGlobal, _('Jump to current time')), '6': (self.keyNumberGlobal, _('Page right')), '7': (self.keyNumberGlobal, _('No of items switch (increase or reduced)')), '8': (self.keyNumberGlobal, _('Page down')), '9': (self.keyNumberGlobal, _('Jump to prime time')), '0': (self.keyNumberGlobal, _('Move to home of list')) }, -1) self['input_actions'].csel = self elif self.type == EPG_TYPE_MULTI: self.skinName = 'EPGSelectionMulti' self['bouquetlist'] = EPGBouquetList(graphic=graphic) self['bouquetlist'].hide() self['now_button'] = Pixmap() self['next_button'] = Pixmap() self['more_button'] = Pixmap() self['now_button_sel'] = Pixmap() self['next_button_sel'] = Pixmap() self['more_button_sel'] = Pixmap() self['now_text'] = Label() self['next_text'] = Label() self['more_text'] = Label() self['date'] = Label() self.bouquetlist_active = False self['bouquetokactions'] = HelpableActionMap(self, 'OkCancelActions', { 'OK': (self.BouquetOK, _('Chnage to bouquet')), }, -1) self['bouquetokactions'].csel = self self["bouquetokactions"].setEnabled(False) self['bouquetcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.leftPressed, _('Goto previous event')), 'right': (self.rightPressed, _('Goto next event')), 'up': (self.moveBouquetUp, _('Goto previous channel')), 'down': (self.moveBouquetDown, _('Goto next channel')) }, -1) self['bouquetcursoractions'].csel = self self['bouquetcursoractions'].setEnabled(False) self['epgcursoractions'] = HelpableActionMap(self, 'DirectionActions', { 'left': (self.leftPressed, _('Goto previous event')), 'right': (self.rightPressed, _('Goto next event')), 'up': (self.moveUp, _('Goto previous channel')), 'down': (self.moveDown, _('Goto next channel')) }, -1) self['epgcursoractions'].csel = self self['epgactions'] = HelpableActionMap(self, 'EPGSelectActions', { 'nextService': (self.nextPage, _('Move down a page')), 'prevService': (self.prevPage, _('Move up a page')), 'nextBouquet': (self.nextBouquet, _('Goto next 
bouquet')), 'prevBouquet': (self.prevBouquet, _('Goto previous bouquet')), 'input_date_time': (self.enterDateTime, _('Goto specific data/time')), 'info': (self.Info, _('Show detailed event info')), 'infolong': (self.InfoLong, _('Show single epg for current channel')), 'tv': (self.Bouquetlist, _('Toggle between bouquet/epg lists')), 'menu': (self.createSetup, _('Setup menu')) }, -1) self['epgactions'].csel = self if self.type == EPG_TYPE_GRAPH: time_epoch=config.epgselection.graph_prevtimeperiod.getValue() elif self.type == EPG_TYPE_INFOBARGRAPH: time_epoch=config.epgselection.infobar_prevtimeperiod.getValue() else: time_epoch=None self['list'] = EPGList(type=self.type, selChangedCB=self.onSelectionChanged, timer=session.nav.RecordTimer, time_epoch=time_epoch, overjump_empty=config.epgselection.overjump.getValue(), graphic=graphic) self.refreshTimer = eTimer() self.refreshTimer.timeout.get().append(self.refreshlist) self.listTimer = eTimer() self.listTimer.callback.append(self.hidewaitingtext) if about.getCPUString() != 'BCM7346B2': self.createTimer = eTimer() self.createTimer.callback.append(self.onCreate) self.onLayoutFinish.append(self.LayoutFinish) else: self.onLayoutFinish.append(self.onCreate) def createSetup(self): self.closeEventViewDialog() key = None if self.type == EPG_TYPE_SINGLE: key = 'epgsingle' elif self.type == EPG_TYPE_MULTI: key = 'epgmulti' elif self.type == EPG_TYPE_ENHANCED: key = 'epgenhanced' elif self.type == EPG_TYPE_INFOBAR: key = 'epginfobar' elif self.type == EPG_TYPE_GRAPH: key = 'epggraphical' elif self.type == EPG_TYPE_INFOBARGRAPH: key = 'epginfobargraphical' if key: self.session.openWithCallback(self.onSetupClose, Setup, key) def onSetupClose(self, test = None): if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: if self.type == EPG_TYPE_GRAPH: self.close('reopengraph') elif self.type == EPG_TYPE_INFOBARGRAPH: self.close('reopeninfobargraph') else: if self.type == EPG_TYPE_INFOBAR: self.close('reopeninfobar') def togglePIG(self): if not config.epgselection.graph_pig.getValue(): config.epgselection.graph_pig.setValue(True) else: config.epgselection.graph_pig.setValue(False) config.epgselection.graph_pig.save() configfile.save() self.close('reopengraph') def hidewaitingtext(self): self.listTimer.stop() self['lab1'].hide() def getBouquetServices(self, bouquet): services = [] servicelist = eServiceCenter.getInstance().list(bouquet) if not servicelist is None: while True: service = servicelist.getNext() if not service.valid(): #check if end of list break if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services continue services.append(ServiceReference(service)) return services def LayoutFinish(self): self['lab1'].show() self.createTimer.start(800) def onCreate(self): if about.getCPUString() != 'BCM7346B2': self.createTimer.stop() serviceref = self.session.nav.getCurrentlyPlayingServiceOrGroup() title = None self['list'].recalcEntrySize() if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: self.services = self.getBouquetServices(self.StartBouquet) self['list'].fillGraphEPG(self.services, self.ask_time) self['list'].moveToService(serviceref) self['list'].setCurrentlyPlaying(serviceref) self['bouquetlist'].recalcEntrySize() self['bouquetlist'].fillBouquetList(self.bouquets) self['bouquetlist'].moveToService(self.StartBouquet) self['bouquetlist'].setCurrentBouquet(self.StartBouquet ) self.setTitle(self['bouquetlist'].getCurrentBouquet()) if self.type == EPG_TYPE_GRAPH: 
self['list'].setShowServiceMode(config.epgselection.graph_servicetitle_mode.getValue()) self.moveTimeLines() if config.epgselection.graph_channel1.getValue(): self['list'].instance.moveSelectionTo(0) elif self.type == EPG_TYPE_INFOBARGRAPH: self['list'].setShowServiceMode(config.epgselection.infobar_servicetitle_mode.getValue()) self.moveTimeLines() elif self.type == EPG_TYPE_MULTI: self['bouquetlist'].recalcEntrySize() self['bouquetlist'].fillBouquetList(self.bouquets) self['bouquetlist'].moveToService(self.StartBouquet) self['bouquetlist'].fillBouquetList(self.bouquets) self.services = self.getBouquetServices(self.StartBouquet) self['list'].fillMultiEPG(self.services, self.ask_time) self['list'].moveToService(serviceref) self['list'].setCurrentlyPlaying(serviceref) self.setTitle(self['bouquetlist'].getCurrentBouquet()) elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: if self.type == EPG_TYPE_SINGLE: service = self.currentService elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: service = ServiceReference(self.servicelist.getCurrentSelection()) title = ServiceReference(self.servicelist.getRoot()).getServiceName() self['Service'].newService(service.ref) if title: title = title + ' - ' + service.getServiceName() else: title = service.getServiceName() self.setTitle(title) self['list'].fillSingleEPG(service) self['list'].sortSingleEPG(int(config.epgselection.sort.getValue())) else: self['list'].fillSimilarList(self.currentService, self.eventid) self.listTimer.start(10) def refreshlist(self): self.refreshTimer.stop() if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines() elif self.type == EPG_TYPE_MULTI: self['list'].fillMultiEPG(self.services, self.ask_time) elif self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: if self.type == EPG_TYPE_SINGLE: service = self.currentService elif self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: service = ServiceReference(self.servicelist.getCurrentSelection()) index = self['list'].getCurrentIndex() self['list'].fillSingleEPG(service) self['list'].sortSingleEPG(int(config.epgselection.sort.getValue())) self['list'].setCurrentIndex(index) def moveUp(self): self['list'].moveTo(self['list'].instance.moveUp) def moveDown(self): self['list'].moveTo(self['list'].instance.moveDown) def updEvent(self, dir, visible = True): ret = self['list'].selEntry(dir, visible) if ret: self.moveTimeLines(True) def nextPage(self): self['list'].moveTo(self['list'].instance.pageDown) def prevPage(self): self['list'].moveTo(self['list'].instance.pageUp) def toTop(self): self['list'].moveTo(self['list'].instance.moveTop) def toEnd(self): self['list'].moveTo(self['list'].instance.moveEnd) def leftPressed(self): if self.type == EPG_TYPE_MULTI: self['list'].updateMultiEPG(-1) else: self.updEvent(-1) def rightPressed(self): if self.type == EPG_TYPE_MULTI: self['list'].updateMultiEPG(1) else: self.updEvent(+1) def Bouquetlist(self): if not self.bouquetlist_active: self.BouquetlistShow() else: self.BouquetlistHide() def BouquetlistShow(self): self.curindex = self['bouquetlist'].l.getCurrentSelectionIndex() self["epgcursoractions"].setEnabled(False) self["okactions"].setEnabled(False) self['bouquetlist'].show() self["bouquetokactions"].setEnabled(True) self["bouquetcursoractions"].setEnabled(True) self.bouquetlist_active = True def BouquetlistHide(self, cancel=True): 
self["bouquetokactions"].setEnabled(False) self["bouquetcursoractions"].setEnabled(False) self['bouquetlist'].hide() if cancel: self['bouquetlist'].setCurrentIndex(self.curindex) self["okactions"].setEnabled(True) self["epgcursoractions"].setEnabled(True) self.bouquetlist_active = False def getCurrentBouquet(self): if self.has_key('bouquetlist'): cur = self["bouquetlist"].l.getCurrentSelection() return cur and cur[1] else: return self.servicelist.getRoot() def BouquetOK(self): now = time() - int(config.epg.histminutes.getValue()) * 60 self.services = self.getBouquetServices(self.getCurrentBouquet()) if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: if self.type == EPG_TYPE_GRAPH: self.ask_time = self.ask_time = now - now % (int(config.epgselection.graph_roundto.getValue()) * 60) elif self.type == EPG_TYPE_INFOBARGRAPH: self.ask_time = self.ask_time = now - now % (int(config.epgselection.infobar_roundto.getValue()) * 60) self['list'].resetOffset() self['list'].fillGraphEPG(self.services, self.ask_time) self.moveTimeLines(True) elif self.type == EPG_TYPE_MULTI: self['list'].fillMultiEPG(self.services, self.ask_time) self['list'].instance.moveSelectionTo(0) self.setTitle(self['bouquetlist'].getCurrentBouquet()) self.BouquetlistHide(False) def moveBouquetUp(self): self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveUp) self['bouquetlist'].fillBouquetList(self.bouquets) def moveBouquetDown(self): self['bouquetlist'].moveTo(self['bouquetlist'].instance.moveDown) self['bouquetlist'].fillBouquetList(self.bouquets) def nextBouquet(self): if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: self.moveBouquetDown() self.BouquetOK() elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.getValue(): self.CurrBouquet = self.servicelist.getCurrentSelection() self.CurrService = self.servicelist.getRoot() self.servicelist.nextBouquet() self.onCreate() def prevBouquet(self): if self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: self.moveBouquetUp() self.BouquetOK() elif (self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR) and config.usage.multibouquet.getValue(): self.CurrBouquet = self.servicelist.getCurrentSelection() self.CurrService = self.servicelist.getRoot() self.servicelist.prevBouquet() self.onCreate() def nextService(self): if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: self.CurrBouquet = self.servicelist.getCurrentSelection() self.CurrService = self.servicelist.getRoot() self['list'].instance.moveSelectionTo(0) if self.servicelist.inBouquet(): prev = self.servicelist.getCurrentSelection() if prev: prev = prev.toString() while True: if config.usage.quickzap_bouquet_change.getValue() and self.servicelist.atEnd(): self.servicelist.nextBouquet() else: self.servicelist.moveDown() cur = self.servicelist.getCurrentSelection() if not cur or (not (cur.flags & 64)) or cur.toString() == prev: break else: self.servicelist.moveDown() if self.isPlayable(): self.onCreate() if not self['list'].getCurrent()[1] and config.epgselection.overjump.getValue(): self.nextService() else: self.nextService() elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: if self.type == EPG_TYPE_GRAPH: timeperiod = config.epgselection.graph_prevtimeperiod.getValue() elif self.type == EPG_TYPE_INFOBARGRAPH: timeperiod = config.epgselection.infobar_prevtimeperiod.getValue() if timeperiod == 60: for i in range(24): 
self.updEvent(+2) if timeperiod == 120: for i in range(12): self.updEvent(+2) if timeperiod == 180: for i in range(8): self.updEvent(+2) if timeperiod == 240: for i in range(6): self.updEvent(+2) if timeperiod == 300: for i in range(4): self.updEvent(+2) elif self.serviceChangeCB: self.serviceChangeCB(1, self) def prevService(self): if self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: self.CurrBouquet = self.servicelist.getCurrentSelection() self.CurrService = self.servicelist.getRoot() self['list'].instance.moveSelectionTo(0) if self.servicelist.inBouquet(): prev = self.servicelist.getCurrentSelection() if prev: prev = prev.toString() while True: if config.usage.quickzap_bouquet_change.getValue(): if self.servicelist.atBegin(): self.servicelist.prevBouquet() self.servicelist.moveUp() cur = self.servicelist.getCurrentSelection() if not cur or (not (cur.flags & 64)) or cur.toString() == prev: break else: self.servicelist.moveUp() if self.isPlayable(): self.onCreate() if not self['list'].getCurrent()[1] and config.epgselection.overjump.getValue(): self.prevService() else: self.prevService() elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: if self.type == EPG_TYPE_GRAPH: timeperiod = config.epgselection.graph_prevtimeperiod.getValue() elif self.type == EPG_TYPE_INFOBARGRAPH: timeperiod = config.epgselection.infobar_prevtimeperiod.getValue() if timeperiod == 60: for i in range(24): self.updEvent(-2) if timeperiod == 120: for i in range(12): self.updEvent(-2) if timeperiod == 180: for i in range(8): self.updEvent(-2) if timeperiod == 240: for i in range(6): self.updEvent(-2) if timeperiod == 300: for i in range(4): self.updEvent(-2) elif self.serviceChangeCB: self.serviceChangeCB(-1, self) def enterDateTime(self): global mepg_config_initialized if self.type == EPG_TYPE_MULTI: if not mepg_config_initialized: config.misc.prev_mepg_time = ConfigClock(default=time()) mepg_config_initialized = True self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.misc.prev_mepg_time) elif self.type == EPG_TYPE_GRAPH: self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.graph_prevtime) elif self.type == EPG_TYPE_INFOBARGRAPH: self.session.openWithCallback(self.onDateTimeInputClosed, TimeDateInput, config.epgselection.infobar_prevtime) def onDateTimeInputClosed(self, ret): if len(ret) > 1: if ret[0]: if self.type == EPG_TYPE_MULTI: self.ask_time = ret[1] self['list'].fillMultiEPG(self.services, ret[1]) elif self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: now = time() - int(config.epg.histminutes.getValue()) * 60 if self.type == EPG_TYPE_GRAPH: self.ask_time = self.ask_time - self.ask_time % (int(config.epgselection.graph_roundto.getValue()) * 60) elif self.type == EPG_TYPE_INFOBARGRAPH: self.ask_time = self.ask_time - self.ask_time % (int(config.epgselection.infobar_roundto.getValue()) * 60) l = self['list'] l.resetOffset() l.fillGraphEPG(None, self.ask_time) self.moveTimeLines(True) if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH): self.infoKeyPressed(True) def closeScreen(self): if self.type == None: self.close() return if self.type == EPG_TYPE_SINGLE: self.close() return # stop and do not continue. 
if self.CurrBouquet and self.CurrService and (self.CurrBouquet != self.StartBouquet or self.CurrService != self.StartRef): self.zapToNumber(self.StartRef, self.StartBouquet) if self.session.nav.getCurrentlyPlayingServiceOrGroup() and self.StartRef and self.session.nav.getCurrentlyPlayingServiceOrGroup().toString() != self.StartRef.toString(): if self.zapFunc and ((self.type == EPG_TYPE_GRAPH and config.epgselection.graph_preview_mode.getValue()) or (self.type == EPG_TYPE_MULTI and config.epgselection.multi_preview_mode.getValue()) or ((self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH) and (config.epgselection.infobar_preview_mode.getValue() == '1' or config.epgselection.infobar_preview_mode.getValue() == '2')) or (self.type == EPG_TYPE_ENHANCED and config.epgselection.enhanced_preview_mode.getValue())) and self.StartRef and self.StartBouquet: if self.StartRef.toString().find('0:0:0:0:0:0:0:0:0') == -1: self.zapFunc(None, zapback = True) elif self.StartRef.toString().find('0:0:0:0:0:0:0:0:0') != -1: self.session.nav.playService(self.StartRef) if self.session.pipshown: self.session.pipshown = False del self.session.pip self.closeEventViewDialog() self.close(True) def infoKeyPressed(self, eventviewopen=False): cur = self['list'].getCurrent() event = cur[0] service = cur[1] if event is not None and not self.eventviewDialog and not eventviewopen: if self.type != EPG_TYPE_SIMILAR: if self.type == EPG_TYPE_INFOBARGRAPH: self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView') self.eventviewDialog.show() else: self.session.open(EventViewEPGSelect, event, service, callback=self.eventViewCallback, similarEPGCB=self.openSimilarList) elif self.eventviewDialog and not eventviewopen: self.eventviewDialog.hide() del self.eventviewDialog self.eventviewDialog = None elif event is not None and self.eventviewDialog and eventviewopen: if self.type != EPG_TYPE_SIMILAR: if self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH: self.eventviewDialog.hide() self.eventviewDialog = self.session.instantiateDialog(EventViewSimple,event, service, skin='InfoBarEventView') self.eventviewDialog.show() def redButtonPressed(self): self.closeEventViewDialog() if not self.longbuttonpressed: self.openIMDb() else: self.longbuttonpressed = False def redlongButtonPressed(self): self.closeEventViewDialog() self.longbuttonpressed = True self.sortEpg() def greenButtonPressed(self): self.closeEventViewDialog() if not self.longbuttonpressed: self.timerAdd() else: self.longbuttonpressed = False def greenlongButtonPressed(self): self.closeEventViewDialog() self.longbuttonpressed = True self.showAutoTimerList() def yellowButtonPressed(self): self.closeEventViewDialog() if not self.longbuttonpressed: self.openEPGSearch() else: self.longbuttonpressed = False def blueButtonPressed(self): self.closeEventViewDialog() if not self.longbuttonpressed: self.addAutoTimer() else: self.longbuttonpressed = False def bluelongButtonPressed(self): self.closeEventViewDialog() self.longbuttonpressed = True self.showAutoTimerList() def openSimilarList(self, eventid, refstr): self.session.open(EPGSelection, refstr, None, eventid) def setServices(self, services): self.services = services self.onCreate() def setService(self, service): self.currentService = service self.onCreate() def eventViewCallback(self, setEvent, setService, val): l = self['list'] old = l.getCurrent() if self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH: self.updEvent(val, False) elif val 
== -1: self.moveUp() elif val == +1: self.moveDown() cur = l.getCurrent() if (self.type == EPG_TYPE_MULTI or self.type == EPG_TYPE_GRAPH or self.type == EPG_TYPE_INFOBARGRAPH) and cur[0] is None and cur[1].ref != old[1].ref: self.eventViewCallback(setEvent, setService, val) else: setService(cur[1]) setEvent(cur[0]) def eventSelected(self): self.infoKeyPressed() def sortEpg(self): if self.type == EPG_TYPE_SINGLE or self.type == EPG_TYPE_ENHANCED or self.type == EPG_TYPE_INFOBAR: if config.epgselection.sort.getValue() == '0': config.epgselection.sort.setValue('1') else: config.epgselection.sort.setValue('0') config.epgselection.sort.save() configfile.save() self['list'].sortSingleEPG(int(config.epgselection.sort.getValue())) def OpenSingleEPG(self): cur = self['list'].getCurrent() event = cur[0] serviceref = cur[1].ref if serviceref is not None: self.session.open(SingleEPG, serviceref) def openIMDb(self): try: from Plugins.Extensions.IMDb.plugin import IMDB, IMDBEPGSelection try: cur = self['list'].getCurrent() event = cur[0] name = event.getEventName() except: name = '' self.session.open(IMDB, name, False) except ImportError: self.session.open(MessageBox, _('The IMDb plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10) def openEPGSearch(self): try: from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch try: cur = self['list'].getCurrent() event = cur[0] name = event.getEventName() except: name = '' self.session.open(EPGSearch, name, False) except ImportError: self.session.open(MessageBox, _('The EPGSearch plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10) def addAutoTimer(self): try: from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEvent cur = self['list'].getCurrent() event = cur[0] if not event: return serviceref = cur[1] addAutotimerFromEvent(self.session, evt=event, service=serviceref) self.refreshTimer.start(3000) except ImportError: self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10) def addAutoTimerSilent(self): try: from Plugins.Extensions.AutoTimer.AutoTimerEditor import addAutotimerFromEventSilent cur = self['list'].getCurrent() event = cur[0] if not event: return serviceref = cur[1] addAutotimerFromEventSilent(self.session, evt=event, service=serviceref) self.refreshTimer.start(3000) except ImportError: self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10) def showTimerList(self): from Screens.TimerEdit import TimerEditList self.session.open(TimerEditList) def showAutoTimerList(self): global autopoller global autotimer try: from Plugins.Extensions.AutoTimer.plugin import main, autostart from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller autopoller = AutoPoller() autotimer = AutoTimer() try: autotimer.readXml() except SyntaxError as se: self.session.open(MessageBox, _('Your config file is not well-formed:\n%s') % str(se), type=MessageBox.TYPE_ERROR, timeout=10) return if autopoller is not None: autopoller.stop() from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview self.session.openWithCallback(self.editCallback, AutoTimerOverview, autotimer) except ImportError: self.session.open(MessageBox, _('The AutoTimer plugin is not installed!\nPlease install it.'), type=MessageBox.TYPE_INFO, timeout=10) def editCallback(self, session): 
global autopoller global autotimer if session is not None: autotimer.writeXml() autotimer.parseEPG() if config.plugins.autotimer.autopoll.getValue(): if autopoller is None: from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller autopoller = AutoPoller() autopoller.start() else: autopoller = None autotimer = None def timerAdd(self): cur = self['list'].getCurrent() event = cur[0] serviceref = cur[1] if event is None: return eventid = event.getEventId() refstr = serviceref.ref.toString() for timer in self.session.nav.RecordTimer.timer_list: if timer.eit == eventid and timer.service_ref.ref.toString() == refstr: cb_func = lambda ret: self.removeTimer(timer) menu = [(_("Yes"), 'CALLFUNC', cb_func), (_("No"), 'CALLFUNC', self.ChoiceBoxCB, self.ChoiceBoxNull)] self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, text=_('Do you really want to remove the timer for %s?') % event.getEventName(), list=menu, skin_name="RemoveTimerQuestion") self.showChoiceBoxDialog() break else: newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, dirname=preferredTimerPath(), *parseEvent(event)) self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry) def finishedAdd(self, answer): if answer[0]: entry = answer[1] simulTimerList = self.session.nav.RecordTimer.record(entry) if simulTimerList is not None: for x in simulTimerList: if x.setAutoincreaseEnd(entry): self.session.nav.RecordTimer.timeChanged(x) simulTimerList = self.session.nav.RecordTimer.record(entry) if simulTimerList is not None: self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList) self['key_green'].setText(_('Remove timer')) self.key_green_choice = self.REMOVE_TIMER else: self['key_green'].setText(_('Add Timer')) self.key_green_choice = self.ADD_TIMER try: self.refreshlist() except: pass def finishSanityCorrection(self, answer): self.finishedAdd(answer) def removeTimer(self, timer): timer.afterEvent = AFTEREVENT.NONE self.session.nav.RecordTimer.removeEntry(timer) self['key_green'].setText(_('Add Timer')) self.key_green_choice = self.ADD_TIMER self.closeChoiceBoxDialog() try: self.refreshlist() except: pass def RecordTimerQuestion(self): cur = self['list'].getCurrent() event = cur[0] serviceref = cur[1] if event is None: return eventid = event.getEventId() refstr = serviceref.ref.toString() for timer in self.session.nav.RecordTimer.timer_list: if timer.eit == eventid and timer.service_ref.ref.toString() == refstr: cb_func = lambda ret: self.removeTimer(timer) menu = [(_("Yes"), 'CALLFUNC', cb_func), (_("No"), 'CALLFUNC', self.ChoiceBoxCB, self.ChoiceBoxNull)] self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, text=_('Do you really want to remove the timer for %s?') % event.getEventName(), list=menu, skin_name="RemoveTimerQuestion") self.showChoiceBoxDialog() break else: menu = [(_("Record once"), 'CALLFUNC', self.ChoiceBoxCB, self.doRecordTimer), (_("Add AutoTimer"), 'CALLFUNC', self.ChoiceBoxCB, self.addAutoTimerSilent)] self.ChoiceBoxDialog = self.session.instantiateDialog(ChoiceBox, title="%s?" 
% event.getEventName(), list=menu, skin_name="RecordTimerQuestion") serviceref = eServiceReference(str(self['list'].getCurrent()[1])) posy = self['list'].getSelectionPosition(serviceref) self.ChoiceBoxDialog.instance.move(ePoint(posy[0]-self.ChoiceBoxDialog.instance.size().width(),self.instance.position().y()+posy[1])) self.showChoiceBoxDialog() def recButtonPressed(self): if not self.longbuttonpressed: self.RecordTimerQuestion() else: self.longbuttonpressed = False def reclongButtonPressed(self): self.longbuttonpressed = True self.doZapTimer() def ChoiceBoxNull(self): return def ChoiceBoxCB(self, choice): if choice[3]: try: choice[3]() except: choice[3] self.closeChoiceBoxDialog() def showChoiceBoxDialog(self): self['okactions'].setEnabled(False) self['epgcursoractions'].setEnabled(False) self['colouractions'].setEnabled(False) self['recordingactions'].setEnabled(False) self['epgactions'].setEnabled(False) self["dialogactions"].setEnabled(True) self.ChoiceBoxDialog['actions'].execBegin() self.ChoiceBoxDialog.show() if self.has_key('input_actions'): self['input_actions'].setEnabled(False) def closeChoiceBoxDialog(self): self["dialogactions"].setEnabled(False) if self.ChoiceBoxDialog: self.ChoiceBoxDialog['actions'].execEnd() self.session.deleteDialog(self.ChoiceBoxDialog) self['okactions'].setEnabled(True) self['epgcursoractions'].setEnabled(True) self['colouractions'].setEnabled(True) self['recordingactions'].setEnabled(True) self['epgactions'].setEnabled(True) if self.has_key('input_actions'): self['input_actions'].setEnabled(True) def doRecordTimer(self): self.doInstantTimer(0) def doZapTimer(self): self.doInstantTimer(1) def doInstantTimer(self, zap): cur = self['list'].getCurrent() event = cur[0] serviceref = cur[1] if event is None: return eventid = event.getEventId() refstr = serviceref.ref.toString() newEntry = RecordTimerEntry(serviceref, checkOldTimers=True, *parseEvent(event)) self.InstantRecordDialog = self.session.instantiateDialog(InstantRecordTimerEntry, newEntry, zap) retval = [True, self.InstantRecordDialog.retval()] self.session.deleteDialogWithCallback(self.finishedAdd, self.InstantRecordDialog, retval) def OK(self): if self.zapnumberstarted: self.dozumberzap() else: if config.epgselection.graph_ok.getValue() == 'Zap' or config.epgselection.enhanced_ok.getValue() == 'Zap' or config.epgselection.infobar_ok.getValue() == 'Zap' or config.epgselection.multi_ok.getValue() == 'Zap': self.zapTo() if config.epgselection.graph_ok.getValue() == 'Zap + Exit' or config.epgselection.enhanced_ok.getValue() == 'Zap + Exit' or config.epgselection.infobar_ok.getValue() == 'Zap + Exit' or config.epgselection.multi_ok.getValue() == 'Zap + Exit': self.zap() def OKLong(self): if self.zapnumberstarted: self.dozumberzap() else: if config.epgselection.graph_oklong.getValue() == 'Zap' or config.epgselection.enhanced_oklong.getValue() == 'Zap' or config.epgselection.infobar_oklong.getValue() == 'Zap' or config.epgselection.multi_oklong.getValue() == 'Zap': self.zapTo() if config.epgselection.graph_oklong.getValue() == 'Zap + Exit' or config.epgselection.enhanced_oklong.getValue() == 'Zap + Exit' or config.epgselection.infobar_oklong.getValue() == 'Zap + Exit' or config.epgselection.multi_oklong.getValue() == 'Zap + Exit': self.zap() def Info(self): if (self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.getValue() == 'Channel Info'): self.infoKeyPressed() elif (self.type == EPG_TYPE_GRAPH and config.epgselection.graph_info.getValue() == 'Single EPG'): self.OpenSingleEPG() else: 
self.infoKeyPressed() def InfoLong(self): if self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.getValue() == 'Channel Info': self.infoKeyPressed() elif self.type == EPG_TYPE_GRAPH and config.epgselection.graph_infolong.getValue() == 'Single EPG': self.OpenSingleEPG() else: self.OpenSingleEPG() def applyButtonState(self, state): if state == 0: self['now_button'].hide() self['now_button_sel'].hide() self['next_button'].hide() self['next_button_sel'].hide() self['more_button'].hide() self['more_button_sel'].hide() self['now_text'].hide() self['next_text'].hide() self['more_text'].hide() self['key_red'].setText('') else: if state == 1: self['now_button_sel'].show() self['now_button'].hide() else: self['now_button'].show() self['now_button_sel'].hide() if state == 2: self['next_button_sel'].show() self['next_button'].hide() else: self['next_button'].show() self['next_button_sel'].hide() if state == 3: self['more_button_sel'].show() self['more_button'].hide() else: self['more_button'].show() self['more_button_sel'].hide() def onSelectionChanged(self): cur = self['list'].getCurrent() event = cur[0] self['Event'].newEvent(event) if cur[1] is None: self['Service'].newService(None) else: self['Service'].newService(cur[1].ref) if self.type == EPG_TYPE_MULTI: count = self['list'].getCurrentChangeCount() if self.ask_time != -1: self.applyButtonState(0) elif count > 1: self.applyButtonState(3) elif count > 0: self.applyButtonState(2) else: self.applyButtonState(1) datestr = '' if event is not None: now = time() beg = event.getBeginTime() nowTime = localtime(now) begTime = localtime(beg) if nowTime[2] != begTime[2]: datestr = strftime(_('%A %e %b'), begTime) else: datestr = '%s' % _('Today') self['date'].setText(datestr) if cur[1] is None or cur[1].getServiceName() == '': if self.key_green_choice != self.EMPTY: self['key_green'].setText('') self.key_green_choice = self.EMPTY return if event is None: if self.key_green_choice != self.EMPTY: self['key_green'].setText('') self.key_green_choice = self.EMPTY return serviceref = cur[1] eventid = event.getEventId() refstr = serviceref.ref.toString() isRecordEvent = False for timer in self.session.nav.RecordTimer.timer_list: if timer.eit == eventid and timer.service_ref.ref.toString() == refstr: isRecordEvent = True break if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER: self['key_green'].setText(_('Remove timer')) self.key_green_choice = self.REMOVE_TIMER elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER: self['key_green'].setText(_('Add Timer')) self.key_green_choice = self.ADD_TIMER if self.eventviewDialog and (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH): self.infoKeyPressed(True) def moveTimeLines(self, force = False): self.updateTimelineTimer.start((60 - int(time()) % 60) * 1000) self['timeline_text'].setEntries(self['list'], self['timeline_now'], self.time_lines, force) self['list'].l.invalidate() def isPlayable(self): current = ServiceReference(self.servicelist.getCurrentSelection()) return not current.ref.flags & (eServiceReference.isMarker | eServiceReference.isDirectory) def setServicelistSelection(self, bouquet, service): if self.servicelist: if self.servicelist.getRoot() != bouquet: self.servicelist.clearPath() self.servicelist.enterPath(self.servicelist.bouquet_root) self.servicelist.enterPath(bouquet) self.servicelist.setCurrentSelection(service) def closeEventViewDialog(self): if self.eventviewDialog: self.eventviewDialog.hide() del self.eventviewDialog self.eventviewDialog = 
None def zap(self): if self.zapFunc: self.zapSelectedService() self.closeEventViewDialog() self.close(True) else: self.closeEventViewDialog() self.close() def zapSelectedService(self, prev=False): if self.session.pipshown: self.prevch = str(self.session.pip.getCurrentService().toString()) else: self.prevch = str(self.session.nav.getCurrentlyPlayingServiceReference().toString()) lst = self["list"] count = lst.getCurrentChangeCount() if count == 0: ref = lst.getCurrent()[1] if ref is not None: if (self.type == EPG_TYPE_INFOBAR or self.type == EPG_TYPE_INFOBARGRAPH) and config.epgselection.infobar_preview_mode.getValue() == '2': if not self.session.pipshown: self.session.pip = self.session.instantiateDialog(PictureInPicture) self.session.pip.show() self.session.pipshown = True n_service = self.pipServiceRelation.get(str(ref.ref), None) if n_service is not None: service = eServiceReference(n_service) else: service = ref.ref if self.session.pipshown and self.currch == service.toString(): self.session.pipshown = False del self.session.pip self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = False) return self.session.pip.playService(service) self.currch = str(self.session.pip.getCurrentService().toString()) else: self.zapFunc(ref.ref, bouquet = self.getCurrentBouquet(), preview = prev) self.currch = str(self.session.nav.getCurrentlyPlayingServiceReference().toString()) self['list'].setCurrentlyPlaying(self.session.nav.getCurrentlyPlayingServiceOrGroup()) def zapTo(self): if self.session.nav.getCurrentlyPlayingServiceOrGroup() and self.session.nav.getCurrentlyPlayingServiceOrGroup().toString().find('0:0:0:0:0:0:0:0:0') != -1: from Screens.InfoBarGenerics import setResumePoint setResumePoint(self.session) if self.zapFunc: self.zapSelectedService(True) self.refreshTimer.start(2000) if not self.currch or self.currch == self.prevch: if self.zapFunc: self.zapFunc(None, False) self.closeEventViewDialog() self.close('close') else: self.closeEventViewDialog() self.close() def keyNumberGlobal(self, number): if self.type == EPG_TYPE_GRAPH: if number == 1: timeperiod = config.epgselection.graph_prevtimeperiod.getValue() if timeperiod > 60: timeperiod = timeperiod - 60 self['list'].setEpoch(timeperiod) config.epgselection.graph_prevtimeperiod.setValue(timeperiod) self.moveTimeLines() elif number == 2: self.prevPage() elif number == 3: timeperiod = config.epgselection.graph_prevtimeperiod.getValue() if timeperiod < 300: timeperiod = timeperiod + 60 self['list'].setEpoch(timeperiod) config.epgselection.graph_prevtimeperiod.setValue(timeperiod) self.moveTimeLines() elif number == 4: self.updEvent(-2) elif number == 5: now = time() - int(config.epg.histminutes.getValue()) * 60 self.ask_time = now - now % (int(config.epgselection.graph_roundto.getValue()) * 60) self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines(True) elif number == 6: self.updEvent(+2) elif number == 7: if config.epgselection.graph_heightswitch.getValue(): config.epgselection.graph_heightswitch.setValue(False) else: config.epgselection.graph_heightswitch.setValue(True) self['list'].setItemsPerPage() self['list'].fillGraphEPG(None) self.moveTimeLines() elif number == 8: self.nextPage() elif number == 9: basetime = localtime(self['list'].getTimeBase()) basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.graph_primetimehour.getValue()), int(config.epgselection.graph_primetimemins.getValue()), 0, basetime[6], basetime[7], basetime[8]) self.ask_time = mktime(basetime) if 
self.ask_time + 3600 < time(): self.ask_time = self.ask_time + 86400 self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines(True) elif number == 0: self.toTop() now = time() - int(config.epg.histminutes.getValue()) * 60 self.ask_time = now - now % (int(config.epgselection.graph_roundto.getValue()) * 60) self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines() elif self.type == EPG_TYPE_INFOBARGRAPH: if number == 1: timeperiod = config.epgselection.infobar_prevtimeperiod.getValue() if timeperiod > 60: timeperiod = timeperiod - 60 self['list'].setEpoch(timeperiod) config.epgselection.infobar_prevtimeperiod.setValue(timeperiod) self.moveTimeLines() elif number == 2: self.prevPage() elif number == 3: timeperiod = config.epgselection.infobar_prevtimeperiod.getValue() if timeperiod < 300: timeperiod = timeperiod + 60 self['list'].setEpoch(timeperiod) config.epgselection.infobar_prevtimeperiod.setValue(timeperiod) self.moveTimeLines() elif number == 4: self.updEvent(-2) elif number == 5: now = time() - int(config.epg.histminutes.getValue()) * 60 self.ask_time = now - now % (int(config.epgselection.infobar_roundto.getValue()) * 60) self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines(True) elif number == 6: self.updEvent(+2) elif number == 8: self.nextPage() elif number == 9: basetime = localtime(self['list'].getTimeBase()) basetime = (basetime[0], basetime[1], basetime[2], int(config.epgselection.infobar_primetimehour.getValue()), int(config.epgselection.infobar_primetimemins.getValue()), 0, basetime[6], basetime[7], basetime[8]) self.ask_time = mktime(basetime) if self.ask_time + 3600 < time(): self.ask_time = self.ask_time + 86400 self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines(True) elif number == 0: self.toTop() now = time() - int(config.epg.histminutes.getValue()) * 60 self.ask_time = now - now % (int(config.epgselection.infobar_roundto.getValue()) * 60) self['list'].resetOffset() self['list'].fillGraphEPG(None, self.ask_time) self.moveTimeLines() else: self.zapnumberstarted = True self.NumberZapTimer.start(5000, True) if not self.NumberZapField: self.NumberZapField = str(number) else: self.NumberZapField = self.NumberZapField + str(number) self.handleServiceName() self["number"].setText(self.zaptoservicename+'\n'+self.NumberZapField) self["number"].show() if len(self.NumberZapField) >= 4: self.dozumberzap() def dozumberzap(self): self.zapnumberstarted = False self.numberEntered(self.service, self.bouquet) def handleServiceName(self): if self.searchNumber: self.service, self.bouquet = self.searchNumber(int(self.NumberZapField)) self.zaptoservicename = ServiceReference(self.service).getServiceName() def numberEntered(self, service = None, bouquet = None): if service is not None: self.zapToNumber(service, bouquet) def searchNumberHelper(self, serviceHandler, num, bouquet): servicelist = serviceHandler.list(bouquet) if servicelist is not None: serviceIterator = servicelist.getNext() while serviceIterator.valid(): if num == serviceIterator.getChannelNum(): return serviceIterator serviceIterator = servicelist.getNext() return None def searchNumber(self, number): bouquet = self.servicelist.getRoot() service = None serviceHandler = eServiceCenter.getInstance() service = self.searchNumberHelper(serviceHandler, number, bouquet) if config.usage.multibouquet.getValue(): service = self.searchNumberHelper(serviceHandler, number, bouquet) if 
service is None: bouquet = self.servicelist.bouquet_root bouquetlist = serviceHandler.list(bouquet) if bouquetlist is not None: bouquet = bouquetlist.getNext() while bouquet.valid(): if bouquet.flags & eServiceReference.isDirectory: service = self.searchNumberHelper(serviceHandler, number, bouquet) if service is not None: playable = not service.flags & (eServiceReference.isMarker | eServiceReference.isDirectory) or service.flags & eServiceReference.isNumberedMarker if not playable: service = None break if config.usage.alternative_number_mode.getValue(): break bouquet = bouquetlist.getNext() return (service, bouquet) def zapToNumber(self, service, bouquet): self["number"].hide() self.NumberZapField = None self.CurrBouquet = bouquet self.CurrService = service if service is not None: self.setServicelistSelection(bouquet, service) self.onCreate() class SingleEPG(EPGSelection): def __init__(self, session, service, EPGtype="single"): EPGSelection.__init__(self, session, service=service, EPGtype=EPGtype) self.skinName = 'EPGSelection'
# [file above: license gpl-2.0]
# ===== next file: marios-zindilis/musicbrainz-django-models / musicbrainz_django_models/models/deleted_entity.py =====
""" .. module:: deleted_entity The **Deleted Entity** Model. PostgreSQL Definition --------------------- The :code:`deleted_entity` table is defined in the MusicBrainz Server as: .. code-block:: sql CREATE TABLE deleted_entity ( gid UUID NOT NULL, -- PK data JSONB NOT NULL, deleted_at timestamptz NOT NULL DEFAULT now() ); """ from django.db import models from django.utils.encoding import python_2_unicode_compatible import uuid @python_2_unicode_compatible class deleted_entity(models.Model): """ Not all parameters are listed here, only those that present some interest in their Django implementation. :param gid: This is interesting because it cannot be NULL but a default is not defined in SQL. The default `uuid.uuid4` in Django will generate a UUID during the creation of an instance. :param data: This field uses the Postgres-specific data type `jsonb`, see: https://www.postgresql.org/docs/9.6/static/datatype-json.html This has a lot of value-added functionality in Postgres, like indexing and queries of the key-value pairs in the stored JSON. In Django, there is no backend-independent model field that can offer those features, because the implementation is Postgres-specific. The closest is a `TextField`, with JSON-specific functionality added as model methods. """ gid = models.UUIDField(default=uuid.uuid4, primary_key=True) data = models.TextField() deleted_at = models.DateTimeField(auto_now=True) def __str__(self): return 'Deleted Entity' class Meta: db_table = 'deleted_entity'
# [file above: license gpl-2.0]
# ===== next file: YetAnotherTimeTracker/yatt / handlers/interaction_handler.py =====
""" Created by anthony on 15.10.17 start_handler """ import json import logging import datetime from emoji import emojize from telegram import ParseMode from telegram.ext import MessageHandler, Filters, CallbackQueryHandler import g from components.automata import CONTEXT_COMMANDS, CONTEXT_TASK, CONTEXT_LANG, CONTEXT_ACTION from components.filter import command_filter from components.message_source import message_source from config.state_config import CallbackData, Language, ADMINS from services import state_service from utils.handler_utils import get_command_type, is_callback, deserialize_data log = logging.getLogger(__name__) def command_handler(): return MessageHandler(Filters.all, handle) def callback_handler(): return CallbackQueryHandler(callback=handle) ERR_COUNTER = {} def handle(bot, update): chat = update.effective_chat chat_id = chat.id curr_context = None try: curr_command = None curr_action = None if is_callback(update): # Callback handling log.info(f'--> Callback from {chat.username} ({chat.id})') data = update.callback_query.data deserialized = deserialize_data(update.callback_query.data) curr_command = deserialized[CallbackData.COMMAND] curr_action = deserialized[CallbackData.ACTION] else: # Regular message/command handling log.info(f'--> Message from {chat.username} ({chat.id})') text = update.message.text if command_filter.known_command(text) is False: # check if user is admin if chat_id in ADMINS: if '/stats' == text: bot.send_message(chat_id=chat_id, text=json.dumps(ERR_COUNTER, indent=2)) return elif text.startswith('/notify_all'): cmd = text.split(' ') text_to_send = ' '.join(cmd[1:]) welcome_back(bot, text_to_send) return else: if text.startswith('/suggest'): cmd = text.split(' ') text_to_send = ' '.join(cmd[1:]) log.warning(f'-->--> Suggest from {chat.username} ({chat_id}): {text}') bot.send_message(chat_id=ADMINS[0], text=text_to_send) bot.send_message(chat_id=chat_id, text='Thanks for feedback :)') return reply_on_unknown(bot, chat_id) return curr_command = get_command_type(text) # get all params needed to render state curr_state = g.automata.get_state(chat_id) curr_context = g.automata.get_context(chat_id) log.info(f'curr_state: {curr_state}, curr_command: {curr_command}') # find out state to be rendered next_state = g.automata.get_transition(curr_state, curr_command) if g.dev_mode: bot.send_message(chat_id=chat_id, text=f'prev state: {curr_state.name} ({curr_state.value})\n' f'cmd: {curr_command.name} ({curr_command.value})\n' f'new state: {next_state.name} ({next_state.value})') # get state from service and render handler = state_service.states()[next_state] log.info(f'rendering state: {handler.__name__}') handler(bot, update, curr_context) # update params log.info(f'Updating state to: {next_state.name}') g.automata.set_state(chat.id, next_state) curr_context[CONTEXT_COMMANDS].append(curr_command) curr_context[CONTEXT_ACTION].append(curr_action) if g.dev_mode: print_dev_info(bot, chat_id, curr_context) except Exception as e: try: ERR_COUNTER[str(len(ERR_COUNTER.values()) + 1)] = { 'datetime': str(datetime.datetime.now()), 'chat': chat_id, 'error': str(e), 'context': str(curr_context) } except Exception: pass log.error('Error has been caught in handler: ', e) lang = curr_context[CONTEXT_LANG] if curr_context is not None else Language.ENG.value text = message_source[lang]['error'] bot.send_message(chat_id=chat_id, text=emojize(text, use_aliases=True), parse_mode=ParseMode.MARKDOWN) else: log.info('<-- Successfully handled') def reply_on_unknown(bot, chat_id): 
log.info('x-- Replying on unknown command') lang = g.automata.get_context(chat_id)[CONTEXT_LANG] text = message_source[lang]['filter.unknown'] bot.send_message(chat_id=chat_id, text=emojize(text, use_aliases=True), parse_mode=ParseMode.MARKDOWN) def print_dev_info(bot, chat_id, curr_context): context_commands = [c.name for c in curr_context[CONTEXT_COMMANDS]] context_task = curr_context[CONTEXT_TASK].get_id() if curr_context[CONTEXT_TASK] else '-' bot.send_message(chat_id=chat_id, text=f'Latest task: {context_task}\nLatest commands: {context_commands}') def welcome_back(bot, text): for u in fellow_users: try: bot.send_message(chat_id=u, text=text, parse_mode=ParseMode.MARKDOWN) log.info(f'Sent to {u}') # in case bot was blocked except Exception as e: log.error(e) fellow_users = [ ]
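# Minimal sketch of the transition-table automaton that handle() above relies
# on via get_state / get_transition / set_state. The real components.automata
# module is not shown in this file, so the storage and fallback behaviour
# here are assumptions for illustration only.
class AutomataSketch(object):
    def __init__(self, initial_state, transitions):
        self._transitions = transitions  # {(state, command): next_state}
        self._states = {}                # chat_id -> current state
        self._initial = initial_state

    def get_state(self, chat_id):
        return self._states.get(chat_id, self._initial)

    def set_state(self, chat_id, state):
        self._states[chat_id] = state

    def get_transition(self, state, command):
        # Unmapped (state, command) pairs keep the user in the same state.
        return self._transitions.get((state, command), state)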
# [file above: license mit]
# ===== next file: QTB-HHU/petcmodel / parametersPETC.py =====
# File parametersPETC.py
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2015 Anna Matuszyńska, Oliver Ebenhöh

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program (license.txt). If not, see
<http://www.gnu.org/licenses/>.
"""
from numpy import log, exp


class ParametersPETC:

    defaultparameterset = {
        # pool sizes
        'PSIItot': 2.5,  # [mmol/molChl] total concentration of PSII
        'PSItot': 2.5,
        'PQtot': 17.5,  # [mmol/molChl]
        'PCtot': 4.,  # Bohme1987, but other sources give different values - seems to depend greatly on organism and conditions
        'Fdtot': 5.,  # Bohme1987
        'Ctot': 2.5,  # source unclear (Schoettler says 0.4...?, but plausible to assume that complexes (PSII,PSI,b6f) have approx. same abundance)
        'NADPtot': 25.,  # estimate from ~ 0.8 mM, Heineke1991
        'APtot': 60.,  # [mmol/molChl] Bionumbers ~2.55mM (=81mmol/molChl) (FIXME: Soma had 50)

        # parameters associated with photosystem II
        'kH': 0.,
        'kH0': 5.e8,  # base quenching, after calculation with Giovanni
        'kF': 6.25e7,  # fluorescence 16ns
        'k1': 5.e9,  # excitation of Pheo / charge separation 200ps
        'k1rev': 1.e10,
        'k2': 5.e9,  # original 5e9 (charge separation limiting step ~ 200ps) - made this faster for higher Fs fluorescence

        # parameters associated with photosystem I
        'kStt7': 0.0035,  # [s-1] fitted to the FM dynamics
        'kPph1': 0.0013,  # [s-1] fitted to the FM dynamics
        'KM_ST': 0.2,  # Switch point (half-activity of Stt7) for 20% PQ oxidised (80% reduced)
        'n_ST': 2.,  # Hill coefficient of 4 -> 1/(2.5^4)~1/40 activity at PQox=PQred
        'staticAntI': 0.2,
        'staticAntII': 0.0,

        # ATP and NADPH parameters
        'kATPsynth': 20.,  # taken from MATLAB
        'kATPcons': 10.,  # taken from MATLAB
        'kATPimport': 0.,  # TODO possibility for ATP import at night - NOT YET IMPLEMENTED!
        'ATPcyt': 0.5,  # only relative levels are relevant (normalised to 1) to set equilibrium
        'Pi_mol': 0.01,
        'DeltaG0_ATP': 30.6,  # 30.6kJ/mol / RT
        'HPR': 14. / 3.,
        'kNADPHimport': 0.,  # TODO possibility for NADPH import - NOT YET IMPLEMENTED!
        'kNADPHcons': 15.,  # taken from MATLAB
        'NADPHcyt': 0.5,  # only relative levels

        # global conversion factor of PFD to excitation rate
        'cPFD': 4.,  # [m^2/mmol PSII]

        # pH and protons
        'pHstroma': 7.8,
        'kLeak': 0.010,  # [1/s] leakage rate -- inconsistency with Kathrine
        'bH': 100.,  # proton buffer: ratio total / free protons

        # rate constants
        'kPQred': 250.,  # [1/(s*(mmol/molChl))]
        'kCytb6f': 2.5,  # a rough estimate: transfer PQ->cytf should be ~10ms
        'kPTOX': .01,  # ~ 5 electrons / seconds. This gives a bit more (~20)
        'kPCox': 2500.,  # a rough estimate: half life of PC->P700 should be ~0.2ms
        'kFdred': 2.5e5,  # a rough estimate: half life of PC->P700 should be ~2micro-s
        'kcatFNR': 500.,  # Carrillo2003 (kcat~500 1/s)
        'kcyc': 1.,
        'O2ext': 8.,  # corresponds to 250 microM, corr. to 20%
        'kNDH': .002,  # re-introduce e- into PQ pool. Only positive for anaerobic (reducing) condition
        'kNh': 0.05,
        'kNr': 0.004,
        'NPQsw': 5.8,
        'nH': 5.,
        'EFNR': 3.,  # Bohme1987
        'KM_FNR_F': 1.56,  # corresponds to 0.05 mM (Aliverti1990)
        'KM_FNR_N': 0.22,  # corresponds to 0.007 mM (Shin1971, Aliverti2004)

        # standard redox potentials (at pH=0) in V
        'E0_QA': -0.140,
        'E0_PQ': 0.354,
        'E0_cytf': 0.350,
        'E0_PC': 0.380,
        'E0_P700': 0.480,
        'E0_FA': -0.550,
        'E0_Fd': -0.430,
        'E0_NADP': -0.113,

        # physical constants
        'F': 96.485,  # Faraday constant
        'R': 8.3e-3,  # universal gas constant
        'T': 298.,  # Temperature in K - for now assumed to be constant at 25 C
    }

    def __init__(self, pars={}):
        mypars = pars.copy()
        for k in ParametersPETC.defaultparameterset.keys():
            mypars.setdefault(k, ParametersPETC.defaultparameterset[k])
        for k in mypars.keys():
            setattr(self, k, mypars[k])

        self.setCompositeParameters()
        setattr(self, 'KeqPQred', self.Keq_PQred())
        setattr(self, 'KeqCyc', self.Keq_cyc())
        setattr(self, 'KeqCytfPC', self.Keq_cytfPC())
        setattr(self, 'KeqFAFd', self.Keq_FAFd())
        setattr(self, 'KeqPCP700', self.Keq_PCP700())
        setattr(self, 'KeqNDH', self.Keq_NDH())
        setattr(self, 'KeqFNR', self.Keq_FNR())

    def setCompositeParameters(self):
        setattr(self, 'RT', self.R * self.T)
        setattr(self, 'dG_pH', log(10) * self.RT)
        setattr(self, 'Hstroma', 3.2e4 * 10 ** (-self.pHstroma))  # proton concentration in stroma
        setattr(self, 'kProtonation', 4e-3 / self.Hstroma)  # [1/s] converted from 4 * 10^-6 [1/ms] protonation of LHCs (L), depends on pH value in lumen

    def Keq_PQred(self):
        DG1 = -self.E0_QA * self.F
        DG2 = -2 * self.E0_PQ * self.F
        DG = -2 * DG1 + DG2 + 2 * self.pHstroma * self.dG_pH
        K = exp(-DG / self.RT)
        return K

    def Keq_cyc(self):
        DG1 = -self.E0_Fd * self.F
        DG2 = -2 * self.E0_PQ * self.F
        DG = -2 * DG1 + DG2 + 2 * self.dG_pH * self.pHstroma
        K = exp(-DG / self.RT)
        return K

    def Keq_cytfPC(self):
        DG1 = -self.E0_cytf * self.F
        DG2 = -self.E0_PC * self.F
        DG = -DG1 + DG2
        K = exp(-DG / self.RT)
        return K

    def Keq_FAFd(self):
        DG1 = -self.E0_FA * self.F
        DG2 = -self.E0_Fd * self.F
        DG = -DG1 + DG2
        K = exp(-DG / self.RT)
        return K

    def Keq_PCP700(self):
        DG1 = -self.E0_PC * self.F
        DG2 = -self.E0_P700 * self.F
        DG = -DG1 + DG2
        K = exp(-DG / self.RT)
        return K

    def Keq_NDH(self):
        DG1 = -2 * self.E0_NADP * self.F
        DG2 = -2 * self.E0_PQ * self.F
        DG = -DG1 + DG2 + self.dG_pH * self.pHstroma
        K = exp(-DG / self.RT)
        return K

    def Keq_FNR(self):
        DG1 = -self.E0_Fd * self.F
        DG2 = -2 * self.E0_NADP * self.F
        DG = -2 * DG1 + DG2 + self.dG_pH * self.pHstroma
        K = exp(-DG / self.RT)
        return K
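# Usage sketch for ParametersPETC above: constructor overrides merge with the
# default set, and the derived equilibrium constants K = exp(-DG/RT) become
# attributes. The override value below is illustrative only.
if __name__ == '__main__':
    petc = ParametersPETC({'T': 303.})  # e.g. run the model at 30 C
    print(petc.RT)        # composite parameter set by setCompositeParameters
    print(petc.KeqPQred)  # equilibrium constant for PQ reduction at stroma pH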
# [file above: license agpl-3.0]
# ===== next file: Apogaea/voldb / volunteer/apps/departments/api/v2/views.py =====
from collections import OrderedDict

from rest_framework import response
from rest_framework import viewsets
from rest_framework import generics
from rest_framework import pagination

from volunteer.apps.departments.models import (
    Department,
    Role,
)
from volunteer.apps.departments.api.v2.serializers import (
    RoleSerializer,
    DepartmentSerializer,
)


class PassThroughPaginator(pagination.BasePagination):
    def get_paginated_response(self, data):
        return response.Response(OrderedDict([
            ('count', len(data)),
            ('next', None),
            ('previous', None),
            ('results', data),
        ]))

    def paginate_queryset(self, queryset, *args, **kwargs):
        return queryset


class DepartmentViewSet(generics.ListAPIView, viewsets.GenericViewSet):
    queryset = Department.objects.all()
    serializer_class = DepartmentSerializer
    pagination_class = PassThroughPaginator


class RoleViewSet(generics.ListAPIView, generics.RetrieveAPIView, viewsets.GenericViewSet):
    queryset = Role.objects.all()
    serializer_class = RoleSerializer
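# Hypothetical router wiring for the viewsets above; the URL prefixes are
# illustrative and not taken from the project.
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'departments', DepartmentViewSet)
router.register(r'roles', RoleViewSet)
urlpatterns = router.urls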
# [file above: license gpl-3.0]
# ===== next file: adrianschroeter/kiwi / test/unit/system_root_bind_test.py =====
from mock import patch
from mock import call
import mock

from .test_helper import raises

from kiwi.exceptions import (
    KiwiMountKernelFileSystemsError,
    KiwiMountSharedDirectoryError,
    KiwiSetupIntermediateConfigError
)
from kiwi.system.root_bind import RootBind


class TestRootBind(object):
    def setup(self):
        root = mock.Mock()
        root.root_dir = 'root-dir'
        self.bind_root = RootBind(root)
        # stub config files and bind locations
        self.bind_root.config_files = ['/foo']
        self.bind_root.bind_locations = ['/proc']
        # stub files/dirs and mountpoints to cleanup
        self.mount_manager = mock.Mock()
        self.bind_root.cleanup_files = ['/foo.kiwi']
        self.bind_root.mount_stack = [self.mount_manager]
        self.bind_root.dir_stack = ['/mountpoint']

    @raises(KiwiMountKernelFileSystemsError)
    @patch('kiwi.system.root_bind.MountManager.bind_mount')
    @patch('kiwi.system.root_bind.RootBind.cleanup')
    @patch('os.path.exists')
    def test_kernel_file_systems_raises_error(
        self, mock_exists, mock_cleanup, mock_mount
    ):
        mock_exists.return_value = True
        mock_mount.side_effect = KiwiMountKernelFileSystemsError(
            'mount-error'
        )
        self.bind_root.mount_kernel_file_systems()
        # fixed: was `mock.cleanup.assert_called_once_with()`, which asserts
        # on the mock module itself instead of the patched cleanup
        mock_cleanup.assert_called_once_with()

    @raises(KiwiMountSharedDirectoryError)
    @patch('kiwi.system.root_bind.MountManager.bind_mount')
    @patch('kiwi.system.root_bind.Path.create')
    @patch('kiwi.system.root_bind.RootBind.cleanup')
    def test_shared_directory_raises_error(
        self, mock_cleanup, mock_path, mock_mount
    ):
        mock_mount.side_effect = KiwiMountSharedDirectoryError(
            'mount-error'
        )
        self.bind_root.mount_shared_directory()
        mock_cleanup.assert_called_once_with()

    @raises(KiwiSetupIntermediateConfigError)
    @patch('kiwi.command.Command.run')
    @patch('kiwi.system.root_bind.RootBind.cleanup')
    @patch('os.path.exists')
    def test_intermediate_config_raises_error(
        self, mock_exists, mock_cleanup, mock_command
    ):
        mock_exists.return_value = True
        mock_command.side_effect = KiwiSetupIntermediateConfigError(
            'config-error'
        )
        self.bind_root.setup_intermediate_config()
        mock_cleanup.assert_called_once_with()

    @patch('kiwi.system.root_bind.os.path.exists')
    @patch('kiwi.system.root_bind.MountManager')
    def test_mount_kernel_file_systems(self, mock_mount, mock_exists):
        mock_exists.return_value = True
        shared_mount = mock.Mock()
        mock_mount.return_value = shared_mount
        self.bind_root.mount_kernel_file_systems()
        mock_mount.assert_called_once_with(
            device='/proc', mountpoint='root-dir/proc'
        )
        shared_mount.bind_mount.assert_called_once_with()

    @patch('kiwi.system.root_bind.MountManager')
    @patch('kiwi.system.root_bind.Path.create')
    def test_mount_shared_directory(self, mock_path, mock_mount):
        shared_mount = mock.Mock()
        mock_mount.return_value = shared_mount
        self.bind_root.mount_shared_directory()
        mock_path.call_args_list = [
            call('root-dir/var/cache/kiwi'),
            call('/var/cache/kiwi')
        ]
        mock_mount.assert_called_once_with(
            device='/var/cache/kiwi', mountpoint='root-dir/var/cache/kiwi'
        )
        shared_mount.bind_mount.assert_called_once_with()

    @patch('kiwi.command.Command.run')
    @patch('os.path.exists')
    def test_intermediate_config(self, mock_exists, mock_command):
        mock_exists.return_value = True
        self.bind_root.setup_intermediate_config()
        assert mock_command.call_args_list == [
            call([
                'cp', '/foo', 'root-dir/foo.kiwi'
            ]),
            call([
                'ln', '-s', '-f', 'foo.kiwi', 'root-dir/foo'
            ])
        ]

    @patch('kiwi.system.root_bind.MountManager.is_mounted')
    @patch('kiwi.system.root_bind.Command.run')
    @patch('kiwi.system.root_bind.Path.remove_hierarchy')
    @patch('os.path.islink')
    @patch('os.path.exists')
    @patch('shutil.move')
    def test_cleanup(
        self, mock_move, mock_exists, mock_islink,
        mock_remove_hierarchy, mock_command, mock_is_mounted
    ):
        mock_is_mounted.return_value = False
        mock_exists.return_value = True
        mock_islink.return_value = True
        self.bind_root.cleanup()
        self.mount_manager.umount_lazy.assert_called_once_with()
        mock_remove_hierarchy.assert_called_once_with('root-dir/mountpoint')
        mock_command.assert_called_once_with(
            ['rm', '-f', 'root-dir/foo.kiwi', 'root-dir/foo']
        )
        mock_move.assert_called_once_with(
            'root-dir/foo.rpmnew', 'root-dir/foo'
        )

    @patch('os.path.islink')
    @patch('kiwi.logger.log.warning')
    @patch('kiwi.command.Command.run')
    @patch('kiwi.system.root_bind.Path.remove_hierarchy')
    def test_cleanup_continue_on_error(
        self, mock_remove_hierarchy, mock_command, mock_warn, mock_islink
    ):
        mock_islink.return_value = True
        mock_remove_hierarchy.side_effect = Exception
        mock_command.side_effect = Exception
        self.mount_manager.umount_lazy.side_effect = Exception
        self.bind_root.cleanup()
        assert mock_warn.call_args_list == [
            call(
                'Image root directory %s not cleanly umounted: %s',
                'root-dir', ''
            ),
            call(
                'Failed to remove directory %s: %s', '/mountpoint', ''
            ),
            call(
                'Failed to remove intermediate config files: %s', ''
            )
        ]

    @patch('kiwi.logger.log.warning')
    @patch('kiwi.command.Command.run')
    @patch('kiwi.system.root_bind.Path.remove_hierarchy')
    def test_cleanup_nothing_mounted(
        self, mock_remove_hierarchy, mock_command, mock_warn
    ):
        self.mount_manager.is_mounted.return_value = False
        self.mount_manager.mountpoint = '/mountpoint'
        self.bind_root.cleanup()
        mock_warn.assert_called_once_with(
            'Path %s not a mountpoint', '/mountpoint'
        )

    def test_move_to_root(self):
        assert self.bind_root.move_to_root(
            [self.bind_root.root_dir + '/argument']
        ) == ['/argument']
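# For reference (illustrative, not project code): the @raises decorator used
# above corresponds to pytest's native context-manager form, sketched here
# with a trivial exception so the demo is self-contained.
import pytest


def _raises_equivalent_demo():
    with pytest.raises(ZeroDivisionError):
        1 / 0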
# [file above: license gpl-3.0]
# ===== next file: elbaschid/cc-django-app / {{cookiecutter.repo_name}}/conftest.py =====
import os
import sys

from django.conf import settings

location = lambda x: os.path.join(
    os.path.dirname(os.path.realpath(__file__)), x)
sandbox = lambda x: location("sandbox/{}".format(x))

sys.path.insert(0, sandbox('.'))


def pytest_configure():
    if not settings.configured:
        settings.configure(
            DATABASES={
                'default': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'NAME': ':memory:',
                }
            },
            MEDIA_ROOT=sandbox('public/media'),
            MEDIA_URL='/media/',
            STATIC_URL='/static/',
            STATICFILES_DIRS=[sandbox('static')],
            STATIC_ROOT=sandbox('public/static'),
            STATICFILES_FINDERS=(
                'django.contrib.staticfiles.finders.FileSystemFinder',
                'django.contrib.staticfiles.finders.AppDirectoriesFinder',
            ),
            TEMPLATE_LOADERS=(
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ),
            TEMPLATE_CONTEXT_PROCESSORS=(
                "django.contrib.auth.context_processors.auth",
                "django.core.context_processors.request",
                "django.core.context_processors.debug",
                "django.core.context_processors.i18n",
                "django.core.context_processors.media",
                "django.core.context_processors.static",
                "django.contrib.messages.context_processors.messages",
            ),
            MIDDLEWARE_CLASSES=(
                'django.contrib.sessions.middleware.SessionMiddleware',
                'django.middleware.locale.LocaleMiddleware',
                'django.middleware.common.CommonMiddleware',
                'django.middleware.csrf.CsrfViewMiddleware',
                'django.contrib.auth.middleware.AuthenticationMiddleware',
                'django.contrib.messages.middleware.MessageMiddleware',
            ),
            ROOT_URLCONF='sandbox.sandbox.urls',
            TEMPLATE_DIRS=[sandbox('templates')],
            LOGIN_REDIRECT_URL='/accounts/',
            APPEND_SLASH=True,
            SITE_ID=1,
        )
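# A minimal test that would run under the configuration above; it checks the
# settings themselves, so it needs no app or database (illustrative sketch).
def test_sqlite_in_memory_database():
    assert settings.DATABASES['default']['NAME'] == ':memory:'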
# [file above: license bsd-3-clause]
# ===== next file: benjolitz/aerospike / aerospike/utils.py =====
import sys
from ctypes.util import find_library

from .constants import (DEPENDENCY, NONBLOCKING, BLOCKING,
                        AEROSPIKE_3, AEROSPIKE_2,)


class SharedLibrary(object):
    def __init__(self, *names):
        self.names = names

    def __str__(self):
        return 'SharedLibrary({0})'.format(', '.join(self.names))

    def find_library(self):
        for name in self.names:
            dependency = find_library(name)
            if dependency:
                return dependency
        return None


LIBRARY_TYPES = {
    AEROSPIKE_3: [
        DEPENDENCY(SharedLibrary('aerospike'), BLOCKING, [])],
    AEROSPIKE_2: [
        DEPENDENCY(
            SharedLibrary('ev2citrusleaf-2.0'),
            NONBLOCKING,
            [SharedLibrary('event', 'event-2.0')]),
        DEPENDENCY(
            SharedLibrary('citrusleaf-2.0'),
            BLOCKING,
            [])]
}


def detect_aerospike_libraries(aerospike_version=None):
    """
    Locate Aerospike libraries using ctypes.util.find_library.

    The library that will be used is determined by the desired Aerospike
    capabilities (2 or 3) followed by the higher ranked type (i.e. if a
    NONBLOCKING is found, it will win over BLOCKING).

    This function does not depend upon anything and thus is safe to call
    from setup.py.
    """
    aerospike_version = aerospike_version or (AEROSPIKE_2 | AEROSPIKE_3)
    if sys.platform in ('win32', 'cygwin'):
        raise NotImplementedError("Win32 not considered")
    for library_version, potential_libraries in LIBRARY_TYPES.items():
        if library_version & aerospike_version:
            for lib in potential_libraries:
                if lib.shared_object.find_library() and \
                        all(name.find_library() for name in lib.dependencies):
                    yield library_version, lib
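# Hypothetical call site for the generator above: take the first match, which
# reflects the requested capability mask. Names follow the imports above.
if __name__ == '__main__':
    for version, dependency in detect_aerospike_libraries(AEROSPIKE_3):
        print('found {0} for capability {1}'.format(
            dependency.shared_object, version))
        break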
# [file above: license bsd-2-clause]
# ===== next file: AiolosLiu/ToolKit / Python/simpleTools/webserver.py =====
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from urlparse import urlparse, parse_qs
#import os
import json

DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 8081  # must be an int: HTTPServer expects a numeric port


class RequestHandler(BaseHTTPRequestHandler):

    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        return

    def do_POST(self):
        #if self.headers.has_key('Cookie'):
        #    print "Request Cookies %s" % self.headers['Cookie']
        #params = parse_qs(urlparse(self.path).query)
        #print "%s" % params
        #for (d, x) in params.items():
        #    print "%s:%s" % (d, x)
        length = int(self.headers.getheader('content-length'))
        field_data = self.rfile.read(length)
        print field_data
        params = json.loads(field_data)
        #params = parse_qs(field_data, keep_blank_values=int)
        #print length
        print "%s" % params
        #for (d, x) in params.items():
        #    print "%s:%s" % (d, x)
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        # cookie_send = 'SessionID=%0.17f' % random.random()
        # print "Respond Cookies %s" % cookie_send
        # self.send_header('Set-Cookie', cookie_send)
        self.end_headers()
        #
        # Send the message to browser
        # self.wfile.write("cookies test!")
        return


def run_server():
    try:
        server_address = (DEFAULT_HOST, DEFAULT_PORT)
        server = HTTPServer(server_address, RequestHandler)
        print "Custom HTTP server started on port: %s" % DEFAULT_PORT
        server.serve_forever()
    except Exception, err:
        print "Error:%s" % err
    except KeyboardInterrupt:
        print "Server interrupted and is shutting down..."
        server.socket.close()


if __name__ == "__main__":
    run_server()
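# Client-side smoke test for the server above; a sketch assuming the server is
# already running locally, standard-library only to match the file's Python 2.
def post_example(payload=None):
    import urllib2
    if payload is None:
        payload = {'hello': 'world'}
    # A JSON POST: content-length is set automatically, which do_POST reads.
    req = urllib2.Request('http://%s:%d' % (DEFAULT_HOST, DEFAULT_PORT),
                          data=json.dumps(payload),
                          headers={'Content-Type': 'application/json'})
    return urllib2.urlopen(req).read()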
# [file above: license gpl-3.0]
# ===== next file: EdwardBetts/osm-wikidata / tests/test_overpass.py =====
from matcher.overpass import oql_from_tag, oql_for_area, group_tags from pprint import pprint tags = ['admin_level', 'amenity=arts_centre', 'amenity=astronomical_observatory', 'amenity=bar', 'amenity=clock', 'amenity=college', 'amenity=community_centre', 'amenity=concert_hall', 'amenity=conference_centre', 'amenity=courthouse', 'amenity=grave_yard', 'amenity=hospital', 'amenity=library', 'amenity=marketplace', 'amenity=monastery', 'amenity=music_venue', 'site=school', 'site=station', 'site=university', 'sport', 'tourism', 'type=bridge', 'type=site', 'waterway=lock_gate'] def test_oql_from_tag(): ret = oql_from_tag('site', filters='area.a') assert ret == ['\n rel(area.a)[site][~"^(addr:housenumber|.*name.*)$"~".",i];'] def test_oql_for_area(): bbox = 'bbox:52.157942,0.068639,52.237230,0.184552' oql = oql_for_area('rel', 295355, ['amenity=library'], bbox, '') expect = ''' [timeout:600][out:xml][bbox:bbox:52.157942,0.068639,52.237230,0.184552]; area(3600295355) -> .a; ( node(area.a)["amenity"="library"]; way(area.a)["amenity"="library"]; rel(area.a)["amenity"="library"]; ) -> .b; ( rel(295355); node.b[~"^(addr:housenumber|.*name.*)$"~".",i]; way.b[~"^(addr:housenumber|.*name.*)$"~".",i]; rel.b[~"^(addr:housenumber|.*name.*)$"~".",i]; ); (._;>;); out;''' assert oql == expect oql = oql_for_area('rel', 295355, tags, bbox, '') expect = ''' [timeout:600][out:xml][bbox:bbox:52.157942,0.068639,52.237230,0.184552]; area(3600295355) -> .a; ( node(area.a)["admin_level"]; way(area.a)["admin_level"]; rel(area.a)["admin_level"]; node(area.a)["amenity"~"^(arts_centre|astronomical_observatory|bar|clock|college|community_centre|concert_hall|conference_centre|courthouse|grave_yard|hospital|library|marketplace|monastery|music_venue)$"]; way(area.a)["amenity"~"^(arts_centre|astronomical_observatory|bar|clock|college|community_centre|concert_hall|conference_centre|courthouse|grave_yard|hospital|library|marketplace|monastery|music_venue)$"]; rel(area.a)["amenity"~"^(arts_centre|astronomical_observatory|bar|clock|college|community_centre|concert_hall|conference_centre|courthouse|grave_yard|hospital|library|marketplace|monastery|music_venue)$"]; rel(area.a)["site"~"^(school|station|university)$"]; node(area.a)["sport"]; way(area.a)["sport"]; rel(area.a)["sport"]; node(area.a)["tourism"]; way(area.a)["tourism"]; rel(area.a)["tourism"]; rel(area.a)["type"~"^(bridge|site)$"]; node(area.a)["waterway"="lock_gate"]; way(area.a)["waterway"="lock_gate"]; rel(area.a)["waterway"="lock_gate"]; ) -> .b; ( rel(295355); node.b[~"^(addr:housenumber|.*name.*)$"~".",i]; way.b[~"^(addr:housenumber|.*name.*)$"~".",i]; rel.b[~"^(addr:housenumber|.*name.*)$"~".",i]; ); (._;>;); out;''' assert oql == expect def test_group_tags(): ret = group_tags(tags) expect = { 'admin_level': [], 'amenity': ['arts_centre', 'astronomical_observatory', 'bar', 'clock', 'college', 'community_centre', 'concert_hall', 'conference_centre', 'courthouse', 'grave_yard', 'hospital', 'library', 'marketplace', 'monastery', 'music_venue'], 'site': ['school', 'station', 'university'], 'sport': [], 'tourism': [], 'type': ['bridge', 'site'], 'waterway': ['lock_gate'] } assert ret == expect
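# A sketch of the grouping that test_group_tags above expects: "key=value"
# tags are bucketed by key, and bare keys map to an empty list. This is an
# illustration only, not the actual matcher.overpass implementation.
def group_tags_sketch(tag_list):
    groups = {}
    for tag in tag_list:
        key, _, value = tag.partition('=')
        groups.setdefault(key, [])
        if value:
            groups[key].append(value)
    return groups


assert group_tags_sketch(['type=bridge', 'type=site', 'sport']) == \
    {'type': ['bridge', 'site'], 'sport': []}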
# [file above: license gpl-3.0]
# ===== next file: Gustavo6046/GusBot-2 / pluginbackup/listeners.py =====
from plugincon import easy_bot_command, bot_command, get_message_target

listener_targets = {}


@easy_bot_command("addlistener", True)
def add_listeners(message, raw):
    if raw:
        return

    if len(message["arguments"]) < 3:
        return "Syntax: addlistener <in channel> <out channel>"

    in_chan = message["arguments"][1]
    out_chan = message["arguments"][2]

    try:
        listener_targets[in_chan].append(out_chan)
    except KeyError:
        listener_targets[in_chan] = [out_chan]

    return ["Listener added successfully!"]


@easy_bot_command("removelistener", True)
def remove_listeners(message, raw):
    # renamed from a second `add_listeners`, which shadowed the handler above
    if raw:
        return

    if len(message["arguments"]) < 3:
        return "Syntax: removelistener <in channel> <out channel>"

    in_chan = message["arguments"][1]
    out_chan = message["arguments"][2]

    try:
        listener_targets[in_chan].remove(out_chan)
    except ValueError:
        return "No such listener out channel!"
    except KeyError:
        return "No such listener in channel!"

    if listener_targets[in_chan] == []:
        del listener_targets[in_chan]

    return ["Listener removed successfully!"]


@bot_command("ListenerParse", False, True, True)
def listener_parsing(message, connector, index, raw):
    def message_target(msg, target=None):
        if not target:
            target = get_message_target(connector, message, index)
        connector.send_message(index, target, msg)

    if raw:
        return

    try:
        for target in listener_targets[message["channel"]]:
            message_target("<{}> {}".format(
                message["nickname"], message["message"]), target)
    except KeyError:
        pass
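# Shape of the message dict the handlers above consume, inferred from the
# keys they access; values are illustrative only.
EXAMPLE_MESSAGE = {
    "arguments": ["addlistener", "#source", "#mirror"],  # split command line
    "channel": "#source",   # where the message was seen
    "nickname": "alice",    # sender, used for the relayed "<nick> text"
    "message": "hello",     # raw text to mirror
}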
# [file above: license mit]
# ===== next file: jsirois/pants / src/python/pants/backend/python/util_rules/pex_from_targets_test.py =====
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from textwrap import dedent from typing import Optional import pytest from pants.backend.python.target_types import PythonLibrary, PythonRequirementLibrary from pants.backend.python.util_rules import pex_from_targets from pants.backend.python.util_rules.pex import PexRequest, PexRequirements from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest from pants.build_graph.address import Address from pants.engine.internals.scheduler import ExecutionError from pants.python.python_setup import ResolveAllConstraintsOption from pants.testutil.rule_runner import QueryRule, RuleRunner @pytest.fixture def rule_runner() -> RuleRunner: return RuleRunner( rules=[ *pex_from_targets.rules(), QueryRule(PexRequest, (PexFromTargetsRequest,)), ], target_types=[PythonLibrary, PythonRequirementLibrary], ) def test_constraints_validation(rule_runner: RuleRunner) -> None: rule_runner.add_to_build_file( "", dedent( """ python_requirement_library(name="foo", requirements=["foo-bar>=0.1.2"]) python_requirement_library(name="bar", requirements=["bar==5.5.5"]) python_requirement_library(name="baz", requirements=["baz"]) python_library(name="util", sources=[], dependencies=[":foo", ":bar"]) python_library(name="app", sources=[], dependencies=[":util", ":baz"]) """ ), ) rule_runner.create_file( "constraints1.txt", dedent( """ # Comment. --find-links=https://duckduckgo.com Foo._-BAR==1.0.0 # Inline comment. bar==5.5.5 baz==2.2.2 qux==3.4.5 """ ), ) rule_runner.create_file( "constraints2.txt", dedent( """ foo==1.0.0 bar==5.5.5 qux==3.4.5 """ ), ) def get_pex_request( constraints_file: Optional[str], resolve_all: Optional[ResolveAllConstraintsOption], *, direct_deps_only: bool = False, ) -> PexRequest: args = ["--backend-packages=pants.backend.python"] request = PexFromTargetsRequest( [Address("", target_name="app")], output_filename="demo.pex", internal_only=True, direct_deps_only=direct_deps_only, ) if resolve_all: args.append(f"--python-setup-resolve-all-constraints={resolve_all.value}") if constraints_file: args.append(f"--python-setup-requirement-constraints={constraints_file}") rule_runner.set_options(args) return rule_runner.request(PexRequest, [request]) pex_req1 = get_pex_request("constraints1.txt", ResolveAllConstraintsOption.NEVER) assert pex_req1.requirements == PexRequirements(["foo-bar>=0.1.2", "bar==5.5.5", "baz"]) pex_req1_direct = get_pex_request( "constraints1.txt", ResolveAllConstraintsOption.NEVER, direct_deps_only=True ) assert pex_req1_direct.requirements == PexRequirements(["baz"]) pex_req2 = get_pex_request("constraints1.txt", ResolveAllConstraintsOption.ALWAYS) assert pex_req2.requirements == PexRequirements( ["Foo._-BAR==1.0.0", "bar==5.5.5", "baz==2.2.2", "qux==3.4.5"] ) pex_req2_direct = get_pex_request( "constraints1.txt", ResolveAllConstraintsOption.ALWAYS, direct_deps_only=True ) assert pex_req2_direct.requirements == PexRequirements( ["Foo._-BAR==1.0.0", "bar==5.5.5", "baz==2.2.2", "qux==3.4.5"] ) with pytest.raises(ExecutionError) as err: get_pex_request(None, ResolveAllConstraintsOption.ALWAYS) assert len(err.value.wrapped_exceptions) == 1 assert isinstance(err.value.wrapped_exceptions[0], ValueError) assert ( "[python-setup].resolve_all_constraints is set to always, so " "[python-setup].requirement_constraints must also be provided." ) in str(err.value) # Shouldn't error, as we don't explicitly set --resolve-all-constraints. 
get_pex_request(None, None)
# [file above: license apache-2.0]
# ===== next file: Merzavcev/py-helpers / reduce01/reduce01.py =====
# -*- coding: utf-8 -*-
import csv
import sys
from datetime import datetime


def main(input_file, output_file):
    csvfile = open(input_file, 'r')
    reader = csv.DictReader(csvfile, delimiter=';', fieldnames=['date', 'uid'])

    counter = 0
    uids = {}
    dt_tpl = '%Y-%m-%d'
    for line in reader:
        uid = int(line['uid'])
        uids.setdefault(uid, [])
        uids[uid].append(line['date'])
        # if counter > 3000:
        #     break
        counter += 1

    # Lifetime per uid: days between the first and last observed date.
    summary = [(datetime.strptime(dates[-1], dt_tpl) -
                datetime.strptime(dates[0], dt_tpl)).days
               for dates in uids.values()]

    # sorted() instead of list(set(...)): the accumulation below relies on
    # ascending day order, which a bare set does not guarantee.
    uniq_days = sorted(set(summary))
    count_of_uids = [summary.count(i) for i in uniq_days]
    # print uids
    # print summary
    # print zip(uniq_days, count_of_uids)

    # Accumulate from the largest lifetime down, so each output row becomes
    # "number of uids with a lifetime of at least N days".
    acc_count_of_uids = count_of_uids[::-1]
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for index, val in enumerate(count_of_uids):
        acc_count_of_uids[index] += total
        total = acc_count_of_uids[index]
    # print zip(uniq_days, acc_count_of_uids[::-1])

    with open(output_file, 'w') as output:
        writer = csv.writer(output, delimiter=',')
        writer.writerows(zip(uniq_days, acc_count_of_uids[::-1]))

    print 'input file: ' + input_file
    print 'output file: ' + output_file


if __name__ == '__main__':
    if len(sys.argv) == 3:
        input_file, output_file = sys.argv[1:]
    else:
        input_file = 'cookies_sorted.csv'
        output_file = 'cookies_lifetime.csv'
    main(input_file, output_file)
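# Tiny rehearsal of the accumulation above on made-up lifetimes, showing that
# each output row reads "uids whose lifetime is at least N days":
# lifetimes [2, 0, 2] -> days [0, 2], counts [1, 2] -> rows [(0, 3), (2, 2)].
def _accumulate_demo():
    summary = [2, 0, 2]
    uniq_days = sorted(set(summary))
    counts = [summary.count(d) for d in uniq_days]
    acc = counts[::-1]
    running = 0
    for i in range(len(acc)):
        acc[i] += running
        running = acc[i]
    return zip(uniq_days, acc[::-1])  # [(0, 3), (2, 2)]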
# [file above: license mit]
# ===== next file: showmen15/testEEE / src/amberdriver/collision_avoidance/collision_avoidance.py =====
import logging import logging.config import threading import time import math from ambercommon.common import runtime import os from amberdriver.tools import config import collision_avoidance_logic as logic __author__ = 'paoolo' pwd = os.path.dirname(os.path.abspath(__file__)) logging.config.fileConfig('%s/collision_avoidance.ini' % pwd) config.add_config_ini('%s/collision_avoidance.ini' % pwd) LOGGER_NAME = 'CollisionAvoidance' ROBO_WIDTH = float(config.ROBO_WIDTH) MAX_SPEED = float(config.MAX_SPEED) MAX_ROTATING_SPEED = float(config.MAX_ROTATING_SPEED) SOFT_LIMIT = float(config.SOFT_LIMIT) HARD_LIMIT = float(config.HARD_LIMIT) SCANNER_DIST_OFFSET = float(config.SCANNER_DIST_OFFSET) ANGLE_RANGE = float(config.ANGLE_RANGE) DISTANCE_ALPHA = float(config.DISTANCE_ALPHA) RODEO_SWAP_ALPHA = float(config.RODEO_SWAP_ALPHA) def bound_sleep_interval(value, min_value=0.2, max_value=2.0): return value if min_value < value < max_value else max_value if value > max_value else min_value class CollisionAvoidance(object): def __init__(self, roboclaw_proxy, hokuyo_proxy): self.__roboclaw_proxy = roboclaw_proxy self.__hokuyo_proxy = hokuyo_proxy self.__scan = [] self.__scan_timestamp = 0.0 self.__scanning_lock = threading.Condition() self.__driving_speed = (0, 0, 0, 0) self.__driving_speed_timestamp = 0.0 self.__driving_lock = threading.Condition() self.__is_active = True self.__wait_for_data_lock = threading.Condition() self.__logger = logging.getLogger(LOGGER_NAME) runtime.add_shutdown_hook(self.terminate) def set_speed(self, front_left, front_right, rear_left, rear_right): try: self.__driving_lock.acquire() self.__driving_speed = front_left, front_right, rear_left, rear_right self.__driving_speed_timestamp = time.time() self.__notify() finally: self.__driving_lock.release() def stop(self): self.set_speed(0, 0, 0, 0) def get_scan(self): try: self.__scanning_lock.acquire() return self.__scan finally: self.__scanning_lock.release() def scanning_loop(self): sleep_interval = 0.2 last_scan_timestamp = 0.0 while self.__is_active: scan = self.__hokuyo_proxy.get_single_scan() scan.wait_available(sleep_interval * 1.1) if scan.is_available(): try: self.__scanning_lock.acquire() self.__scan = scan.get_points() self.__scan_timestamp = scan.get_timestamp() current_scan_timestamp = scan.get_timestamp() self.__notify() finally: self.__scanning_lock.release() scan_interval = current_scan_timestamp - last_scan_timestamp last_scan_timestamp = current_scan_timestamp if scan_interval < 2.0: sleep_interval += 0.5 * (scan_interval - sleep_interval) sleep_interval = bound_sleep_interval(sleep_interval) time.sleep(sleep_interval) def driving_loop(self): wait_timeout = 0.2 last_scan_timestamp = 0.0 last_command_timestamp = 0.0 last_left, last_right = 0.0, 0.0 while self.__is_active: self.__wait(wait_timeout * 1.1) try: self.__driving_lock.acquire() front_left, front_right, rear_left, rear_right = self.__driving_speed current_command_timestamp = self.__driving_speed_timestamp finally: self.__driving_lock.release() try: self.__scanning_lock.acquire() scan = self.__scan current_scan_timestamp = self.__scan_timestamp finally: self.__scanning_lock.release() if current_scan_timestamp > last_scan_timestamp or current_command_timestamp > last_command_timestamp: left = sum([front_left, rear_left]) / 2.0 right = sum([front_right, rear_right]) / 2.0 left, right = CollisionAvoidance.rodeo_swap(left, right, scan) left, right = CollisionAvoidance.limit_due_to_reverse_direction(left, right) left, right = 
CollisionAvoidance.limit_due_to_distance(left, right, scan) left, right = CollisionAvoidance.low_pass_filter(left, right) left, right = CollisionAvoidance.limit_to_max_speed(left, right) else: left, right = last_left, last_right current_timestamp = time.time() trust_level = CollisionAvoidance.scan_trust(current_scan_timestamp, current_timestamp) * \ CollisionAvoidance.command_trust(current_command_timestamp, current_timestamp) left *= trust_level right *= trust_level left, right = int(left), int(right) self.__roboclaw_proxy.send_motors_command(left, right, left, right) last_left, last_right = left, right command_interval = current_command_timestamp - last_command_timestamp last_command_timestamp = current_command_timestamp scan_interval = current_scan_timestamp - last_scan_timestamp last_scan_timestamp = current_command_timestamp min_interval = min(command_interval, scan_interval) if min_interval < 2.0: wait_timeout += 0.5 * (min_interval - wait_timeout) wait_timeout = bound_sleep_interval(wait_timeout) def terminate(self): self.stop() self.__is_active = False @staticmethod def limit_due_to_distance(left, right, scan): if left > 0 or right > 0: current_angle = logic.get_angle(left, right, ROBO_WIDTH) current_speed = logic.get_speed(left, right) if scan is not None: min_distance, _ = logic.get_min_distance(scan, current_angle, SCANNER_DIST_OFFSET, ANGLE_RANGE) if min_distance is not None: soft_limit = logic.get_soft_limit(current_speed, MAX_SPEED, SOFT_LIMIT * 1.3, HARD_LIMIT * 1.3, DISTANCE_ALPHA) if HARD_LIMIT * 1.3 < min_distance < soft_limit: max_speed = logic.get_max_speed(min_distance, soft_limit, HARD_LIMIT * 1.3, MAX_SPEED) if current_speed > max_speed: left, right = CollisionAvoidance.__calculate_new_left_right(left, right, max_speed, current_speed) elif min_distance <= HARD_LIMIT * 1.3: left, right = 0, 0 else: print 'distance: no scan!' 
left, right = 0.0, 0.0 return left, right @staticmethod def __calculate_new_left_right(left, right, max_speed, current_speed): if current_speed > 0: divide = max_speed / current_speed return left * divide, right * divide else: return left, right @staticmethod def limit_to_max_speed(left, right): left = CollisionAvoidance.__limit_to_max_speed(left) right = CollisionAvoidance.__limit_to_max_speed(right) return left, right @staticmethod def __limit_to_max_speed(value): max_speed = MAX_SPEED return max_speed if value > max_speed \ else -max_speed if value < -max_speed \ else value @staticmethod def limit_due_to_reverse_direction(left, right): max_speed = MAX_SPEED if (left + right) / 2.0 < 0: if left < 0 and right < 0: left = left if left > -max_speed else -max_speed right = right if right > -max_speed else -max_speed elif left < 0 < right: right = right if right < max_speed else max_speed left = -right elif left > 0 > right: left = left if left < max_speed else max_speed right = -left return left, right @staticmethod def rodeo_swap(left, right, scan): current_angle = logic.get_angle(left, right, ROBO_WIDTH) current_speed = logic.get_speed(left, right) min_distance, min_distance_angle = logic.get_min_distance(scan, current_angle, SCANNER_DIST_OFFSET, ANGLE_RANGE) if min_distance is not None: soft_limit = logic.get_soft_limit(current_speed, MAX_SPEED, SOFT_LIMIT, HARD_LIMIT, RODEO_SWAP_ALPHA) if min_distance < soft_limit: if min_distance_angle < current_angle: if left > 0: left = left if left < MAX_ROTATING_SPEED else MAX_ROTATING_SPEED right = -left else: if right > 0: _t = left left = right right = _t else: if right > 0: right = right if right < MAX_ROTATING_SPEED else MAX_ROTATING_SPEED left = -right else: if left > 0: _t = right right = left left = _t elif min_distance < soft_limit * 0.4: left = -left right = -right return left, right @staticmethod def low_pass_filter(left, right): # TODO implement low pass filter return left, right @staticmethod def scan_trust(scan_timestamp, current_timestamp): val = scan_timestamp / 1000.0 - current_timestamp return math.pow(4.0 / 3.0, val) @staticmethod def command_trust(command_timestamp, current_timestamp): val = command_timestamp - current_timestamp return math.pow(4.0 / 3.0, val) def __notify(self): self.__wait_for_data_lock.acquire() try: self.__wait_for_data_lock.notify_all() finally: self.__wait_for_data_lock.release() def __wait(self, wait_timeout): self.__wait_for_data_lock.acquire() try: self.__wait_for_data_lock.wait(wait_timeout) finally: self.__wait_for_data_lock.release()
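# Worked numbers for the trust decay above: trust = (4/3) ** (-age_seconds),
# so fresh data keeps full speed and stale data scales it down smoothly
# (scan timestamps are in milliseconds, hence the /1000.0 in scan_trust).
# age 0 s -> 1.00, 1 s -> 0.75, 3 s -> ~0.42, 6 s -> ~0.18
def _trust_demo():
    import math
    return [(age, math.pow(4.0 / 3.0, -age)) for age in (0.0, 1.0, 3.0, 6.0)]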
mit
2,363,641,249,624,883,000
34.557377
114
0.546293
false
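The loop above scales the motor command by the product of two exponential trust factors, so the output fades toward zero as the newest laser scan or remote command ages. A minimal sketch of that decay, mirroring command_trust (scan_trust additionally converts its timestamp from milliseconds); the two-second age is illustrative:

import math
import time

def trust(data_timestamp, now):
    # (4/3) ** (data_timestamp - now): 1.0 for fresh data,
    # decaying exponentially as the data gets older.
    return math.pow(4.0 / 3.0, data_timestamp - now)

now = time.time()
print(trust(now, now))        # 1.0
print(trust(now - 2.0, now))  # (3/4)**2 = 0.5625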
jblupus/PyLoyaltyProject
db/cassandra_db/result_handler.py
1
1147
from cassandra.cluster import Event


class PagedResultHandler:
    def __init__(self, future):
        self.page_rows = []
        self.error = None
        self.finished_event = Event()
        self.future = future
        self.future.add_callbacks(
            callback=self.handle_page,
            errback=self.handle_error)

    def handle_page(self, rows):
        self.process_rows(rows)
        if self.future.has_more_pages:
            self.future.start_fetching_next_page()
        else:
            self.finished_event.set()

    def handle_error(self, exc):
        self.error = exc
        self.finished_event.set()

    def process_rows(self, rows):
        self.page_rows.extend(rows)


class SeedHandler(PagedResultHandler):
    def __init__(self, future, obj):
        PagedResultHandler.__init__(self, future)
        self.obj = obj

    def process_rows(self, rows):
        self.obj.handle_seeds(rows)


class FriendsHandler(PagedResultHandler):
    def __init__(self, future, ft):
        PagedResultHandler.__init__(self, future)
        self.ft = ft

    def process_rows(self, rows):
        self.ft.handle_friends(rows)
bsd-2-clause
7,231,973,324,801,520,000
25.068182
50
0.607672
false
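A hedged usage sketch for the PagedResultHandler above: submit an async query, then block until every page has been consumed. The contact point, keyspace and query are placeholders; the import path follows the file listing above.

from cassandra.cluster import Cluster
from db.cassandra_db.result_handler import PagedResultHandler

cluster = Cluster(["127.0.0.1"])
session = cluster.connect("my_keyspace")

future = session.execute_async("SELECT * FROM users")
handler = PagedResultHandler(future)
handler.finished_event.wait()  # set once the last page (or an error) arrives

if handler.error:
    raise handler.error
print(len(handler.page_rows), "rows collected")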
phearbot/python-email
emailsender.py
1
6197
#!/usr/bin/env python import os import smtplib import mimetypes import argparse from email.mime.multipart import MIMEMultipart from email import encoders from email.message import Message from email.mime.audio import MIMEAudio from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.text import MIMEText # Argument Parser parser = argparse.ArgumentParser(description='Process inputs', formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=55)) parser.add_argument("-s", "--sender", metavar="<sender>", type=str, default="[email protected]", help="def: [email protected]") parser.add_argument("-r", "--recipient", metavar="<recipient>", type=str, required=True) parser.add_argument("-m", "--mta", metavar="<ip address>", type=str, required=True, help="IP address of next-hop MTA") parser.add_argument("-p", "--port", metavar="<port>", type=str, help="Port email will send on (def: 25)", default="25") parser.add_argument("-a", "--attach", metavar="<attachment>", type=str, nargs='+', help="Full or relative path to attachment") parser.add_argument("-S", "--subject", metavar="<subject>", type=str, help="Subject of the email", default="email sent by python script") # Mutually exclusive group for body types (you can use a string or a file, not both) body_group = parser.add_mutually_exclusive_group() body_group.add_argument("-b", "--body", metavar="<body>", type=str, help="String for the body of the email") # body_group.add_argument("-B", "--body", metavar="<body>", type=str, help="Full or relative path to email body file") parser.add_argument("-H", action="store_true", help="Adds an HTML body in addition to the plain text body") parser.add_argument("-t", action="store_true", help="Enable TLS") parser.add_argument("-q", action="store_true", help="Attempts to get a queue id, but may have unexpected results") parser.add_argument("-v", action="store_true", help="Verbose mode") args = parser.parse_args() # Creates key/value pair to return qids and filenames qids = {} def main(): # Build the SMTP Connection server = buildsmtp() # Iterate through, building and sending messages for each attachment provided for a in args.attach: msg = buildmsg(a) qid = sendmsg(server, msg) qids[qid] = a # Close SMTP connection prquit = server.docmd("QUIT") if (args.v): print prquit # Debugging #for x in qids: # print x, qids[x] return qids def buildsmtp(): # Create the SMTP object (server format "ip:port") Note: This actually checks to see if the port is open try: server = smtplib.SMTP(args.mta + ":" + args.port) except: print "Error 001: Unable to connect to " + args.mta + " on port " + args.port exit() # If selected, attempts to negotiate TLS (also, prhelo = print helo) if args.t: prhelo = server.ehlo() try: server.starttls() server.ehlo() if args.v: print "TlS started successfully." except: print "TLS was not accepted by " + args.mta + ". \nAttempting to send unencrypted." # If no TLS flag, initiates the connection else: try: prhelo = server.docmd("helo", "labs.test") except: print "Error 002: Sending email failed, could be a bad address?" if args.v: print "Attempting to send the email to " + args.mta + ":" + args.port if args.v: print prhelo # NOT YET IMPLEMENTED # This can be used for server auth (like gmail), but it's disabled. 
You will need to add the 'server.login(username,password)' line in somewhere # username = "user" # password = "password" # server.login(username,password) return server def buildmsg(a): # Create the message and add sender, recipient and subject (This will be used if you aren't using the -q flag) msg = MIMEMultipart() msg["From"] = args.sender msg["To"] = args.recipient msg["Subject"] = args.subject msg.preamble = args.subject # Create the alternative for the text/plain and text/html. This object is attached inside the multipart message alt_msg = MIMEMultipart('alternative') # Verbose logging to display to/from/subj if args.v: print "\n### Verbose Output Enabled ###\n" print "From: " + args.sender print "To: " + args.recipient print "Subject: " + args.subject if a: print "Attachment: " + os.path.basename(a) + "\n" # Attaches text/plain. Also attaches HTML if it is selected # https://docs.python.org/3/library/email-examples.html (RFC 2046) alt_msg.attach(MIMEText(args.body, "plain")) if args.H: alt_msg.attach(MIMEText(args.body, "html")) msg.attach(alt_msg) # Checks for an attachment argument, and if there is one identify it's type. # Borrowed from https://docs.python.org/2.4/lib/node597.html if a is not None: ctype, encoding = mimetypes.guess_type(a) if ctype is None or encoding is not None: ctype = "application/octet-stream" maintype, subtype = ctype.split("/", 1) if maintype == "text": fp = open(a) # Note: we should handle calculating the charset attachment = MIMEText(fp.read(), _subtype=subtype) fp.close() elif maintype == "image": fp = open(a, "rb") attachment = MIMEImage(fp.read(), _subtype=subtype) fp.close() elif maintype == "audio": fp = open(a, "rb") attachment = MIMEAudio(fp.read(), _subtype=subtype) fp.close() else: fp = open(a, "rb") attachment = MIMEBase(maintype, subtype) attachment.set_payload(fp.read()) fp.close() encoders.encode_base64(attachment) attachment.add_header("Content-Disposition", "attachment", filename=os.path.basename(a)) msg.attach(attachment) # This line will literally print the entire email including headers # print "\n\n\n" + msg.as_string() + "\n\n\n" return msg def sendmsg(server, msg): # Sends the email DATA prfrom = server.docmd("MAIL from:", args.sender) prto = server.docmd("RCPT to:", args.recipient) prdata = server.docmd("DATA") qidline = server.docmd(msg.as_string() + "\r\n.") # Prints what happened above when attempting to send if args.v: print prfrom print prto print prdata print qidline qid = qidline[1].split(" ")[4] if args.q: print qid return qid if __name__== "__main__": main()
apache-2.0
-446,742,571,279,302,700
32.139037
145
0.689043
false
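The script above drives the SMTP dialogue with raw docmd() calls so it can capture the queue id from the DATA response; for comparison, a minimal Python 3 sketch of the same send through smtplib's high-level API (host, port and addresses are examples):

import smtplib
from email.mime.text import MIMEText

msg = MIMEText("hello", "plain")
msg["From"] = "sender@example.com"
msg["To"] = "recipient@example.com"
msg["Subject"] = "test"

# sendmail() handles MAIL FROM / RCPT TO / DATA and raises on failure
with smtplib.SMTP("192.0.2.25", 25) as server:
    server.sendmail(msg["From"], [msg["To"]], msg.as_string())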
MKLab-ITI/reveal-graph-embedding
reveal_graph_embedding/experiments/demo.py
1
3853
__author__ = 'Georgios Rizos ([email protected])' import numpy as np from reveal_graph_embedding.common import get_threads_number from reveal_graph_embedding.experiments.utility import run_experiment ######################################################################################################################## # Configure experiments by setting values for the capital letter variables and also for the parameters. ######################################################################################################################## DATASET_NAME = "youtube" # snow2014, flickr, youtube, politicsuk DATASET_FOLDER = "/path/to/dataset/folder" FEATURE_EXTRACTION_METHOD_NAME = "arcte" # acte, lapeig, repeig, louvain, mroc, basecomm def get_feature_extraction_parameters(feature_extraction_method_name): feature_extraction_parameters = dict() if feature_extraction_method_name == "arcte": feature_extraction_parameters["epsilon"] = 0.00001 feature_extraction_parameters["rho"] = 0.1 feature_extraction_parameters["community_weighting"] = "chi2" # chi2, ivf, None elif feature_extraction_method_name == "mroc": feature_extraction_parameters["community_weighting"] = "chi2" # chi2, ivf, None feature_extraction_parameters["alpha"] = 1000 elif feature_extraction_method_name == "louvain": feature_extraction_parameters["community_weighting"] = "chi2" # chi2, ivf, None elif feature_extraction_method_name == "basecomm": feature_extraction_parameters["community_weighting"] = "chi2" # chi2, ivf, None elif feature_extraction_method_name == "lapeig": feature_extraction_parameters["dimensionality"] = 50 elif feature_extraction_method_name == "repeig": feature_extraction_parameters["dimensionality"] = 50 else: print("Invalid method name.") raise RuntimeError return feature_extraction_parameters def get_classifier_parameters(feature_extraction_method_name): classifier_parameters = dict() if feature_extraction_method_name == "arcte": classifier_parameters["C"] = 1.0 classifier_parameters["fit_intercept"] = True elif feature_extraction_method_name == "mroc": classifier_parameters["C"] = 1.0 classifier_parameters["fit_intercept"] = True elif feature_extraction_method_name == "louvain": classifier_parameters["C"] = 1.0 classifier_parameters["fit_intercept"] = True elif feature_extraction_method_name == "basecomm": classifier_parameters["C"] = 1.0 classifier_parameters["fit_intercept"] = True elif feature_extraction_method_name == "lapeig": classifier_parameters["C"] = 50.0 classifier_parameters["fit_intercept"] = False elif feature_extraction_method_name == "repeig": classifier_parameters["C"] = 50.0 classifier_parameters["fit_intercept"] = False else: print("Invalid method name.") raise RuntimeError return classifier_parameters PERCENTAGES = np.arange(1, 11) # [1, 10] TRIAL_NUM = 10 THREAD_NUM = get_threads_number() ######################################################################################################################## # Experiment execution. ######################################################################################################################## feature_extraction_parameters = get_feature_extraction_parameters(FEATURE_EXTRACTION_METHOD_NAME) classifier_parameters = get_classifier_parameters(FEATURE_EXTRACTION_METHOD_NAME) run_experiment(DATASET_NAME, DATASET_FOLDER, FEATURE_EXTRACTION_METHOD_NAME, PERCENTAGES, TRIAL_NUM, THREAD_NUM, feature_extraction_parameters, classifier_parameters)
apache-2.0
-3,191,426,867,047,894,000
44.329412
120
0.600571
false
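Reconfiguring the demo above means editing the capital-letter variables at its top. A hedged example switching to the Laplacian-eigenmaps baseline (the dataset folder is a placeholder); per the two lookup functions, this selects {"dimensionality": 50} for feature extraction and C=50.0 with no intercept for the classifier:

DATASET_NAME = "flickr"                    # snow2014, flickr, youtube, politicsuk
DATASET_FOLDER = "/data/flickr"
FEATURE_EXTRACTION_METHOD_NAME = "lapeig"  # arcte, lapeig, repeig, louvain, mroc, basecomm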
jeffkistler/django-require-media
src/require_media/templatetags/require_media_tags.py
1
6448
from django import template from require_media.conf import settings from require_media.renderers import get_renderer from require_media.utils import determine_requirement_group register = template.Library() def get_manager(context): return context.get(settings.CONTEXT_VAR_NAME, None) # # require_inline # class RequireInlineNode(template.Node): """ Registers an inline media requirement with the requirement manager. """ def __init__(self, requirement, nodelist, group=None, depends=None): self.requirement = requirement self.nodelist = nodelist self.group = group self.depends = depends or [] def render(self, context): manager = get_manager(context) if manager is not None: manager.add_inline(self.requirement, self.nodelist, self.group, self.depends) return u"" def compile_require_inline_node(parser, token): """ Register a required inline asset, such as CSS or JavaScript. There are two required arguments: ``name``, which is a name for the block, and ``group``, which is a type identifier for the block. All further arguments specify requirements the one being defined depends upon. A simple example:: {% require_inline sidebar css %} #sidebar { color: #ff0; } {% end_require_inline %} """ parts = token.split_contents() nodelist = parser.parse(('end_require_inline',)) parser.delete_first_token() if not len(parts) >= 3: raise template.TemplateSyntaxError("%s tag requires two arguments: name and group" % parts[0]) tag_name, requirement, group, depends_on = parts[0], parts[1], parts[2], parts[3:] requirement_aliases = settings.REQUIREMENT_ALIASES or {} group_aliases = settings.REQUIREMENT_GROUP_ALIASES or {} requirement = requirement_aliases.get(requirement) or requirement group = group_aliases.get(group) or group depends_on = [requirement_aliases.get(dependency, dependency) for dependency in depends_on] return RequireInlineNode(requirement, nodelist, group, depends_on) register.tag("require_inline", compile_require_inline_node) # # require # class RequireNode(template.Node): """ Registers an external media requirement with the requirement manager. """ def __init__(self, requirement, group=None, depends_on=None): self.requirement = requirement self.group = group self.depends_on = depends_on or [] def render(self, context): manager = get_manager(context) if manager is not None: manager.add_external(self.requirement, self.group, self.depends_on) return u"" def compile_require_node(parser, token): """ Registers an external requirement with the current request context. The tag is invoked with the following signature:: {% require [<group>] requirement [<depends> ...] %} The optional ``group`` argument specifies a requirement group, such as "css" or "js", the ``requirement`` argument specifies the desired requirement, and all further arguments are interpreted as requirements of the one currently being specified. 
A simple example that causes ``jquery.js`` to be added to the list of ``js`` requirements for the current request:: {% require js jquery.js %} This more complicated example would add ``jquery-ui.js`` to the list of ``js`` requirements after ``jquery.js``:: {% require js jquery-ui.js jquery.js %} """ parts = token.split_contents() if not len(parts) >= 2: raise template.TemplateSyntaxError("%s tag requires one or more arguments" % parts[0]) tag_name, args = parts[0], parts[1:] group_aliases = settings.REQUIREMENT_GROUP_ALIASES or {} requirement_aliases = settings.REQUIREMENT_ALIASES or {} potential_group = group_aliases.get(args[0]) or args[0] if potential_group and potential_group in settings.GROUPS: if not len(args) >= 2: raise template.TemplateSyntaxError("%s tag requires a requirement to be specified" % parts[0]) group = potential_group args = args[1:] else: group = None requirement = requirement_aliases.get(args[0]) or args[0] depends_on = args[1:] depends_on = [requirement_aliases.get(dependency, dependency) for dependency in depends_on] if group is None: group = determine_requirement_group(requirement, settings.GROUPS) return RequireNode(requirement, group, depends_on) register.tag("require", compile_require_node) # # render_requirements # # nodelist.render coerces node render output # nodelist.render is called by: # blocknode # autoescapecontrolnode # filternode # fornode # spacelessnode # withnode class DelayedRequirementsRenderer(object): """ Delays requirement lookup until unicode coercion. """ def __init__(self, manager, groups, context): self.manager = manager self.groups = groups self.context = context def __unicode__(self): parts = [] requirements = self.manager.get_sorted_requirements_for_groups(self.groups) for requirement in requirements: renderer = get_renderer(requirement.group) if renderer: parts.append(renderer.render(requirement, self.context)) return u"".join(parts) class RenderRequirementsNode(template.Node): """ Renders registered requirements for a given group. NOTE: Should only be used in a base template to capture all declared requirements. """ def __init__(self, groups=None): self.groups = groups self.request_var = template.Variable("request") def render(self, context): manager = get_manager(context) if manager: return DelayedRequirementsRenderer(manager, self.groups, context) return u"" def compile_render_requirements_node(parser, token): """ Renders required media assets for the current request context. Accepts an optional positional argument to specify the asset group to render. The following example renders all CSS assets:: {% render_requirements css %} """ args = token.split_contents() groups = args[1:] or settings.GROUPS return RenderRequirementsNode(groups) register.tag("render_requirements", compile_render_requirements_node)
bsd-3-clause
617,413,400,390,660,600
30.763547
106
0.673697
false
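Pulling the docstring examples above together, a template using these tags might look like the following sketch; asset names are illustrative, and it assumes the tag library is loaded under its module name:

{% load require_media_tags %}
{% require js jquery.js %}
{% require js jquery-ui.js jquery.js %}
{% require_inline sidebar css %}
#sidebar { color: #ff0; }
{% end_require_inline %}
{# ... page body ... #}
{% render_requirements css %}
{% render_requirements js %}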
sjsj0101/backtestengine
backtest/strategys/boll/boll-stoploss.py
1
8359
# encoding: utf-8 """ @version: python3.6 @author: ‘sj‘ @contact: [email protected] @file: boll-stoploss.py @time: 11/6/17 1:59 PM """ from backtest.core.backteststrategy import * from backtest.optimizer.optimizer import * from backtest.tools.ta import * from datetime import datetime as dtL class BollStrategyReverse(BacktestStrategy): def initialize(self): self.context.universe = ['SR801'] self.context.run_info.strategy_name = 'bollReverse15m-sr_main' self.context.run_info.feed_frequency = '15m' self.context.run_info.start_date = '2017-09-01' self.context.run_info.end_date = '2017-09-28' self.context.run_info.ip = localip self.context.run_info.main_contract = True self.context.init_cash = 1000000 self.context.boll = Boll() self.context.cash = 1000000 # 初始资金 self.context.cash_rate = 0.8 # 资金利用率 # self.context.future_info = ts.get_future_info(self.context.universe[0]) # 获取合约属性 self.context.slippage = 0 # 开仓价 变化幅度 2 个变动单位 self.context.direction = '' self.context.open_vol = 0 # 当前开仓手数 self.context.open_flag = False # false表示没有开仓 true表示已经开仓了 self.context.can_open_flag = True # true 表示能继续开仓 false 表示已经开足仓了 self.context.close_count = 0 # 平仓计数器 self.context.open_price = 0 self.context.max_dev = 0 # def order_change(self,order): # print('时间:%s 报单变化 %s' % (datetime.now(), order)) # if order['status'] == 'AllTraded': # if order['offset'] == '0': # 开 # self.context.open_vol += order['vol'] # elif order['offset'] == '3': # 平今 # self.context.open_vol -= order['vol'] # # print('时间:%s 当前开仓手数: %d ' % (datetime.now(), self.context.open_vol)) # # # 成交量> 0 开仓, 成交量 =0 未开仓 # if self.context.open_vol > 0: # self.context.open_flag = True # else: # self.context.open_flag = False # self.context.direction = '' def order_change(self,order): print('update unit %s:' % datetime.datetime.now(),5) # print(self.context.open_vol) # print(order['vol']) # self.context.last_price = data['limit_price'] if order['offset'] == OPEN: # 开 self.context.open_vol += order['vol'] elif order['offset'] == CLOSE: # 平今 self.context.open_vol -= order['vol'] print(self.context.open_vol) if self.context.open_vol > 0: self.context.open_flag = True else: self.context.open_flag = False self.context.direction = '' print(self.context.open_flag) def handle_data(self, data): boll = self.context.boll.compute(data) if boll is not None: print('date%d,time:%s, barclose:%d,up:%d,dn:.%d,mb:%d'%(self.context.date,self.context.current_bar.end_time,data.close,boll.up,boll.dn,boll.mb)) print(self.context.open_price, self.context.instmt_info['tick_size']) print(data.close, self.context.open_price) print(self.context.open_flag, self.context.can_open_flag, self.context.direction) if not self.context.open_flag: if data.close > boll.up and self.context.direction == '': # print('时间:%s 突破上轨' % datetime.now()) self.context.direction = BUY print('change to buy') elif data.close < boll.dn and self.context.direction == '': # print('时间:%s 突破下轨' % datetime.now()) self.context.direction = SELL print('change to sell') if self.context.open_flag and self.context.direction == BUY: cur_dev = self.context.open_price - data.low max_dev = self.context.max_dev self.context.max_dev = max(cur_dev, max_dev) if self.context.open_flag and self.context.direction == SELL: cur_dev = data.high - self.context.open_price max_dev = self.context.max_dev self.context.max_dev = max(cur_dev, max_dev) # 突破上轨后跌破中轨多单 if data.close < boll.mb and self.context.direction == BUY and self.context.can_open_flag: self._open(data) # 突破下轨后涨破中轨开空单 if data.close > boll.mb and self.context.direction == SELL and 
self.context.can_open_flag: self._open(data) # 开过多单后 收盘价超过中轨 平仓计数器 + 1 超过3次平仓 if data.close > boll.mb and self.context.direction == BUY and self.context.open_flag: self._close(data) # 开过空单后 收盘价跌破中轨 平仓计数器 + 1 超过3次平仓 if data.close < boll.mb and self.context.direction == SELL and self.context.open_flag: self._close(data) # if data.close < (self.context.open_price - 15 * self.context.instmt_info['tick_size']) and self.context.direction == BUY and self.context.open_flag: # print('多单止损', self.context.open_price, data.close) # self.context.close_count = 3 # self._close(data,type='stoploss') # # if data.close > (self.context.open_price + 15 * self.context.instmt_info['tick_size']) and self.context.direction == SELL and self.context.open_flag: # print('空单止损', self.context.open_price, data.close) # self.context.close_count = 3 # self._close(data,type='stoploss') def _open(self, bar): # 开空 open_price = bar.close - self.context.slippage if self.context.direction == BUY: # 开多 open_price = bar.close + self.context.slippage self.context.open_price = open_price # 计算当前bar的close价下最多能开多少手 # 开仓手数 = (总资金 * 资金利用率)/(开仓价 * 保证金比例 * 每手吨数) open_vol = int((self.context.portfolio.avail_cash * self.context.cash_rate) / (open_price * self.context.instmt_info['broker_margin'] * 0.01 * self.context.instmt_info['contract_size'])) # 部分成交情况下 满足布林开仓条件继续开仓 available_open_vol = open_vol - self.context.open_vol if available_open_vol > 0: # print('时间:%s 开:%s 手' % (datetime.now(), available_open_vol)) print('时间:%d %s 开:%s手' % (self.context.date, self.context.current_bar.end_time,available_open_vol)) self.order(self.context.current_contract[0], self.context.direction, OPEN, available_open_vol, limit_price=open_price) self.context.can_open_flag = False # else: # # 开足仓位 # self.context.can_open_flag = False def _close(self, bar, type=''): self.context.close_count += 1 if self.context.close_count >= 3: # 平空 open_price = bar.close + self.context.slippage direction = BUY if self.context.direction == BUY: # 平多 open_price = bar.close - self.context.slippage direction = SELL if self.context.open_vol > 0: # print('时间:%s 平今:%s 手' % (datetime.now(), self.context.open_vol)) print('时间:%d %s 平:%s 手' % (self.context.date, self.context.current_bar.end_time, self.context.open_vol)) self.order(self.context.current_contract[0], direction, CLOSE, self.context.open_vol, limit_price=open_price) self.context.can_open_flag = True self.context.open_flag = False self.context.close_count = 0 # else: # 平光了又能开仓了 # self.context.can_open_flag = True # self.context.close_count = 0 if __name__ == '__main__': t = BollStrategyReverse() t.run()
apache-2.0
-493,712,290,891,596,860
40.913978
194
0.569981
false
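The strategy reads boll.up / boll.mb / boll.dn from backtest.tools.ta.Boll, whose implementation is not shown here. A standard Bollinger computation it presumably resembles; the 20-bar window and 2-sigma width are assumptions:

import numpy as np

def bollinger(closes, n=20, k=2.0):
    window = np.asarray(closes[-n:], dtype=float)
    mb = window.mean()  # middle band: n-bar moving average
    sd = window.std()
    return mb + k * sd, mb, mb - k * sd  # up, mb, dn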
jasonfleming/asgs
output/unstreamline.py
1
4812
#!/usr/bin/env python import vtk import sys from optparse import OptionParser from vtk.util.colors import * frame = 1 interact = 0 # get command line argument for the frame number if (len(sys.argv) > 1): frame = sys.argv[1] if (len(sys.argv) > 2): if (sys.argv[2] == "-i"): interact = 1 # Create a structured grid with these points meshReader = vtk.vtkUnstructuredGridReader() #meshReader = vtk.vtkXMLUnstructuredGridReader() #meshReader = vtk.vtkPVDReader() input_file = 'spatial_data_%03d.d' % int(frame) #input_file = 'v6brivers.14_fort.74_%03d.vtu' % int(frame) #input_file = 'v6brivers.14_fort.74.pvd' #input_file = 'femar3_irene30_maxwvel.vtu' meshReader.SetFileName(input_file) meshReader.SetScalarsName("WindSpeed") meshReader.SetVectorsName("WindVelocity") #meshReader.SetPointArrayStatus("WindVelocity",1) ##meshReader.SetPointArrayStatus("MaximumWindSpeed",1) meshReader.Update() # create actor for unstructured grid outline #outlineMesh = vtk.vtkOutlineFilter() meshGeometryFilter = vtk.vtkGeometryFilter() meshGeometryFilter.SetInput(meshReader.GetOutput()) outlineMesh = vtk.vtkFeatureEdges() outlineMesh.SetInputConnection(meshGeometryFilter.GetOutputPort()) outlineMesh.BoundaryEdgesOn() outlineMeshMapper = vtk.vtkPolyDataMapper() outlineMeshMapper.SetInputConnection(outlineMesh.GetOutputPort()) outlineActor = vtk.vtkActor() outlineActor.SetMapper(outlineMeshMapper) outlineActor.GetProperty().SetColor(1,1,1) # create a color scale (color lookup table) refLut = vtk.vtkLookupTable() lut = vtk.vtkLookupTable() refLut.SetNumberOfColors(256) lut.SetNumberOfColors(256) refLut.SetHueRange(0.0, 0.667) refLut.Build() lut.Build() for j in range(256): lut.SetTableValue(j, refLut.GetTableValue(255-j)) # planeMapper = vtk.vtkDataSetMapper() planeMapper.SetInputConnection(meshReader.GetOutputPort()) planeMapper.SetScalarRange(meshReader.GetOutput().GetScalarRange()) planeMapper.SetLookupTable(lut) gridActor = vtk.vtkActor() gridActor.SetMapper(planeMapper) gridActor.SetScale(1.0,1.0,0.0000001) gridActor.GetProperty().SetRepresentationToWireframe() # create streamlines #xmin= -1953764.5423199304 xmax= 1473590.7094357659 #ymin= -3071352.8797937827 ymax= 1150611.477018431 seedsSphere = vtk.vtkPointSource() seedsSphere.SetRadius(3071352.0) seedsSphere.SetCenter(0.0, 0.0, 0.0) seedsSphere.SetNumberOfPoints(5000) seedTransform = vtk.vtkTransform() seedTransform.Scale(1.0,1.0,0.0) #seedTransform.RotateZ(1.0*float(frame)) # 1 degree seedFilter = vtk.vtkTransformPolyDataFilter() seedFilter.SetTransform(seedTransform) seedFilter.SetInputConnection(seedsSphere.GetOutputPort()) integ = vtk.vtkRungeKutta4() streamer = vtk.vtkStreamTracer() streamer.SetInputConnection(meshReader.GetOutputPort()) #streamer.SetStartPosition(0.18474886E+01, 0.12918899E+00, 0.00000000E+00) streamer.SetSource(seedFilter.GetOutput()) streamer.SetMaximumPropagation(160000.0) #streamer.SetMaximumPropagationUnitToTimeUnit() streamer.SetInitialIntegrationStep(1.0) #streamer.SetInitialIntegrationStepUnitToCellLengthUnit() streamer.SetIntegrationDirectionToBoth() streamer.SetIntegrator(integ) # streamTube = vtk.vtkTubeFilter() streamTube.SetInputConnection(streamer.GetOutputPort()) #streamTube.SetInputArrayToProcess(1,0,0,vtkDataObject::FIELD_ASSOCIATION_POINTS, vectors) streamTube.SetRadius(5000.0) streamTube.SetNumberOfSides(12) streamWarp = vtk.vtkWarpScalar() streamWarp.SetInputConnection(streamTube.GetOutputPort()) streamWarp.SetNormal(0.0,0.0,1.0) streamWarp.UseNormalOn() streamWarp.SetScaleFactor(10000.0) mapStreamTube = 
vtk.vtkPolyDataMapper() mapStreamTube.SetInputConnection(streamWarp.GetOutputPort()) #mapStreamTube.SetInputConnection(streamer.GetOutputPort()) #mapStreamTube.SetInputConnection(streamTube.GetOutputPort()) mapStreamTube.SetScalarRange(meshReader.GetOutput().GetPointData().GetScalars().GetRange()) mapStreamTube.SetLookupTable(lut) streamTubeActor = vtk.vtkActor() streamTubeActor.SetMapper(mapStreamTube) streamTubeActor.GetProperty().SetColor(0.0,0.0,0.0) # Create the usual rendering stuff. ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) renWin.SetSize(700, 700) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) ren.SetBackground(1.0, 1.0, 1.0) ren.AddActor(gridActor) ren.AddActor(outlineActor) ren.AddActor(streamTubeActor) ren.ResetCamera() cam = ren.GetActiveCamera() cam.Zoom(1.3) if ( interact == 0 ): renWin.OffScreenRenderingOn() renWin.Render() # write a png w2if = vtk.vtkWindowToImageFilter() w2if.SetInput(renWin) w2if.Update() writer = vtk.vtkPNGWriter() filename = 'spatial_data_%03d.png' % int(frame) writer.SetFileName(filename) writer.SetInput(w2if.GetOutput()) writer.Write() # Interact with the data. if (interact == 1): iren.Initialize() iren.Start()
gpl-3.0
718,788,734,203,853,000
31.295302
91
0.798836
false
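Hypothetical invocations of the script above; the frame number selects the matching input and output file names:

#   python unstreamline.py 12       # offscreen, writes the PNG
#   python unstreamline.py 12 -i    # interactive render window
frame = 12
print('spatial_data_%03d.d' % frame)    # input:  spatial_data_012.d
print('spatial_data_%03d.png' % frame)  # output: spatial_data_012.png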
aslab/rct
higgs/branches/ros-fuerte/OMRosDrivers_py/src/test_action.py
1
2247
#!/usr/bin/env python
'''
Created on Nov 10, 2011

@author: chcorbato
@summary: ros node that publishes MetaAction messages from console input
          for testing purposes
'''
import roslib; roslib.load_manifest('OMRosDrivers_py')
import rospy

from OMRosDrivers_py.msg import MetaAction, MetaActionResult


def result_CB(msg):
    print "action-" + str(msg.actionID) + " --> " + msg.result
    print "\t log:\t" + msg.log


def main():
    rospy.init_node('meta_action_tester')
    action_pub = rospy.Publisher('/meta_action', MetaAction)
    result_sub = rospy.Subscriber('/meta_action_result', MetaActionResult, result_CB)
    n_actions = 0
    print "enter:\n k to kill a node\n l to launch a launchfile\n q to quit"

    # working loop -----------------------------------------------------------
    while not rospy.is_shutdown():
        # read the menu choice once per iteration; the original called
        # raw_input() before every comparison, so each branch silently
        # consumed its own line of input
        choice = raw_input()
        if choice == 'k':
            node = raw_input("write the full name of the node to kill:\n")
            action = MetaAction()
            action.actionID = n_actions
            n_actions = n_actions + 1
            action.actionName = 'KILL'
            action.nodeName = node
            action_pub.publish(action)
            print "command sent to kill node: " + node
        elif choice == 'l':
            pkg = raw_input("write the name of the package:\n")
            launchfile = raw_input("write the name of the launchfile:\n")
            action = MetaAction()
            action.actionID = n_actions
            n_actions = n_actions + 1
            action.actionName = 'LAUNCH'
            action.pckg = pkg
            action.launchfile = launchfile
            action_pub.publish(action)
            print "command sent to launch: " + pkg + " " + launchfile
        elif choice == 'q':
            rospy.loginfo("node ended")
            return

    # termination --------------------------------------------------------
    rospy.loginfo("meta_action tester ended")


if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
gpl-3.0
5,719,488,678,066,558,000
29.780822
87
0.522474
false
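A hedged sketch of publishing one MetaAction to the same topic without the console menu; the node name to kill is a placeholder, and the short sleep gives subscribers time to connect before the single publish:

import roslib; roslib.load_manifest('OMRosDrivers_py')
import rospy
from OMRosDrivers_py.msg import MetaAction

rospy.init_node('meta_action_oneshot')
pub = rospy.Publisher('/meta_action', MetaAction)
rospy.sleep(1.0)  # let subscribers connect

action = MetaAction()
action.actionID = 0
action.actionName = 'KILL'
action.nodeName = '/some_node'
pub.publish(action)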
mpiotrewicz/py-micromouse
maze/blocks.py
1
1392
'''
Basic building blocks of a maze.
'''

NORTH = 'N'
EAST = 'E'
SOUTH = 'S'
WEST = 'W'


class Wall:
    '''
    Implements a wall of a maze cell.

    A wall can be set or cleared. It can also be locked to prevent
    changing its state or unlocked to make set and clear operations
    work again.
    '''
    def __init__(self, state=False, locked=False):
        self._state = state
        self.locked = locked
        self._adjacent_cells = []

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        if not self.locked:
            self._state = value

    def bind(self, *cells):
        for cell in cells:
            if len(self._adjacent_cells) < 2:
                self._adjacent_cells.append(cell)

    def adjacent(self, cell):
        if cell not in self._adjacent_cells:
            return None
        for adjacent in self._adjacent_cells:
            if cell != adjacent:
                return adjacent


class Cell:
    def __init__(self, north, east, south, west):
        self.walls = {NORTH: north, EAST: east, SOUTH: south, WEST: west}
        for wall in self.walls.values():
            try:
                wall.bind(self)
            except AttributeError:
                raise ValueError from None

    def neighbour(self, direction):
        return self.walls[direction].adjacent(self)
mit
8,728,085,950,165,865,000
24.309091
73
0.563937
false
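A minimal usage sketch for the blocks above: two cells sharing one wall, which is then raised and locked.

from maze.blocks import Wall, Cell, EAST, WEST

shared = Wall()
c1 = Cell(north=Wall(), east=shared, south=Wall(), west=Wall())
c2 = Cell(north=Wall(), east=Wall(), south=Wall(), west=shared)

assert c1.neighbour(EAST) is c2   # the shared wall links the cells
assert c2.neighbour(WEST) is c1

shared.state = True    # raise the wall
shared.locked = True
shared.state = False   # ignored while locked
assert shared.state is True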
fabiofortkamp/nemplot
nemplot/plots.py
1
3415
from pathlib import Path import math import numpy as np import matplotlib.pyplot as plt from .config import nemplot_parameters def refine_list(original_list, factor): """ Return a new list, inserting more elements between the number in 'original_list'. Assumes 'original_list' is evenly-spaced. The spacing between each element is divided by 'factor' >>>refine_list([1,2,3],factor=2) [1.0,1.5,2.0,2.5,3.0] """ # calculate the original spacing between elements and refine it d = original_list[1] - original_list[0] d_refined = d / factor max_value = max(original_list) min_value = min(original_list) # the number of elements is the number of divisions between # the limit values, plus one aditional n_refined = ((max_value - min_value) / d_refined) + 1 return np.linspace(min_value,max_value,math.ceil(n_refined)) def set_all_fontsizes_from_axis(ax): for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()): item.set_fontsize(nemplot_parameters["FONTSIZE"]) def refine_yticks(ax,factor): """ Take an Axis object 'ax' and refine the y-ticks on it by 'factor' Parameters ---------- ax : matplotlib.pyplot.Axex.Axis() object factor: int Returns ------- out : None """ ax.set_yticks(refine_list(ax.get_yticks(),factor),minor=True) def refine_xticks(ax,factor): """ Take an Axis object 'ax' and refine the x-ticks on it by 'factor' Parameters ---------- ax : matplotlib.pyplot.Axex.Axis() object factor: int Returns ------- out : None """ ax.set_xticks(refine_list(ax.get_xticks(),factor),minor=True) def create_plot(xlabel="", ylabel="", title=""): """ Return (fig,axis) correspondent to a line plot, with 'xlabel','ylabel' and 'title' The size of the figure is controlled by FIGSIZE_INCHES""" figsize_inches = nemplot_parameters["FIGSIZE_INCHES"] fig = plt.figure(figsize=(figsize_inches,figsize_inches)) axis = fig.add_subplot(111) axis.set_ylabel(ylabel) axis.set_xlabel(xlabel) axis.set_title(title) set_all_fontsizes_from_axis(axis) return fig, axis def create_two_axes_plot(xlabel="", ylabel_left="", ylabel_right="", title=""): """ Return (fig, axis_left, axis_right) correspondent to a line plot with two y-axes. The size of the figure is controlled by FIGSIZE_INCHES """ fig, axis_left = create_plot(xlabel, ylabel_left, title) axis_right = axis_left.twinx() axis_right.set_ylabel(ylabel_right) set_all_fontsizes_from_axis(axis_right) return (fig, axis_left, axis_right) def save_figure(fig,name): """ Save the 'fig' Figure object as 'name' (with extension PLOT_EXTENSION), inside FIG_FILE_PATH (a Path-like object).""" fig_path = nemplot_parameters["FIG_FILE_PATH"] file_basename = name + nemplot_parameters["PLOT_EXTENSION"] file_path = fig_path / file_basename fig.savefig(str(file_path), dpi=nemplot_parameters["DPI"], bbox_inches='tight')
mit
3,325,522,625,204,729,300
25.472868
75
0.596779
false
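A short usage sketch for the helpers above, assuming nemplot_parameters has already been configured (FIG_FILE_PATH, PLOT_EXTENSION, DPI); labels and data are illustrative:

from nemplot.plots import create_plot, refine_xticks, refine_yticks, save_figure

fig, ax = create_plot(xlabel="time [s]", ylabel="T [K]", title="Cooling curve")
ax.plot([0, 1, 2], [300, 280, 270])
refine_xticks(ax, 2)  # minor ticks at half the major spacing
refine_yticks(ax, 2)
save_figure(fig, "cooling_curve")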
meerkat-cv/annotator-supreme
annotator_supreme/views/webapp/annotation_view.py
1
2364
import json

import flask
from flask import render_template, request
from flask_classy import FlaskView, route

from annotator_supreme.views.view_tools import *
from annotator_supreme import app
from annotator_supreme.controllers.dataset_controller import DatasetController
from annotator_supreme.controllers.color_utils import ColorUtils


class AnnotationViewWebApp(FlaskView):
    route_base = '/'

    def __init__(self):
        self.dataset_controller = DatasetController()

    @route('/annotation', methods=['GET'])
    def annotation(self):
        # get the datasets to see which one the user wants to annotate
        datasets = self.dataset_controller.get_datasets()
        dataset_names = []
        for i, d in enumerate(datasets):
            dataset_names.append(d["name"])
            datasets[i]["category_colors"] = ColorUtils.distiguishable_colors_hex(len(d["image_categories"]))
            datasets[i]["last_modified"] = ""  # it is not serializable

        # the user can choose to annotate a specific image/dataset
        sel_dataset = ""
        sel_image = ""
        if request.args.get("dataset") is not None:
            sel_dataset = request.args.get("dataset")
        if request.args.get("image") is not None:
            sel_image = request.args.get("image")

        return render_template('annotation.html',
                               dataset_names=dataset_names,
                               datasets=json.dumps(datasets),
                               sel_dataset=sel_dataset,
                               sel_image=sel_image)

    @route('/label-annotation', methods=['GET'])
    def label_annotation(self):
        # get the datasets to see which one the user wants to annotate
        datasets = self.dataset_controller.get_datasets()
        dataset_names = []
        for i, d in enumerate(datasets):
            dataset_names.append(d["name"])
            datasets[i]["category_colors"] = ColorUtils.distiguishable_colors_hex(len(d["image_categories"]))
            datasets[i]["last_modified"] = ""  # it is not serializable

        # the user can choose to annotate a specific dataset
        sel_dataset = ""
        if request.args.get("dataset") is not None:
            sel_dataset = request.args.get("dataset")
        app.logger.info("sel_dataset: " + sel_dataset)

        return render_template('annotation_label.html',
                               dataset_names=dataset_names,
                               datasets=json.dumps(datasets),
                               sel_dataset=sel_dataset)
mit
-2,795,673,514,854,720,500
41.981818
155
0.659052
false
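A hedged sketch of wiring the view up, following flask-classy conventions (skip this if the app already registers its views at startup); with route_base '/', the page is then served at /annotation and accepts optional dataset/image query parameters:

from annotator_supreme import app
from annotator_supreme.views.webapp.annotation_view import AnnotationViewWebApp

AnnotationViewWebApp.register(app)
# e.g. GET /annotation?dataset=my_dataset&image=img_001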
jmccrae/yuzu
python/yuzu/test_jsonld.py
1
7809
import unittest from rdflib import Graph, URIRef, BNode, Literal from yuzu.jsonld import jsonld_from_model from yuzu.settings import BASE_NAME from rdflib.namespace import RDF, XSD class JsonLDTest(unittest.TestCase): def ctxt(self, ct): self.maxDiff = None m = {"@base": BASE_NAME} for k in ct: m[k] = ct[k] return m def test_simple(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/bar"), Literal("foo", "en"))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("simple") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "bar": "http://www.example.com/bar" }), "@id": "foo", "bar": { "@value": "foo", "@language": "en" } }, obj) def test_bnode(self): g = Graph() b1 = BNode() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/list"), b1)) g.add((b1, URIRef("http://www.example.com/first"), URIRef("http://www.example.com/value"))) g.add((b1, URIRef("http://www.example.com/rest"), RDF.nil)) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("bnode") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "list": { "@id": "http://www.example.com/list", "@type": "@id" }, "first": { "@id": "http://www.example.com/first", "@type": "@id" }, "rest": { "@id": "http://www.example.com/rest", "@type": "@id" }, "rdf": str(RDF) }), "@id": "foo", "list": { "first": "http://www.example.com/value", "rest": "rdf:nil" } }, obj) def test_inverse(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/prop"), Literal("foo"))) g.add((URIRef("http://www.example.com/bar"), URIRef("http://www.example.com/backLink"), URIRef("http://localhost:8080/foo"))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("inverse") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "prop": "http://www.example.com/prop", "backLink": { "@id": "http://www.example.com/backLink", "@type": "@id" } }), "@id": "foo", "prop": "foo", "@reverse": { "backLink": {"@id": "http://www.example.com/bar"} } }, obj) def test_multi(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/mp"), Literal("foo", "en"))) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/mp"), URIRef("http://www.example.com/bar"))) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/op"), URIRef("http://localhost:8080/foo#baz"))) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/op"), URIRef("http://www.example.com/bar"))) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/dp"), Literal("bar", datatype=URIRef("http://www.example.com/type")))) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/dp"), Literal("baz"))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("multi") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "mp": "http://www.example.com/mp", "dp": "http://www.example.com/dp", "op": { "@id": "http://www.example.com/op", "@type": "@id" } }), "@id": "foo", "mp": [ {"@id": "http://www.example.com/bar"}, {"@value": "foo", "@language": "en"} ], "op": ["foo#baz", "http://www.example.com/bar"], "dp": [ {"@value": "bar", "@type": "http://www.example.com/type"}, "baz" ] }, obj) def test_drb(self): g = Graph() b = BNode("bar") g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/prop1"), b)) g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/prop2"), b)) obj = jsonld_from_model(g, 
"http://localhost:8080/foo") print("drb") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "prop1": { "@id": "http://www.example.com/prop1", "@type": "@id" }, "prop2": { "@id": "http://www.example.com/prop2", "@type": "@id" } }), "@id": "foo", "prop1": "_:bar", "prop2": "_:bar" }, obj) def test_others(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/prop"), Literal("foo", "en"))) g.add((URIRef("http://localhost:8080/foo_typo"), URIRef("http://www.example.com/prop"), Literal("bar", "en"))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("others") print(obj) self.assertDictEqual({ "@context": self.ctxt({ "prop": "http://www.example.com/prop" }), "@graph": [ { "@id": "foo", "prop": { "@value": "foo", "@language": "en" } }, { "@id": "foo_typo", "prop": { "@value": "bar", "@language": "en" } } ] }, obj) def test_type(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type"), URIRef("http://www.example.com/Bar"))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("type") print(obj) self.assertDictEqual({ "@context": self.ctxt({"rdf": str(RDF)}), "@id": "foo", "@type": "http://www.example.com/Bar"}, obj) def test_int(self): g = Graph() g.add((URIRef("http://localhost:8080/foo"), URIRef("http://www.example.com/prop"), Literal("3", datatype=XSD.integer))) obj = jsonld_from_model(g, "http://localhost:8080/foo") print("int") print(obj) self.assertDictEqual({ "@context": self.ctxt({"prop": "http://www.example.com/prop"}), "@id": "foo", "prop": 3}, obj) if __name__ == '__main__': unittest.main()
apache-2.0
-5,423,511,061,216,330,000
32.805195
79
0.417595
false
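A hedged sketch of calling the function under test directly and serialising the result, mirroring test_simple above; the triples are the same example data:

import json
from rdflib import Graph, URIRef, Literal
from yuzu.jsonld import jsonld_from_model

g = Graph()
g.add((URIRef("http://localhost:8080/foo"),
       URIRef("http://www.example.com/bar"),
       Literal("foo", "en")))
print(json.dumps(jsonld_from_model(g, "http://localhost:8080/foo"), indent=2))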
vicalloy/django-lb-workflow
lbworkflow/views/forms.py
1
6536
from django.core.exceptions import ImproperlyConfigured from django.forms import ModelForm from django.http import HttpResponseRedirect from django.views.generic.base import ContextMixin, View try: from crispy_forms.helper import FormHelper except ImportError: pass __all__ = ( "FormsMixin", "ModelFormsMixin", "FormSetMixin", "FormsView", "BSFormSetMixin", ) class FormsMixin(ContextMixin): """ A mixin that provides a way to show and handle any number of form in a request. """ initial = {} form_classes = None # the main form should named as form success_url = None def get_initial(self, form_class_key): return self.initial.get(form_class_key, {}).copy() def get_form_classes(self): """ Returns the form classes to use in this view """ if not self.form_classes: raise ImproperlyConfigured("Provide form_classes.") return self.form_classes def after_create_form(self, form_class_key, form): return form def create_form(self, form_class_key, form_class): form = form_class(**self.get_form_kwargs(form_class_key, form_class)) self.after_create_form(form_class_key, form) return form def create_forms(self, **form_classes): """ Returns an instance of the forms to be used in this view. forms can be access by self.forms """ forms = {} self.forms = forms for form_class_key, form_class in form_classes.items(): forms[form_class_key] = self.create_form( form_class_key, form_class ) return forms def get_form_kwargs(self, form_class_key, form_class): """ Returns the keyword arguments for instantiating the form. """ kwargs = {"initial": self.get_initial(form_class_key)} if form_class_key != "form": kwargs["prefix"] = form_class_key if self.request.method in ("POST", "PUT"): kwargs.update( { "data": self.request.POST, "files": self.request.FILES, } ) return kwargs def get_success_url(self): """ Returns the supplied success URL. """ if self.success_url: # Forcing possible reverse_lazy evaluation url = self.success_url else: raise ImproperlyConfigured( "No URL to redirect to. Provide a success_url." ) return url def forms_valid(self, **forms): """ If the forms are valid, redirect to the supplied URL. """ return HttpResponseRedirect(self.get_success_url()) def forms_invalid(self, **forms): """ If the forms are invalid, re-render the context data with the data-filled form and errors. 
""" return self.render_to_response(self.get_context_data(**forms)) class ModelFormsMixin: def get_form_kwargs(self, form_class_key, form_class): kwargs = super().get_form_kwargs(form_class_key, form_class) # not (ModelForm or ModelFormSet) formset_form_class = getattr(form_class, "form", str) if not issubclass(form_class, ModelForm) and not issubclass( formset_form_class, ModelForm ): return kwargs instance = getattr(self, "object", None) # if have main form, try to get instance from main form # other form may have ForeignKey to main object form = self.forms.get("form") if form and getattr(form, "instance", None): instance = getattr(form, "instance", None) kwargs["instance"] = instance return kwargs def is_formset(form): # form class if getattr(form, "__name__", "").endswith("FormSet"): return True # form instance return type(form).__name__.endswith("FormSet") class FormSetMixin: def get_context_data(self, **kwargs): kwargs = super().get_context_data(**kwargs) formset_list = [] for form in self.forms.values(): if is_formset(form): formset_list.append(form) kwargs["formset_list"] = formset_list return kwargs def after_create_formset(self, form_class_key, formset): formset.title = "Items" def after_create_form(self, form_class_key, form): super().after_create_form(form_class_key, form) if is_formset(form): self.after_create_formset(form_class_key, form) return form def get_formset_kwargs(self, form_class_key, form_class): return {} def get_form_kwargs(self, form_class_key, form_class): kwargs = super().get_form_kwargs(form_class_key, form_class) if is_formset(form_class): return kwargs ext_kwargs = self.get_formset_kwargs(form_class_key, form_class) kwargs.update(ext_kwargs) return kwargs class FormsView(FormSetMixin, ModelFormsMixin, FormsMixin, View): """ A mixin that renders any number of forms on GET and processes it on POST. """ def get(self, request, *args, **kwargs): """ Handles GET requests and instantiates a blank version of the forms. """ form_classes = self.get_form_classes() forms = self.create_forms(**form_classes) return self.render_to_response(self.get_context_data(**forms)) def post(self, request, *args, **kwargs): """ Handles POST requests, instantiating a form instance with the passed POST variables and then checked for validity. """ form_classes = self.get_form_classes() forms = self.create_forms(**form_classes) if all([forms[form].is_valid() for form in forms]): return self.forms_valid(**forms) else: return self.forms_invalid(**forms) # PUT is a valid HTTP verb for creating (with a known URL) or editing an # object, note that browsers only support POST for now. def put(self, *args, **kwargs): return self.post(*args, **kwargs) class BSFormSetMixin: """ Crispy & Bootstrap for formset """ def after_create_formset(self, form_class_key, formset): super().after_create_formset(form_class_key, formset) helper = FormHelper() helper.template = "lbadminlte/bootstrap3/table_inline_formset.html" formset.helper = helper return formset
mit
6,143,110,533,236,121,000
31.039216
83
0.604039
false
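A hedged sketch of a concrete view built on FormsView; the form classes, template and URL are placeholders, and TemplateResponseMixin supplies the render_to_response the mixins call. The main form must be keyed "form", per the comment in FormsMixin, so other forms can borrow its instance:

from django import forms
from django.forms import formset_factory
from django.views.generic.base import TemplateResponseMixin
from lbworkflow.views.forms import FormsView

class OrderForm(forms.Form):
    name = forms.CharField()

# the generated class name ends with "FormSet", which is_formset() checks
ItemFormSet = formset_factory(OrderForm, extra=1)

class OrderEditView(TemplateResponseMixin, FormsView):
    template_name = "order_edit.html"
    form_classes = {"form": OrderForm, "items": ItemFormSet}
    success_url = "/orders/"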
imiyoo2010/mitmproxy
libmproxy/proxy/config.py
1
6107
from __future__ import absolute_import import os from .. import utils, platform from netlib import http_auth, certutils from .primitives import ConstUpstreamServerResolver, TransparentUpstreamServerResolver TRANSPARENT_SSL_PORTS = [443, 8443] CONF_BASENAME = "mitmproxy" CONF_DIR = "~/.mitmproxy" class ProxyConfig: def __init__(self, confdir=CONF_DIR, clientcerts=None, no_upstream_cert=False, body_size_limit=None, get_upstream_server=None, http_form_in="absolute", http_form_out="relative", authenticator=None, ciphers=None, certs=[], certforward = False ): self.ciphers = ciphers self.clientcerts = clientcerts self.no_upstream_cert = no_upstream_cert self.body_size_limit = body_size_limit self.get_upstream_server = get_upstream_server self.http_form_in = http_form_in self.http_form_out = http_form_out self.authenticator = authenticator self.confdir = os.path.expanduser(confdir) self.ca_file = os.path.join(self.confdir, CONF_BASENAME + "-ca.pem") self.certstore = certutils.CertStore.from_store(self.confdir, CONF_BASENAME) for spec, cert in certs: self.certstore.add_cert_file(spec, cert) self.certforward = certforward def process_proxy_options(parser, options): body_size_limit = utils.parse_size(options.body_size_limit) c = 0 http_form_in, http_form_out = "absolute", "relative" get_upstream_server = None if options.transparent_proxy: c += 1 if not platform.resolver: return parser.error("Transparent mode not supported on this platform.") get_upstream_server = TransparentUpstreamServerResolver(platform.resolver(), TRANSPARENT_SSL_PORTS) http_form_in, http_form_out = "relative", "relative" if options.reverse_proxy: c += 1 get_upstream_server = ConstUpstreamServerResolver(options.reverse_proxy) http_form_in, http_form_out = "relative", "relative" if options.upstream_proxy: c += 1 get_upstream_server = ConstUpstreamServerResolver(options.upstream_proxy) http_form_in, http_form_out = "absolute", "absolute" if options.manual_destination_server: c += 1 get_upstream_server = ConstUpstreamServerResolver(options.manual_destination_server) if c > 1: return parser.error("Transparent mode, reverse mode, upstream proxy mode and " "specification of an upstream server are mutually exclusive.") if options.http_form_in: http_form_in = options.http_form_in if options.http_form_out: http_form_out = options.http_form_out if options.clientcerts: options.clientcerts = os.path.expanduser(options.clientcerts) if not os.path.exists(options.clientcerts) or not os.path.isdir(options.clientcerts): return parser.error( "Client certificate directory does not exist or is not a directory: %s" % options.clientcerts ) if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd): if options.auth_singleuser: if len(options.auth_singleuser.split(':')) != 2: return parser.error("Invalid single-user specification. 
Please use the format username:password") username, password = options.auth_singleuser.split(':') password_manager = http_auth.PassManSingleUser(username, password) elif options.auth_nonanonymous: password_manager = http_auth.PassManNonAnon() elif options.auth_htpasswd: try: password_manager = http_auth.PassManHtpasswd(options.auth_htpasswd) except ValueError, v: return parser.error(v.message) authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy") else: authenticator = http_auth.NullProxyAuth(None) certs = [] for i in options.certs: parts = i.split("=", 1) if len(parts) == 1: parts = ["*", parts[0]] parts[1] = os.path.expanduser(parts[1]) if not os.path.exists(parts[1]): parser.error("Certificate file does not exist: %s"%parts[1]) certs.append(parts) return ProxyConfig( clientcerts = options.clientcerts, body_size_limit = body_size_limit, no_upstream_cert = options.no_upstream_cert, get_upstream_server = get_upstream_server, confdir = options.confdir, http_form_in = http_form_in, http_form_out = http_form_out, authenticator = authenticator, ciphers = options.ciphers, certs = certs, certforward = options.certforward, ) def ssl_option_group(parser): group = parser.add_argument_group("SSL") group.add_argument( "--cert", dest='certs', default=[], type=str, metavar = "SPEC", action="append", help='Add an SSL certificate. SPEC is of the form "[domain=]path". '\ 'The domain may include a wildcard, and is equal to "*" if not specified. '\ 'The file at path is a certificate in PEM format. If a private key is included in the PEM, '\ 'it is used, else the default key in the conf dir is used. Can be passed multiple times.' ) group.add_argument( "--client-certs", action="store", type=str, dest="clientcerts", default=None, help="Client certificate directory." ) group.add_argument( "--ciphers", action="store", type=str, dest="ciphers", default=None, help="SSL cipher specification." ) group.add_argument( "--cert-forward", action="store_true", dest="certforward", default=False, help="Simply forward SSL certificates from upstream." ) group.add_argument( "--no-upstream-cert", default=False, action="store_true", dest="no_upstream_cert", help="Don't connect to upstream server to look up certificate details." )
mit
-1,318,332,638,147,981,600
41.713287
113
0.637956
false
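A hedged direct construction of ProxyConfig, bypassing the argparse plumbing above; every value is an example, and the default confdir (~/.mitmproxy) must already hold the mitmproxy CA:

from libmproxy.proxy.config import ProxyConfig
from libmproxy import utils

config = ProxyConfig(
    body_size_limit=utils.parse_size("3m"),  # cap request/response bodies
    http_form_in="absolute",
    http_form_out="relative",
)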
Cloudify-PS/cloudify-versa-plugin
versa_plugin/versaclient.py
1
5385
import requests import json import os from os import chmod from xml.dom.minidom import parseString from requests.packages.urllib3.exceptions import InsecureRequestWarning from cloudify import exceptions as cfy_exc from cloudify import ctx requests.packages.urllib3.disable_warnings(InsecureRequestWarning) JSON = 'json' XML = 'xml' def _save_key_file(path, value): path = os.path.expanduser(path) with open(path, 'w') as content_file: chmod(path, 0600) content_file.write(value) def _check_response(response, return_code, accept): if response.status_code == requests.codes.no_content: return None if response.status_code != return_code: raise cfy_exc.HttpException(response.url, response.status_code, response.content) if response.content: if accept == JSON: return json.loads(response.content) else: return parseString(response.content) else: return None class VersaClient(): def __init__(self, config, key_file): self.versa_url = config["versa_url"] self.client_id = config["client_id"] self.client_secret = config["client_secret"] self.username = config["username"] self.password = config["password"] self.access_token = None self.verify = False self.key_file = key_file def __enter__(self): self.get_token() return self def __exit__(self, type, value, traceback): # self.revoke_token() pass def read_tokens_form_file(self): if os.path.isfile(self.key_file): with open(self.key_file) as file: self.access_token = file.readline().rstrip() return True return False def save_token_to_file(self): with open(self.key_file, "w") as file: file.write(self.access_token) def get_token(self): if self.read_tokens_form_file(): return data = { "client_id": self.client_id, "client_secret": self.client_secret, "username": self.username, "password": self.password, "grant_type": "password"} headers = self._get_headers(JSON, JSON) result = requests.post(self.versa_url + "/auth/token", headers=headers, data=json.dumps(data), verify=self.verify) try: result = json.loads(result.content) self.access_token = result['access_token'] self.save_token_to_file() except (KeyError, TypeError, ValueError): raise cfy_exc.NonRecoverableError( "Incorrect reply: {}".format(result)) def revoke_token(self): headers = {"Authorization": "Bearer {}".format(self.access_token)} requests.post(self.versa_url + "/auth/revoke", headers=headers, verify=self.verify) if os.path.isfile(self.key_file): os.remove(self.key_file) self.access_token = None def get(self, path, data, content_type, return_code=200, accept=JSON): return self._request(requests.get, path, data, content_type, return_code, accept) def post(self, path, data, content_type, return_code=201, accept=JSON): return self._request(requests.post, path, data, content_type, return_code, accept) def put(self, path, data, content_type, return_code=204, accept=JSON): return self._request(requests.put, path, data, content_type, return_code, accept) def delete(self, path, return_code=204, accept=JSON): return self._request(requests.delete, path, None, None, return_code, accept) def _request(self, request_type, path, data, content_type, return_code, accept): retry = 0 ctx.logger.debug("Sending {0} request to {1} with data {2}".format( request_type.__name__, self.versa_url+path, str(data))) while True: headers = self._get_headers(content_type, accept) response = request_type( self.versa_url + path, headers=headers, data=data, verify=self.verify) if response.status_code == 401: if retry == 1: break retry += 1 self.revoke_token() self.get_token() else: response_str = _check_response(response, return_code, accept) 
ctx.logger.debug("respose code: {0} string:{1}".format( return_code, response_str)) return response_str def _get_headers(self, content_type, accept): content_dict = {'json': 'application/json', 'xml': 'application/xml'} headers = {} if content_type: try: headers['Content-type'] = content_dict[content_type] except KeyError: raise cfy_exc.NonRecoverableError( "Unknown content-type: {}".format(content_type)) if self.access_token: headers["Authorization"] = "Bearer {}".format(self.access_token) headers['Accept'] = content_dict[accept] return headers
apache-2.0
7,628,428,906,583,440,000
35.883562
77
0.571031
false
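A hedged usage sketch for VersaClient: the config keys mirror what __init__ reads, every value is a placeholder, and it assumes a reachable Versa director plus a Cloudify operation context, since the client logs through cloudify's ctx. The context manager fetches (or reloads) a token on entry:

from versa_plugin.versaclient import VersaClient, JSON

config = {
    "versa_url": "https://director.example.com:9183",
    "client_id": "my-client",
    "client_secret": "my-secret",
    "username": "admin",
    "password": "password",
}
with VersaClient(config, "/tmp/versa.key") as client:
    appliances = client.get("/api/config/appliances", None, JSON)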
Abdoctor/behave
examples/async_step/features/steps/async_dispatch_steps.py
1
1033
from behave import given, then, step
from behave.api.async_step import use_or_create_async_context, AsyncContext
from hamcrest import assert_that, equal_to, empty
import asyncio


@asyncio.coroutine
def async_func(param):
    yield from asyncio.sleep(0.2)
    return str(param).upper()


@given('I dispatch an async-call with param "{param}"')
def step_dispatch_async_call(context, param):
    async_context = use_or_create_async_context(context, "async_context1")
    task = async_context.loop.create_task(async_func(param))
    async_context.tasks.append(task)


@then('the collected result of the async-calls is "{expected}"')
def step_collected_async_call_result_is(context, expected):
    async_context = context.async_context1
    done, pending = async_context.loop.run_until_complete(
        asyncio.wait(async_context.tasks, loop=async_context.loop))
    parts = [task.result() for task in done]
    joined_result = ", ".join(sorted(parts))
    assert_that(joined_result, equal_to(expected))
    assert_that(pending, empty())
bsd-2-clause
7,370,843,110,504,015,000
38.730769
75
0.728945
false
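The dispatch/collect pattern these steps wrap is plain asyncio; a self-contained sketch of the same flow without behave (the parameter values are invented, Python 3.4-style coroutines as in the steps above):

    import asyncio

    @asyncio.coroutine
    def work(param):
        yield from asyncio.sleep(0.1)
        return str(param).upper()

    # Dispatch: schedule tasks on the loop without awaiting them yet.
    loop = asyncio.get_event_loop()
    tasks = [loop.create_task(work(p)) for p in ('alice', 'bob')]

    # Collect: wait for all tasks, then join the sorted results.
    done, pending = loop.run_until_complete(asyncio.wait(tasks))
    print(", ".join(sorted(task.result() for task in done)))  # ALICE, BOB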
kball/ambry
test/test_identity.py
1
27096
""" Created on Jul 6, 2013 @author: eric """ import unittest from ambry.identity import * from testbundle.bundle import Bundle from test_base import TestBase class Test(TestBase): def setUp(self): self.copy_or_build_bundle() self.bundle = Bundle() self.bundle_dir = self.bundle.bundle_dir def tearDown(self): pass def test_id(self): dnn = 1000000 rev = 100 dn = DatasetNumber(dnn) self.assertEquals('d000004c92', str(dn)) dn = DatasetNumber(dnn, rev) self.assertEquals('d000004c9201C', str(dn)) self.assertEquals('d000004c9201C', str(ObjectNumber.parse(str(dn)))) tn = TableNumber(dn, 1) self.assertEquals('t000004c920101C', str(tn)) self.assertEquals('t000004c920101C', str(ObjectNumber.parse(str(tn)))) tnnr = tn.rev(None) self.assertEquals('t000004c9201', str(tnnr)) self.assertEquals('t000004c9201004', str(tnnr.rev(4))) # Other assignment classes dnn = 62*62+11 dn = DatasetNumber(62**3-1,None,'authoritative') self.assertEquals('dZZZ', str(dn)) dn = DatasetNumber(62**3-1,None,'registered') self.assertEquals('d00ZZZ', str(dn)) dn = DatasetNumber(62**3-1,None,'unregistered') self.assertEquals('d0000ZZZ', str(dn)) dn = DatasetNumber(62**3-1,None,'self') self.assertEquals('d000000ZZZ', str(dn)) tn = TableNumber(dn, 2) self.assertEquals('t000000ZZZ02', str(tn)) cn = ColumnNumber(tn, 3) self.assertEquals('c000000ZZZ02003', str(cn)) pn = dn.as_partition(5) self.assertEquals('p000000ZZZ005', str(pn)) def test_name(self): name = Name(source='Source.com', dataset='data set', subset='sub set', variation='vari ation', type='Ty-pe', part='Pa?rt', version='0.0.1') self.assertEquals('source.com-data_set-sub_set-ty_pe-pa_rt-vari_ation-0.0.1', name.vname) name = Name(source='source.com', dataset='dataset', subset='subset', variation='variation', type='type', part='part', version='0.0.1') self.assertEquals('source.com-dataset-subset-type-part-variation', str(name)) self.assertEquals('source.com-dataset-subset-type-part-variation-0.0.1', name.vname) name = name.clone() self.assertEquals('source.com-dataset-subset-type-part-variation', str(name)) self.assertEquals('source.com-dataset-subset-type-part-variation-0.0.1', name.vname) part_name = PartitionName(time = 'time', space='space', table='table', grain='grain', format='format', segment=101, **name.dict ) self.assertEquals('source.com-dataset-subset-type-part-variation-table-time-space-grain-format-101', str(part_name)) self.assertEquals('source.com-dataset-subset-type-part-variation-table-time-space-grain-format-101-0.0.1', part_name.vname) part_name = part_name.clone() self.assertEquals('source.com-dataset-subset-type-part-variation-table-time-space-grain-format-101', str(part_name)) self.assertEquals('source.com-dataset-subset-type-part-variation-table-time-space-grain-format-101-0.0.1', part_name.vname) # Name Query name_query = NameQuery(source='source.com', dataset='dataset', vname='foobar', type=NameQuery.NONE) with self.assertRaises(NotImplementedError): name_query.path d = name_query.dict self.assertEquals('<any>',d['subset']) self.assertEquals('<none>',d['type']) self.assertEquals('dataset',d['dataset']) name_query = name_query.clone() self.assertEquals('<any>',d['subset']) self.assertEquals('<none>',d['type']) self.assertEquals('dataset',d['dataset']) name_query_2 = name_query.with_none() self.assertEquals(None,name_query_2.dict['type']) # With a semantic version spec name = Name(source='source.com', dataset = 'dataset', variation='orig', version='0.0.1') self.assertEquals('source.com-dataset-orig-0.0.1',name.vname) name.version_major = 2 
name.version_build = ('foobar',) self.assertEquals('source.com-dataset-orig-2.0.1+foobar',name.vname) name = Name(source='source.com', dataset='dataset',variation='variation', version='>=0.0.1') self.assertEquals('source.com-dataset-variation->=0.0.1',name.vname) name = Name(source='source.com', dataset='dataset',variation='variation', version='0.0.1') self.assertEquals('source.com/dataset-variation-0.0.1',name.path) self.assertEquals('source.com/dataset-variation',name.source_path) self.assertEquals('source.com/dataset-variation-0.0.1.db',name.cache_key) part_name = PartitionName(time = 'time', space='space', table='table', grain='grain', format='format', segment='segment', **name.dict ) self.assertEquals('source.com/dataset-variation-0.0.1/table/time-space/grain-segment',part_name.path) self.assertEquals('source.com/dataset-variation/table/time-space/grain-segment',part_name.source_path) part_name = PartitionName(time = 'time', space='space', table='table', format='db', **name.dict ) self.assertEquals('source.com-dataset-variation-table-time-space',part_name.name) self.assertEquals('source.com-dataset-variation-table-time-space-0.0.1',part_name.vname) self.assertEquals('source.com/dataset-variation-0.0.1/table/time-space',part_name.path) self.assertEquals('source.com/dataset-variation/table/time-space',part_name.source_path) part_name = PartitionName(time = 'time', space='space', format='format', **name.dict ) self.assertEquals('source.com/dataset-variation-0.0.1/time-space',part_name.path) self.assertEquals('source.com/dataset-variation/time-space',part_name.source_path) pname = PartialPartitionName(time = 'time', space='space', table='table', format='csv' ) part_name = pname.promote(name) self.assertEquals('source.com-dataset-variation-table-time-space-csv-0.0.1',part_name.vname) # Cloning part_name = name.as_partition(time = 'time', space='space', table='table', format='geo') self.assertEquals('source.com-dataset-variation-table-time-space-geo-0.0.1',part_name.vname) def test_identity(self): from ambry.partition import new_identity name = Name(source='source.com', dataset='foobar', version='0.0.1', variation='orig') dn = DatasetNumber(10000, 1, assignment_class='registered') ident = Identity(name, dn) self.assertEquals('d002Bi',ident.id_) self.assertEquals('d002Bi001',ident.vid) self.assertEquals('source.com-foobar-orig',str(ident.name)) self.assertEquals('source.com-foobar-orig-0.0.1',ident.vname) self.assertEquals('source.com-foobar-orig-0.0.1~d002Bi001',ident.fqname) self.assertEquals('source.com/foobar-orig-0.0.1',ident.path) self.assertEquals('source.com/foobar-orig',ident.source_path) self.assertEquals('source.com/foobar-orig-0.0.1.db',ident.cache_key) self.assertEquals('source.com-foobar-orig-0.0.1', ident.name.dict['vname']) self.assertEquals({'id','vid','revision','name', 'vname', 'cache_key', 'variation', 'dataset', 'source', 'version'}, set(ident.dict.keys())) self.assertIn('fqname', ident.names_dict) self.assertIn('vname', ident.names_dict) self.assertNotIn('dataset', ident.names_dict) self.assertIn('dataset', ident.ident_dict) self.assertNotIn('fqname', ident.ident_dict) # Clone to get a PartitionIdentity pi = ident.as_partition(7) self.assertEquals('source.com-foobar-orig-0.0.1~p002Bi007001',pi.fqname) pi = ident.as_partition(8,time = 'time', space='space', format='hdf') self.assertEquals('source.com-foobar-orig-time-space-hdf-0.0.1~p002Bi008001',pi.fqname) # PartitionIdentity part_name = PartitionName(time = 'time', space='space', format='hdf', **name.dict ) pn = 
PartitionNumber(dn, 500) ident = PartitionIdentity.new_subclass(part_name, pn) self.assertEquals(set(['id','vid','revision', 'cache_key', 'name', 'vname', 'space', 'format', 'variation', 'dataset', 'source', 'version', 'time']), set(ident.dict.keys())) self.assertEquals('p002Bi084',ident.id_) self.assertEquals('p002Bi084001',ident.vid) self.assertEquals('source.com-foobar-orig-time-space-hdf',str(ident.name)) self.assertEquals('source.com-foobar-orig-time-space-hdf-0.0.1',ident.vname) self.assertEquals('source.com-foobar-orig-time-space-hdf-0.0.1~p002Bi084001',ident.fqname) self.assertEquals('source.com/foobar-orig-0.0.1/time-space',ident.path) self.assertEquals('source.com/foobar-orig/time-space',ident.source_path) self.assertEquals('source.com/foobar-orig-0.0.1/time-space.hdf',ident.cache_key) # Updating partition names that were partially specified pnq = PartitionNameQuery(time = 'time', space='space', format='hdf' ) #import pprint #pprint.pprint(pnq.dict) # # Locations # print str(ident.locations) self.assertEquals(' ', str(ident.locations)) ident.locations.set(LocationRef.LOCATION.LIBRARY, 1) ident.locations.set(LocationRef.LOCATION.REMOTE, 2) ident.locations.set(LocationRef.LOCATION.SOURCE) self.assertEquals(' SLR ', str(ident.locations)) # Partitions, converting to datasets ident = Identity(name, dn) pi = ident.as_partition(8, time='time', space='space', format='hdf') self.assertEquals('source.com-foobar-orig-time-space-hdf-0.0.1~p002Bi008001', pi.fqname) iid = pi.as_dataset() self.assertEquals(ident.fqname, iid.fqname) def test_identity_from_dict(self): from ambry.partition.sqlite import SqlitePartitionIdentity from ambry.partition.hdf import HdfPartitionIdentity from ambry.partition.csv import CsvPartitionIdentity from ambry.partition.geo import GeoPartitionIdentity name = Name(source='source.com', dataset='foobar', variation='orig', version='0.0.1') dn = DatasetNumber(10000, 1, assignment_class='registered') oident = Identity(name, dn) opident = oident.as_partition(7) idict = oident.dict pidict = opident.dict ident = Identity.from_dict(idict) self.assertIsInstance(ident, Identity) self.assertEquals(ident.fqname, oident.fqname) ident = Identity.from_dict(pidict) self.assertIsInstance(ident, SqlitePartitionIdentity) self.assertEquals('source.com/foobar-orig-0.0.1.db', ident.cache_key) pidict['format'] = 'hdf' ident = Identity.from_dict(pidict) self.assertIsInstance(ident, HdfPartitionIdentity) self.assertEquals('source.com/foobar-orig-0.0.1.hdf', ident.cache_key) pidict['format'] = 'csv' ident = Identity.from_dict(pidict) self.assertIsInstance(ident, CsvPartitionIdentity) self.assertEquals('source.com/foobar-orig-0.0.1.csv', ident.cache_key) pidict['format'] = 'geo' ident = Identity.from_dict(pidict) self.assertIsInstance(ident, GeoPartitionIdentity) self.assertEquals('source.com/foobar-orig-0.0.1.geodb', ident.cache_key) def test_split(self): from semantic_version import Spec name = Name(source='source.com', dataset='foobar', version='1.2.3') dn = DatasetNumber(10000, 1, assignment_class='registered') # NOTE, version is entered as 1.2.3, but will be changed to 1.2.1 b/c # last digit is overridden by revision ident = Identity(name, dn) ip = Identity.classify(name) self.assertEquals(Name, ip.isa) self.assertIsNone(ip.version) ip = Identity.classify(ident.name) self.assertEquals(Name, ip.isa) self.assertIsNone(ip.on) self.assertEquals(ident.sname, ip.name) self.assertIsNone(ip.version) ip = Identity.classify(ident.vname) self.assertEquals(Name, ip.isa) self.assertIsNone(ip.on) 
self.assertEquals(ident.vname, ip.name) self.assertEquals(ident._name.version, str(ip.version)) ip = Identity.classify(ident.fqname) self.assertEquals(DatasetNumber, ip.isa) self.assertEquals(ident.vname, ip.name) self.assertEquals(str(ip.on), str(ip.on)) ip = Identity.classify(ident.vid) self.assertEquals(DatasetNumber, ip.isa) ip = Identity.classify(ident.id_) self.assertEquals(DatasetNumber, ip.isa) ip = Identity.classify(dn) self.assertEquals(DatasetNumber, ip.isa) ip = Identity.classify(dn.as_partition(10)) self.assertEquals(PartitionNumber, ip.isa) ip = Identity.classify("source.com-foobar-orig") self.assertIsNone(ip.version) self.assertEquals('source.com-foobar-orig',ip.sname) self.assertIsNone(ip.vname) ip = Identity.classify("source.com-foobar-orig-1.2.3") self.assertIsInstance(ip.version, Version) self.assertEquals('source.com-foobar-orig',ip.sname) self.assertEquals('source.com-foobar-orig-1.2.3',ip.vname) ip = Identity.classify("source.com-foobar-orig->=1.2.3") self.assertIsInstance(ip.version, Spec) self.assertEquals('source.com-foobar-orig',ip.sname) self.assertIsNone(ip.vname) ip = Identity.classify("source.com-foobar-orig-==1.2.3") self.assertIsInstance(ip.version, Spec) self.assertEquals('source.com-foobar-orig',ip.sname) self.assertIsNone(ip.vname) def test_bundle_build(self): from testbundle.bundle import Bundle from sqlalchemy.exc import IntegrityError from ambry.dbexceptions import ConflictError bundle = Bundle() # Need to clear the library, or the Bundle's pre_prepare # will cancel the build if this version is already installed bundle.library.purge() bundle.exit_on_fatal = False bundle.clean() bundle.database.create() bp = bundle.partitions with bundle.session: bp._new_orm_partition(PartialPartitionName(time = 't1', space='s1')) bp._new_orm_partition(PartialPartitionName(time = 't1', space='s2')) bp._new_orm_partition(PartialPartitionName(time = 't1', space=None)) bp._new_orm_partition(PartialPartitionName(time = 't2', space='s1')) bp._new_orm_partition(PartialPartitionName(time = 't2', space='s2')) bp._new_orm_partition(PartialPartitionName(time = 't2', space=None)) with self.assertRaises(ConflictError): with bundle.session: bp._new_orm_partition(PartialPartitionName(time = 't1', space='s1')) pnq = PartitionNameQuery(time=NameQuery.ANY, space='s1') names = [p.vname for p in bp._find_orm(pnq).all()] self.assertEqual({u'source-dataset-subset-variation-t2-s1-0.0.1', u'source-dataset-subset-variation-t1-s1-0.0.1'}, set(names)) names = [p.vname for p in bp._find_orm(PartitionNameQuery(space=NameQuery.ANY)).all()] self.assertEqual(6,len(names)) names = [p.vname for p in bp._find_orm(PartitionNameQuery(time='t1',space=NameQuery.ANY)).all()] self.assertEqual({'source-dataset-subset-variation-t1-s2-0.0.1', 'source-dataset-subset-variation-t1-0.0.1', 'source-dataset-subset-variation-t1-s1-0.0.1'}, set(names)) names = [p.vname for p in bp._find_orm(PartitionNameQuery(time='t1',space=NameQuery.NONE)).all()] self.assertEqual({'source-dataset-subset-variation-t1-0.0.1'}, set(names)) # Start over, use a higher level function to create the partitions bundle = Bundle() bundle.exit_on_fatal = False bundle.clean() bundle.database.create() bp = bundle.partitions bp._new_partition(PartialPartitionName(time = 't1', space='s1')) self.assertEquals(1, len(bp.all)) bp._new_partition(PartialPartitionName(time = 't1', space='s2')) self.assertEquals(2, len(bp.all)) bp._new_partition(PartialPartitionName(time = 't1', space=None)) bp._new_partition(PartialPartitionName(time = 't2', space='s1')) 
bp._new_partition(PartialPartitionName(time = 't2', space='s2')) bp._new_partition(PartialPartitionName(time = 't2', space=None)) self.assertEquals(6, len(bp.all)) names = [p.vname for p in bp._find_orm(PartitionNameQuery(time='t1',space=NameQuery.ANY)).all()] self.assertEqual({'source-dataset-subset-variation-t1-s2-0.0.1', 'source-dataset-subset-variation-t1-0.0.1', 'source-dataset-subset-variation-t1-s1-0.0.1'}, set(names)) # Start over, use a higher level function to create the partitions bundle = Bundle() bundle.exit_on_fatal = False bundle.clean() bundle.database.create() bp = bundle.partitions p = bp.new_db_partition(time = 't1', space='s1') self.assertEquals('source-dataset-subset-variation-t1-s1-0.0.1~piEGPXmDC8001001', p.identity.fqname) p = bp.find_or_new(time = 't1', space='s2') self.assertEquals('source-dataset-subset-variation-t1-s2-0.0.1~piEGPXmDC8002001', p.identity.fqname) # Duplicate p = bp.find_or_new(time = 't1', space='s2') self.assertEquals('source-dataset-subset-variation-t1-s2-0.0.1~piEGPXmDC8002001', p.identity.fqname) p = bp.find_or_new_hdf(time = 't2', space='s1') self.assertEquals('source-dataset-subset-variation-t2-s1-hdf-0.0.1~piEGPXmDC8003001', p.identity.fqname) p = bp.find_or_new_geo(time = 't2', space='s1') self.assertEquals('source-dataset-subset-variation-t2-s1-geo-0.0.1~piEGPXmDC8004001', p.identity.fqname) p = bp.find_or_new_csv(time = 't2', space='s1') self.assertEquals('source-dataset-subset-variation-t2-s1-csv-0.0.1~piEGPXmDC8005001', p.identity.fqname) # Ok! Build! bundle = Bundle() bundle.exit_on_fatal = False bundle.clean() bundle.pre_prepare() bundle.prepare() bundle.post_prepare() bundle.pre_build() bundle.build_db_inserter_codes() bundle.post_build() self.assertEquals('diEGPXmDC8001',bundle.identity.vid) self.assertEquals('source-dataset-subset-variation',bundle.identity.sname) self.assertEquals('source-dataset-subset-variation-0.0.1',bundle.identity.vname) self.assertEquals('source-dataset-subset-variation-0.0.1~diEGPXmDC8001',bundle.identity.fqname) def test_number_service(self): ## For this test, setup these access keys in the ## Redis Server: ## ## redis-cli set assignment_class:test-ac-authoritative authoritative ## redis-cli set assignment_class:test-ac-registered registered ## redis-cli set assignment_class:fe78d179-8e61-4cc5-ba7b-263d8d3602b9 unregistered from ambry.identity import NumberServer from ambry.run import get_runconfig rc = get_runconfig() ng = rc.group('numbers') # You'll need to run a local service at this address host = "numbers" port = 7977 unregistered_key = 'fe78d179-8e61-4cc5-ba7b-263d8d3602b9' ns = NumberServer(host=host, port=port, key='test-ac-registered') n = ns.next() self.assertEqual(6,len(str(n))) # Next request is authoritative, so no need to sleep here. ns = NumberServer(host=host, port=port, key='test-ac-authoritative') n = ns.next() self.assertEqual(4,len(str(n))) ns.sleep() # Avoid being rate limited # Override to use a local numbers server: ns = NumberServer(host=host, port=port, key= unregistered_key) n = ns.next() self.assertEqual(8,len(str(n))) # # This test is turned off because it doesn't delete the bundle at the end, # so the next test fails. # def x_test_rewrite(self): from testbundle.bundle import Bundle from sqlalchemy.exc import IntegrityError import json from ambry.run import get_runconfig # Prepare to rewrite the bundle.yaml file. 
bundle = Bundle() orig = os.path.join(bundle.bundle_dir,'bundle.yaml') save = os.path.join(bundle.bundle_dir,'bundle.yaml.save') try: os.rename(orig,save) print 'Write to ', orig with open(orig,'w') as f: f.write(json.dumps( { "identity":{ "dataset": "dataset1", "id": "dfoo", "revision": 100, "source": "source1", "subset": "subset1", "variation": "variation1", "version": "1.0.1", "vid": "dfob001", }, "about": { "author": "[email protected]" } } )) get_runconfig.clear() # clear config cache. bundle = Bundle() bundle.clean() bundle.pre_prepare() bundle.prepare() bundle.post_prepare() # Does the rewrite, adding the 'names' # Need to clear and reload one more time for the 'names' to appear get_runconfig.clear() # clear config cache. bundle = Bundle() bundle.exit_on_fatal = False self.assertEquals('dataset1', bundle.config.identity.dataset) self.assertEquals('dfoo', bundle.config.identity.id) self.assertEquals(100, bundle.config.identity.revision) self.assertEquals("source1-dataset1-subset1-variation1-1.0.100~dfoo01C", bundle.config.names.fqname) self.assertEquals("[email protected]", bundle.config.about.author) finally: os.rename(save, orig) self.delete_bundle() def test_format(self): name = Name(source='source.com', dataset='foobar', version='0.0.1') dn = DatasetNumber(10000, 1, assignment_class='registered') for format in ('geo','hdf','csv','db'): pi = Identity(name, dn).as_partition(space='space', format=format) print type(pi), pi.path def test_time_space(self): name = Name(source='source.com', dataset='foobar', version='0.0.1', btime='2010P5Y', bspace='space', variation='orig') self.assertEquals('source.com-foobar-space-2010p5y-orig-0.0.1', name.vname) self.assertEquals('source.com/foobar-space-2010p5y-orig-0.0.1.db', name.cache_key) self.assertEquals('source.com/foobar-space-2010p5y-orig-0.0.1', name.path) self.assertEquals('source.com/space/foobar-2010p5y-orig', name.source_path) return dn = DatasetNumber(10000, 1, assignment_class='registered') ident = Identity(name, dn) self.assertEquals('d002Bi', ident.id_) self.assertEquals('d002Bi001', ident.vid) self.assertEquals('source.com-foobar-orig', str(ident.name)) self.assertEquals('source.com-foobar-orig-0.0.1', ident.vname) self.assertEquals('source.com-foobar-orig-0.0.1~d002Bi001', ident.fqname) self.assertEquals('source.com/foobar-orig-0.0.1', ident.path) self.assertEquals('source.com/foobar-orig', ident.source_path) self.assertEquals('source.com/foobar-orig-0.0.1.db', ident.cache_key) d = { 'id': 'd002Bi', 'source': 'source', 'creator': 'creator', 'dataset': 'dataset', 'subset': 'subset', 'btime': 'time', 'bspace': 'space', 'variation': 'variation', 'revision': 1, 'version': '0.0.1' } def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(Test)) return suite if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
bsd-2-clause
2,132,683,447,517,851,000
35.916894
114
0.568202
false
Franr/pixel-war
client/map_logic.py
1
1432
class MapLogic(object):

    def __init__(self, array_map):
        self.dicMap = {}

        # calculating the map dimensions
        self.y = len(array_map)
        self.x = len(array_map[0])

        # generate the map structure
        self._pre_generate_surf()
        for i in range(len(array_map)):
            for e in range(len(array_map[i])):
                obj_id = int(array_map[i][e])
                sqm = 'b' if obj_id else 'n'
                self.dicMap[i, e] = (sqm, obj_id)
                self._render_sqm(e, i, sqm)
        self._post_generate_surf()

    def _pre_generate_surf(self):
        pass

    def _render_sqm(self, x, y, sqm_type):
        pass

    def _post_generate_surf(self):
        pass

    def is_blocking_position(self, x, y):
        return self.dicMap[y, x][0] == "b"

    def move_creature(self, creature, x, y):
        x_ant, y_ant = creature.get_coor()
        self.set_creature(creature, x, y)
        self.clean_position(x_ant, y_ant)

    def set_creature(self, creature, x, y):
        self.dicMap[y, x] = ("b", creature)

    def clean_position(self, x, y):
        self.dicMap[y, x] = ("n", 0)

    def get_creature(self, x, y):
        if self.dicMap[y, x][0] == "n":
            return None
        elif self.dicMap[y, x][0] == "b":
            if isinstance(self.dicMap[y, x][1], int):
                return None
            else:
                return self.dicMap[y, x][1]
apache-2.0
5,683,244,477,958,043,000
28.833333
53
0.50838
false
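A small usage sketch of the map class above. The 2x3 tile grid and the stub creature are invented for illustration; in the game, subclasses override the _pre_generate_surf/_render_sqm/_post_generate_surf hooks to draw:

    class FakeCreature(object):
        # Stand-in for the game's creature type; only get_coor() is needed here.
        def __init__(self, x, y):
            self._pos = (x, y)

        def get_coor(self):
            return self._pos

    # '1' tiles block movement ('b'), '0' tiles are walkable ('n').
    game_map = MapLogic(["010", "000"])
    assert game_map.is_blocking_position(1, 0)       # column 1, row 0 holds obj_id 1

    creature = FakeCreature(0, 1)
    game_map.set_creature(creature, 0, 1)
    game_map.move_creature(creature, 2, 1)           # frees (0, 1), occupies (2, 1)
    assert game_map.get_creature(2, 1) is creature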
joshwatson/binaryninja-api
python/examples/kaitai/ipv6_packet.py
1
2691
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild

from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections


if parse_version(ks_version) < parse_version('0.7'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))

from . import protocol_body


class Ipv6Packet(KaitaiStruct):
    SEQ_FIELDS = ["version", "traffic_class", "flow_label", "payload_length",
                  "next_header_type", "hop_limit", "src_ipv6_addr",
                  "dst_ipv6_addr", "next_header", "rest"]

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)

    def _read(self):
        self._debug['version']['start'] = self._io.pos()
        self.version = self._io.read_bits_int(4)
        self._debug['version']['end'] = self._io.pos()
        self._debug['traffic_class']['start'] = self._io.pos()
        self.traffic_class = self._io.read_bits_int(8)
        self._debug['traffic_class']['end'] = self._io.pos()
        self._debug['flow_label']['start'] = self._io.pos()
        self.flow_label = self._io.read_bits_int(20)
        self._debug['flow_label']['end'] = self._io.pos()
        self._io.align_to_byte()
        self._debug['payload_length']['start'] = self._io.pos()
        self.payload_length = self._io.read_u2be()
        self._debug['payload_length']['end'] = self._io.pos()
        self._debug['next_header_type']['start'] = self._io.pos()
        self.next_header_type = self._io.read_u1()
        self._debug['next_header_type']['end'] = self._io.pos()
        self._debug['hop_limit']['start'] = self._io.pos()
        self.hop_limit = self._io.read_u1()
        self._debug['hop_limit']['end'] = self._io.pos()
        self._debug['src_ipv6_addr']['start'] = self._io.pos()
        self.src_ipv6_addr = self._io.read_bytes(16)
        self._debug['src_ipv6_addr']['end'] = self._io.pos()
        self._debug['dst_ipv6_addr']['start'] = self._io.pos()
        self.dst_ipv6_addr = self._io.read_bytes(16)
        self._debug['dst_ipv6_addr']['end'] = self._io.pos()
        self._debug['next_header']['start'] = self._io.pos()
        self.next_header = protocol_body.ProtocolBody(self.next_header_type, self._io)
        self.next_header._read()
        self._debug['next_header']['end'] = self._io.pos()
        self._debug['rest']['start'] = self._io.pos()
        self.rest = self._io.read_bytes_full()
        self._debug['rest']['end'] = self._io.pos()
mit
-2,958,073,929,633,228,300
48.833333
167
0.599405
false
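A hedged sketch of driving this debug-instrumented parser. It assumes the module is imported in its package context (so `KaitaiStream`/`BytesIO` from the runtime and the sibling `protocol_body` spec resolve, with protocol 17 mapped to the standard UDP parser); the packet bytes are fabricated:

    raw = (b'\x60\x00\x00\x00'            # version 6, traffic class 0, flow label 0
           b'\x00\x08\x11\x40'            # payload length 8, next header 17 (UDP), hop limit 64
           + b'\x00' * 15 + b'\x01'       # src ::1
           + b'\x00' * 15 + b'\x01'       # dst ::1
           + b'\x00\x35\x00\x35\x00\x08\x00\x00')  # minimal 8-byte UDP header

    pkt = Ipv6Packet(KaitaiStream(BytesIO(raw)))
    pkt._read()                    # this variant does not auto-read in __init__
    print(pkt.version)             # 6
    print(pkt.next_header_type)    # 17
    print(pkt._debug['hop_limit']) # byte offsets, e.g. {'start': 7, 'end': 8}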
cfpb/porchlight
porchlightapi/admin.py
1
1342
# -*- coding: utf-8 -*-
from django.contrib import admin

from porchlightapi.models import Repository
from porchlightapi.models import ValueDataPoint


class RepositoryAdmin(admin.ModelAdmin):
    list_display = ('name', 'project', 'url',)
    list_display_links = ('name',)
    search_fields = ('name', 'url',)
    list_filter = ('project',)

admin.site.register(Repository, RepositoryAdmin)


class ValueDataPointAdmin(admin.ModelAdmin):
    # All the fields are readonly
    list_display = ('created', 'repository',
                    'undeployed_datetime', 'undeployed_identifier',
                    'deployed_datetime', 'deployed_identifier',
                    'value')
    readonly_fields = ('repository', 'created',
                       'undeployed_identifier', 'undeployed_datetime',
                       'undeployed_value',
                       'deployed_identifier', 'deployed_datetime',
                       'deployed_value',
                       'value',)
    list_filter = ('repository',)

    # We don't allow admin to add or change data points. Only view.
    def has_add_permission(self, request):
        return False

admin.site.register(ValueDataPoint, ValueDataPointAdmin)
cc0-1.0
-1,245,679,545,820,650,000
32.55
67
0.550671
false
ankur22/Butler
ondelabs/copilot/model/ValidationData.py
1
1621
'''
Created on 3 Aug 2014

@author: ankur
'''

import math

from ondelabs.copilot.model.ValidationResult import ValidationResult


class ValidationData:

    def __init__(self, lines):
        self.__words = lines

    def validate(self, classes, lexicon):
        validationResult = ValidationResult()
        allClassTypes = classes.getAllClassTypes()

        for key in self.__words.keys():
            validationText = self.__words[key]
            classResult = {}
            for classTypeKey in allClassTypes:
                classType = classes.getClass(classTypeKey)
                words = validationText.getWords()
                # Log-space naive Bayes score: log prior plus the sum of log
                # conditional probabilities of each word under this class.
                prob = math.log(classType.getPrior())
                for word in words:
                    conProb = lexicon.getConditionalProbability(word, classTypeKey)
                    if conProb != 0:
                        prob = prob + math.log(conProb)
                classResult[classTypeKey] = prob
            prediction = self.__findClassTypeWithHighestProb(classResult)
            validationResult.addPrediction(key, prediction,
                                           validationText.getClassType())

        validationResult.calculateAccuracy()
        return validationResult

    def __findClassTypeWithHighestProb(self, probs):
        bestClassType = -1
        highestProb = -9999999
        for key in probs.keys():
            if probs[key] > highestProb:
                highestProb = probs[key]
                bestClassType = key
        return bestClassType

    def getTotalValidationTestDataPoints(self):
        return len(self.__words)
mit
560,873,236,210,924,000
30.192308
90
0.586058
false
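Since the collaborating model classes (ValidationResult, the classes/lexicon containers) live elsewhere in this repo, here is a self-contained illustration of the same scoring rule with invented numbers, including the zero-probability skip:

    import math

    # Hypothetical two-class setup: priors and per-class word probabilities.
    priors = {'spam': 0.4, 'ham': 0.6}
    cond = {
        ('free', 'spam'): 0.05, ('free', 'ham'): 0.001,
        ('hello', 'spam'): 0.01, ('hello', 'ham'): 0.03,
    }

    words = ['free', 'hello']
    scores = {}
    for cls in priors:
        score = math.log(priors[cls])
        for w in words:
            p = cond.get((w, cls), 0)
            if p != 0:            # mirror the skip in validate() above
                score += math.log(p)
        scores[cls] = score

    # 'spam' wins: log(0.4*0.05*0.01) > log(0.6*0.001*0.03)
    print(max(scores, key=scores.get))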
Comunitea/CMNT_004_15
project-addons/picking_document/__manifest__.py
1
1335
##############################################################################
#
#    Copyright (C) 2014 Pexego Sistemas Informáticos All Rights Reserved
#    $Jesús Ventosinos Mayor <[email protected]>$
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': "Picking document",
    'version': '1.0',
    'category': 'stock',
    'description': """""",
    'author': 'Pexego Sistemas Informáticos',
    'website': 'www.pexego.es',
    "depends": ['stock'],
    "data": ['views/document_view.xml',
             'views/stock_view.xml',
             'security/ir.model.access.csv'],
    "installable": True
}
agpl-3.0
6,946,354,833,289,444,000
39.363636
78
0.591592
false
MOA-2011/enigma2-plugin-extensions-openwebif
plugin/controllers/views/ajax/event.py
1
11606
#!/usr/bin/env python

##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
    import builtins as builtin
except ImportError:
    import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from json import dumps
from Plugins.Extensions.OpenWebif.local import tstrings

##################################################
## MODULE CONSTANTS
VFFSL = valueFromFrameOrSearchList
VFSL = valueFromSearchList
VFN = valueForName
currentTime = time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.317004
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/event.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
        'This template was compiled with Cheetah version'
        ' %s. Templates compiled before version %s must be recompiled.' % (
            __CHEETAH_version__, RequiredCheetahVersion))

##################################################
## CLASSES

class event(Template):

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(event, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k, v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction  # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else:
            _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        etime = time.localtime(VFFSL(SL,"event",True)['begin'])
        channel = VFN(VFFSL(SL,"event",True)['channel'],"replace",False)("'", r"\'")
        write(u'''
<!-- Icons from: http://findicons.com/pack/1987/eico -->
<div id="leftmenu_main">
\t<div id="leftmenu_top" class="handle" style="cursor:move">''')
        _v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 9, col 60
        if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 9, col 60.
        write(u'''
\t\t<div id="leftmenu_expander_main" class="leftmenu_icon leftmenu_icon_collapse" onclick="$(\'#eventdescription\').hide(200)"></div>
\t</div>
\t<div id="leftmenu_container_main" style="padding:6px">
\t\t<div style="float:left; width:80px;">
\t\t\t<div id="station" style="background-color: #1c478e; padding:2px; width:75px; text-align:center; overflow:hidden">''')
        _v = VFFSL(SL,"event",True)['channel'] # u"$event['channel']" on line 14, col 117
        if _v is not None: write(_filter(_v, rawExpr=u"$event['channel']")) # from line 14, col 117.
        write(u'''</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px; font-size:23px; margin-top: 5px; text-align:center">
\t\t\t\t''')
        _v = VFN(VFFSL(SL,"time",True),"strftime",False)("%H:%M", VFFSL(SL,"etime",True)) # u'$time.strftime("%H:%M", $etime)' on line 16, col 5
        if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%H:%M", $etime)')) # from line 16, col 5.
        write(u'''<br/>
\t\t\t\t<span style="font-size:12px; color:#A9D1FA">''')
        _v = VFFSL(SL,"int",False)(VFFSL(SL,"event",True)['duration']/60) # u"$int($event['duration']/60)" on line 17, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$int($event['duration']/60)")) # from line 17, col 49.
        write(u''' min</span>
\t\t\t</div>
\t\t\t<div style="background-color: #1c478e; color:#fff; width:79px;margin:5px 0">
\t\t\t\t<div style="font-size:23px; text-align:center">''')
        _v = VFFSL(SL,"tstrings",True)[("day_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%w", VFFSL(SL,"etime",True))))] # u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]' on line 21, col 52
        if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("day_" + ($time.strftime("%w", $etime)))]')) # from line 21, col 52.
        write(u'''</div>
\t\t\t\t<div style="color:#A9D1FA; text-align:center">''')
        _v = VFN(VFFSL(SL,"time",True),"strftime",False)("%d", VFFSL(SL,"etime",True)) # u'$time.strftime("%d", $etime)' on line 22, col 51
        if _v is not None: write(_filter(_v, rawExpr=u'$time.strftime("%d", $etime)')) # from line 22, col 51.
        write(u''' ''')
        _v = VFFSL(SL,"tstrings",True)[("month_" + (VFN(VFFSL(SL,"time",True),"strftime",False)("%m", VFFSL(SL,"etime",True))))] # u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]' on line 22, col 80
        if _v is not None: write(_filter(_v, rawExpr=u'$tstrings[("month_" + ($time.strftime("%m", $etime)))]')) # from line 22, col 80.
        write(u'''</div>
\t\t\t</div>
\t\t\t<div>
\t\t\t    <img src="/images/ico_timer.png" alt="''')
        _v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 46
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 46.
        write(u'''" title="''')
        _v = VFFSL(SL,"tstrings",True)['add_timer'] # u"$tstrings['add_timer']" on line 26, col 77
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['add_timer']")) # from line 26, col 77.
        write(u'''" style="cursor:pointer" onclick="addTimer(theevent)" />
\t\t\t    <img src="/images/ico_zap.png" alt="Zap" title="''')
        _v = VFFSL(SL,"tstrings",True)['zap'] # u"$tstrings['zap']" on line 27, col 56
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['zap']")) # from line 27, col 56.
        write(u'''" style="cursor:pointer" onclick="zapChannel(\'''')
        _v = VFFSL(SL,"str",False)(VFFSL(SL,"event",True)['sref']) # u"$str($event['sref'])" on line 27, col 118
        if _v is not None: write(_filter(_v, rawExpr=u"$str($event['sref'])")) # from line 27, col 118.
        write(u"""', '""")
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 27, col 142
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 27, col 142.
        write(u'''\')" />
\t\t\t\t<a href="/web/stream.m3u?ref=''')
        _v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['sref']) # u"$quote($event['sref'])" on line 28, col 34
        if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['sref'])")) # from line 28, col 34.
        write(u'''&name=''')
        _v = VFFSL(SL,"quote",False)(VFFSL(SL,"event",True)['channel']) # u"$quote($event['channel'])" on line 28, col 62
        if _v is not None: write(_filter(_v, rawExpr=u"$quote($event['channel'])")) # from line 28, col 62.
        write(u'''" target="_blank"><img
\t\t\t\t\tsrc="/images/ico_stream2.png" alt="Stream ''')
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 48
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 48.
        write(u'''" title="''')
        _v = VFFSL(SL,"tstrings",True)['stream'] # u"$tstrings['stream']" on line 29, col 65
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['stream']")) # from line 29, col 65.
        write(u''' ''')
        _v = VFFSL(SL,"channel",True) # u'$channel' on line 29, col 85
        if _v is not None: write(_filter(_v, rawExpr=u'$channel')) # from line 29, col 85.
        write(u'''" style="cursor:pointer" /></a>
\t\t\t</div>
\t\t</div>
\t\t<div style="float:left; width:250px; margin-left: 5px">
\t\t\t<div style="font-size: 13px; font-weight: bold">''')
        _v = VFFSL(SL,"event",True)['title'] # u"$event['title']" on line 33, col 52
        if _v is not None: write(_filter(_v, rawExpr=u"$event['title']")) # from line 33, col 52.
        write(u'''</div>
''')
        if VFFSL(SL,"event",True)['title'] != VFFSL(SL,"event",True)['shortdesc']: # generated from line 34, col 1
            write(u'''\t\t\t<div style="font-size: 12px; font-weight: bold">''')
            _v = VFFSL(SL,"event",True)['shortdesc'] # u"$event['shortdesc']" on line 35, col 52
            if _v is not None: write(_filter(_v, rawExpr=u"$event['shortdesc']")) # from line 35, col 52.
            write(u'''</div>
''')
        write(u'''\t\t\t<div style="max-height:400px; overflow:auto"><p>''')
        _v = VFN(VFFSL(SL,"event",True)['longdesc'],"replace",False)("\n","<br/>") # u'$(event[\'longdesc\'].replace("\\n","<br/>"))' on line 37, col 52
        if _v is not None: write(_filter(_v, rawExpr=u'$(event[\'longdesc\'].replace("\\n","<br/>"))')) # from line 37, col 52.
        write(u'''</p></div>
\t\t</div>
\t\t<div style="clear:left"></div>
\t</div>
</div>
<script>
var theevent = ''')
        _v = VFFSL(SL,"dumps",False)(VFFSL(SL,"event",True)) # u'$dumps($event)' on line 43, col 16
        if _v is not None: write(_filter(_v, rawExpr=u'$dumps($event)')) # from line 43, col 16.
        write(u''';
if (picons[theevent[\'channel\']])
\t$(\'#station\').html(\'<img src="\'+picons[theevent[\'channel\']]+\'" width="75" />\');
</script>
''')

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False

    _CHEETAH_version = __CHEETAH_version__

    _CHEETAH_versionTuple = __CHEETAH_versionTuple__

    _CHEETAH_genTime = __CHEETAH_genTime__

    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__

    _CHEETAH_src = __CHEETAH_src__

    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__

    _mainCheetahMethod_for_event = 'respond'

## END CLASS DEFINITION

if not hasattr(event, '_initCheetahAttributes'):
    templateAPIClass = getattr(event, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(event)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=event()).run()
gpl-2.0
8,901,558,350,633,718,000
47.560669
242
0.59745
false
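The generated respond() above resolves every `$placeholder` against the template's searchList via VFFSL. A minimal stand-alone sketch of that mechanism using plain Cheetah (the template source and data here are invented; the real module additionally needs the OpenWebif runtime for `tstrings` and picons):

    from Cheetah.Template import Template

    # $event['channel'] is looked up in the dicts supplied via searchList,
    # just as respond() does with VFFSL(SL, "event", True)['channel'].
    t = Template("Now playing: $event['channel']",
                 searchList=[{'event': {'channel': 'Example TV'}}])
    print(t)  # -> Now playing: Example TV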
sauloal/cnidaria
scripts/venv/lib/python2.7/site-packages/cogent/core/moltype.py
1
40874
#!/usr/bin/env python
"""
moltype.py

MolType provides services for resolving ambiguities, or providing the
correct ambiguity for recoding. It also maintains the mappings between
different kinds of alphabets, sequences and alignments.

One issue with MolTypes is that they need to know about Sequence, Alphabet,
and other objects, but, at the same time, those objects need to know about
the MolType. It is thus essential that the connection between these other
types and the MolType can be made after the objects are created.
"""

__author__ = "Peter Maxwell, Gavin Huttley and Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley", "Rob Knight", \
               "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Production"

from cogent.core.alphabet import CharAlphabet, Enumeration, Alphabet, \
    AlphabetError, _make_complement_array
from cogent.util.misc import FunctionWrapper, add_lowercase, iterable, if_
from cogent.util.transform import allchars, keep_chars
from cogent.data.molecular_weight import DnaMW, RnaMW, ProteinMW
from cogent.core.sequence import Sequence as DefaultSequence, RnaSequence, \
    DnaSequence, ProteinSequence, ABSequence, NucleicAcidSequence, \
    ByteSequence, ModelSequence, ModelNucleicAcidSequence, \
    ModelDnaSequence, ModelRnaSequence, ModelDnaCodonSequence, \
    ModelRnaCodonSequence, ModelProteinSequence, ProteinWithStopSequence, \
    ModelProteinWithStopSequence
from cogent.core.genetic_code import DEFAULT as DEFAULT_GENETIC_CODE, \
    GeneticCodes
from cogent.core.alignment import Alignment, DenseAlignment, \
    SequenceCollection
from random import choice

import re
import string

import numpy
from numpy import array, sum, transpose, remainder, zeros, arange, newaxis, \
    ravel, asarray, fromstring, take, uint8, uint16, uint32

Float = numpy.core.numerictypes.sctype2char(float)
Int = numpy.core.numerictypes.sctype2char(int)

from string import maketrans, translate

IUPAC_gap = '-'

IUPAC_missing = '?'

IUPAC_DNA_chars = ['T', 'C', 'A', 'G']
IUPAC_DNA_ambiguities = {
    'N': ('A', 'C', 'T', 'G'),
    'R': ('A', 'G'),
    'Y': ('C', 'T'),
    'W': ('A', 'T'),
    'S': ('C', 'G'),
    'K': ('T', 'G'),
    'M': ('C', 'A'),
    'B': ('C', 'T', 'G'),
    'D': ('A', 'T', 'G'),
    'H': ('A', 'C', 'T'),
    'V': ('A', 'C', 'G')
}
IUPAC_DNA_ambiguities_complements = {
    'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', '-': '-',
    'M': 'K', 'K': 'M',
    'N': 'N',
    'R': 'Y', 'Y': 'R',
    'W': 'W',
    'S': 'S',
    'X': 'X',  # not technically an IUPAC ambiguity, but used by repeatmasker
    'V': 'B', 'B': 'V',
    'H': 'D', 'D': 'H'
}

IUPAC_DNA_complements = {
    'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', '-': '-',
}

IUPAC_DNA_complements = {
    'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', '-': '-',
}

IUPAC_RNA_chars = ['U', 'C', 'A', 'G']  # note change in standard order from DNA
IUPAC_RNA_ambiguities = {
    'N': ('A', 'C', 'U', 'G'),
    'R': ('A', 'G'),
    'Y': ('C', 'U'),
    'W': ('A', 'U'),
    'S': ('C', 'G'),
    'K': ('U', 'G'),
    'M': ('C', 'A'),
    'B': ('C', 'U', 'G'),
    'D': ('A', 'U', 'G'),
    'H': ('A', 'C', 'U'),
    'V': ('A', 'C', 'G')
}

IUPAC_RNA_ambiguities_complements = {
    'A': 'U', 'C': 'G', 'G': 'C', 'U': 'A', '-': '-',
    'M': 'K', 'K': 'M',
    'N': 'N',
    'R': 'Y', 'Y': 'R',
    'W': 'W',
    'S': 'S',
    'X': 'X',  # not technically an IUPAC ambiguity, but used by repeatmasker
    'V': 'B', 'B': 'V',
    'H': 'D', 'D': 'H'
}

IUPAC_RNA_complements = {
    'A': 'U', 'C': 'G', 'G': 'C', 'U': 'A', '-': '-',
}

# Standard RNA pairing: GU pairs count as 'weak' pairs
RnaStandardPairs = {
    ('A', 'U'): True,  # True vs False for 'always' vs 'sometimes' pairing
    ('C', 'G'): True,
    ('G', 'C'): True,
    ('U', 'A'): True,
    ('G', 'U'): False,
    ('U', 'G'): False,
}

# Watson-Crick RNA pairing only: GU pairs don't count as pairs
RnaWCPairs = {
    ('A', 'U'): True,
    ('C', 'G'): True,
    ('G', 'C'): True,
    ('U', 'A'): True,
}

# RNA pairing with GU counted as standard pairs
RnaGUPairs = {
    ('A', 'U'): True,
    ('C', 'G'): True,
    ('G', 'C'): True,
    ('U', 'A'): True,
    ('G', 'U'): True,
    ('U', 'G'): True,
}

# RNA pairing with GU, AA, GA, CA and UU mismatches allowed as weak pairs
RnaExtendedPairs = {
    ('A', 'U'): True,
    ('C', 'G'): True,
    ('G', 'C'): True,
    ('U', 'A'): True,
    ('G', 'U'): False,
    ('U', 'G'): False,
    ('A', 'A'): False,
    ('G', 'A'): False,
    ('A', 'G'): False,
    ('C', 'A'): False,
    ('A', 'C'): False,
    ('U', 'U'): False,
}

# Standard DNA pairing: only Watson-Crick pairs count as pairs
DnaStandardPairs = {
    ('A', 'T'): True,
    ('C', 'G'): True,
    ('G', 'C'): True,
    ('T', 'A'): True,
}

# protein letters & ambiguity codes
IUPAC_PROTEIN_chars = [
    'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
    'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y']

PROTEIN_WITH_STOP_chars = [
    'A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
    'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y', '*']

IUPAC_PROTEIN_ambiguities = {
    'B': ['N', 'D'],
    'X': IUPAC_PROTEIN_chars,
    'Z': ['Q', 'E'],
}

PROTEIN_WITH_STOP_ambiguities = {
    'B': ['N', 'D'],
    'X': PROTEIN_WITH_STOP_chars,
    'Z': ['Q', 'E'],
}


class FoundMatch(Exception):
    """Raised when a match is found in a deep loop to skip many levels"""
    pass


def make_matches(monomers=None, gaps=None, degenerates=None):
    """Makes a dict of symbol pairs (i,j) -> strictness.

    Strictness is True if i and j always match and False if they sometimes
    match (e.g. A always matches A, but W sometimes matches R).
    """
    result = {}
    # allow defaults to be left blank without problems
    monomers = monomers or {}
    gaps = gaps or {}
    degenerates = degenerates or {}
    # all monomers always match themselves and no other monomers
    for i in monomers:
        result[(i, i)] = True
    # all gaps always match all other gaps
    for i in gaps:
        for j in gaps:
            result[(i, j)] = True
    # monomers sometimes match degenerates that contain them
    for i in monomers:
        for j in degenerates:
            if i in degenerates[j]:
                result[(i, j)] = False
                result[(j, i)] = False
    # degenerates sometimes match degenerates that contain at least one of
    # the same monomers
    for i in degenerates:
        for j in degenerates:
            try:
                for i_symbol in degenerates[i]:
                    if i_symbol in degenerates[j]:
                        result[(i, j)] = False
                        raise FoundMatch
            except FoundMatch:
                pass  # flow control: break out of doubly nested loop
    return result


def make_pairs(pairs=None, monomers=None, gaps=None, degenerates=None):
    """Makes a dict of symbol pairs (i,j) -> strictness.

    Expands pairs into all possible pairs using degen symbols.
    Strictness is True if i and j always pair, and False if they 'weakly'
    pair (e.g. GU pairs or if it is possible that they pair).

    If you want to make GU pairs count as 'always matching', pass in pairs
    that have (G,U) and (U, G) mapped to True rather than False.
    """
    result = {}
    # allow defaults to be left blank without problems
    pairs = pairs or {}
    monomers = monomers or {}
    gaps = gaps or {}
    degenerates = degenerates or {}
    # add in the original pairs: should be complete monomer pairs
    result.update(pairs)
    # all gaps 'weakly' pair with each other
    for i in gaps:
        for j in gaps:
            result[(i, j)] = False
    # monomers sometimes pair with degenerates if the monomer's complement
    # is in the degenerate symbol
    for i in monomers:
        for j in degenerates:
            found = False
            try:
                for curr_j in degenerates[j]:
                    # check if (i,curr_j) and/or (curr_j,i) is a valid pair:
                    # not mutually required if pairs are not all commutative!
                    if (i, curr_j) in pairs:
                        result[(i, j)] = False
                        found = True
                    if (curr_j, i) in pairs:
                        result[(j, i)] = False
                        found = True
                    if found:
                        raise FoundMatch
            except FoundMatch:
                pass  # flow control: break out of nested loop
    # degenerates sometimes pair with each other if the first degenerate
    # contains the complement of one of the bases in the second degenerate
    for i in degenerates:
        for j in degenerates:
            try:
                for curr_i in degenerates[i]:
                    for curr_j in degenerates[j]:
                        if (curr_i, curr_j) in pairs:
                            result[(i, j)] = False
                            raise FoundMatch
            except FoundMatch:
                pass  # just using for flow control
    # don't forget the return value!
    return result


# RnaPairingRules is a dict of {name:(base_pairs,degen_pairs)} where base_pairs
# is a dict with the non-degenerate pairing rules and degen_pairs is a dict with
# both the degenerate and non-degenerate pairing rules.
# NOTE: uses make_pairs to augment the initial dict after construction.
RnaPairingRules = {
    'Standard': RnaStandardPairs,
    'WC': RnaWCPairs,
    'GU': RnaGUPairs,
    'Extended': RnaExtendedPairs,
}

for k, v in RnaPairingRules.items():
    RnaPairingRules[k] = (v, make_pairs(v))


class CoreObjectGroup(object):
    """Container relating gapped, ungapped, degen, and non-degen objects."""
    _types = ['Base', 'Degen', 'Gap', 'DegenGap']

    def __init__(self, Base, Degen=None, Gapped=None, DegenGapped=None):
        """Returns new CoreObjectGroup.

        Only Base is required.
        """
        self.Base = Base
        self.Degen = Degen
        self.Gapped = Gapped
        self.DegenGapped = DegenGapped
        self._items = [Base, Degen, Gapped, DegenGapped]
        self._set_relationships()

    def _set_relationships(self):
        """Sets relationships between the different "flavors"."""
        self.Base.Gapped = self.Gapped
        self.Base.Ungapped = self.Base
        self.Base.Degen = self.Degen
        self.Base.NonDegen = self.Base

        statements = [
            "self.Degen.Gapped = self.DegenGapped",
            "self.Degen.Ungapped = self.Degen",
            "self.Degen.Degen = self.Degen",
            "self.Degen.NonDegen = self.Base",

            "self.Gapped.Gapped = self.Gapped",
            "self.Gapped.Ungapped = self.Base",
            "self.Gapped.Degen = self.DegenGapped",
            "self.Gapped.NonDegen = self.Gapped",

            "self.DegenGapped.Gapped = self.DegenGapped",
            "self.DegenGapped.Ungapped = self.Degen",
            "self.DegenGapped.Degen = self.DegenGapped",
            "self.DegenGapped.NonDegen = self.Gapped",
        ]
        for s in statements:
            try:
                exec(s)
            except AttributeError:
                pass

    def __getitem__(self, i):
        """Allows container to be indexed into, by type of object (e.g. Gap)."""
        return self.__dict__[i]

    def whichType(self, a):
        """Returns the type of an alphabet in self, or None if not present."""
        return self._types[self._items.find(a)]


class AlphabetGroup(CoreObjectGroup):
    """Container relating gapped, ungapped, degen, and non-degen alphabets."""

    def __init__(self, chars, degens, gap=IUPAC_gap, missing=IUPAC_missing, \
                 MolType=None, constructor=None):
        """Returns new AlphabetGroup."""
        if constructor is None:
            if max(map(len, chars)) == 1:
                constructor = CharAlphabet
                chars = ''.join(chars)
                degens = ''.join(degens)
            else:
                constructor = Alphabet  # assume multi-char
        self.Base = constructor(chars, MolType=MolType)
        self.Degen = constructor(chars + degens, MolType=MolType)
        self.Gapped = constructor(chars + gap, gap, MolType=MolType)
        self.DegenGapped = constructor(chars + gap + degens + missing, gap, \
                                       MolType=MolType)
        self._items = [self.Base, self.Degen, self.Gapped, self.DegenGapped]
        self._set_relationships()
        # set complements if MolType was specified
        if MolType is not None:
            comps = MolType.Complements
            for i in self._items:
                i._complement_array = _make_complement_array(i, comps)


class MolType(object):
    """MolType: Handles operations that depend on the sequence type (e.g. DNA).

    The MolType knows how to connect alphabets, sequences, alignments, and so
    forth, and how to disambiguate ambiguous symbols and perform base
    pairing (where appropriate).

    WARNING: Objects passed to a MolType become associated with that MolType,
    i.e. if you pass ProteinSequence to a new MolType you make up, all
    ProteinSequences will now be associated with the new MolType. This may
    not be what you expect. Use preserve_existing_moltypes=True if you
    don't want to reset the moltype.
    """

    def __init__(self, motifset, Gap=IUPAC_gap, Missing=IUPAC_missing, \
                 Gaps=None, Sequence=None, Ambiguities=None,
                 label=None, Complements=None, Pairs=None, MWCalculator=None, \
                 add_lower=False, preserve_existing_moltypes=False, \
                 make_alphabet_group=False, ModelSeq=None):
        """Returns a new MolType object. Note that the parameters are in flux.

        Currently:
            motifset: Alphabet or sequence of items in the default
                alphabet. Does not include degenerates.

            Gap: default gap symbol

            Missing: symbol for missing data

            Gaps: any other symbols that should be treated as gaps (doesn't have
                  to include Gap or Missing; they will be silently added)

            Sequence: Class for constructing sequences.

            Ambiguities: dict of char:tuple, doesn't include gaps (these are
                hard-coded as - and ?, and added later).

            label: text label, don't know what this is used for. Unnecessary?

            Complements: dict of symbol:symbol showing how the non-degenerate
                single characters complement each other. Used for constructing
                on the fly the complement table, incl. support for mustPair and
                canPair.

            Pairs: dict in which keys are pairs of symbols that can pair
                with each other, values are True (must pair) or False (might
                pair). Currently, the meaning of GU pairs as 'weak' is conflated
                with the meaning of degenerate symbol pairs (which might pair
                with each other but don't necessarily, depending on how the
                symbol is resolved). This should be refactored.

            MWCalculator: f(seq) -> molecular weight.

            add_lower: if True (default: False) adds the lowercase versions of
                everything into the alphabet. Slated for deletion.

            preserve_existing_moltypes: if True (default: False), does not
                set the MolType of the things added in **kwargs to self.

            make_alphabet_group: if True, makes an AlphabetGroup relating
                the various alphabets to one another.

            ModelSeq: sequence type for modeling

        Note on "Degenerates" versus "Ambiguities": self.Degenerates contains
        _only_ mappings for degenerate symbols, whereas self.Ambiguities
        contains mappings for both degenerate and non-degenerate symbols.
        Sometimes you want one, sometimes the other, so both are provided.
        """
        self.Gap = Gap
        self.Missing = Missing
        self.Gaps = frozenset([Gap, Missing])
        if Gaps:
            self.Gaps = self.Gaps.union(frozenset(Gaps))
        self.label = label
        # set the sequence constructor
        if Sequence is None:
            Sequence = ''.join  # safe default string constructor
        elif not preserve_existing_moltypes:
            Sequence.MolType = self
        self.Sequence = Sequence

        # set the ambiguities
        ambigs = {self.Missing: tuple(motifset) + (self.Gap,),
                  self.Gap: (self.Gap,)}
        if Ambiguities:
            ambigs.update(Ambiguities)
        for c in motifset:
            ambigs[c] = (c,)
        self.Ambiguities = ambigs

        # set Complements -- must set before we make the alphabet group
        self.Complements = Complements or {}

        if make_alphabet_group:  # note: must use _original_ ambiguities here
            self.Alphabets = AlphabetGroup(motifset, Ambiguities, \
                                           MolType=self)
            self.Alphabet = self.Alphabets.Base
        else:
            if isinstance(motifset, Enumeration):
                self.Alphabet = motifset
            elif max(len(motif) for motif in motifset) == 1:
                self.Alphabet = CharAlphabet(motifset, MolType=self)
            else:
                self.Alphabet = Alphabet(motifset, MolType=self)

        # set the other properties
        self.Degenerates = Ambiguities and Ambiguities.copy() or {}
        self.Degenerates[self.Missing] = ''.join(motifset) + self.Gap
        self.Matches = make_matches(motifset, self.Gaps, self.Degenerates)
        self.Pairs = Pairs and Pairs.copy() or {}
        self.Pairs.update(make_pairs(Pairs, motifset, self.Gaps, \
                                     self.Degenerates))
        self.MWCalculator = MWCalculator
        # add lowercase characters, if we're doing that
        if add_lower:
            self._add_lowercase()
        # cache various other data that make the calculations faster
        self._make_all()
        self._make_comp_table()
        # a gap can be a true gap char or a degenerate character, typically '?'
        # we therefore want to ensure consistent treatment across the definition
        # of characters as either gap or degenerate
        self.GapString = ''.join(self.Gaps)
        strict_gap = "".join(set(self.GapString) - set(self.Degenerates))
        self.stripDegenerate = FunctionWrapper(
            keep_chars(strict_gap + ''.join(self.Alphabet)))
        self.stripBad = FunctionWrapper(keep_chars(''.join(self.All)))
        to_keep = set(self.Alphabet) ^ set(self.Degenerates) - set(self.Gaps)
        self.stripBadAndGaps = FunctionWrapper(keep_chars(''.join(to_keep)))

        # make inverse degenerates from degenerates
        # ensure that lowercase versions also exist if appropriate
        inv_degens = {}
        for key, val in self.Degenerates.items():
            inv_degens[frozenset(val)] = key.upper()
            if add_lower:
                inv_degens[frozenset(''.join(val).lower())] = key.lower()
        for m in self.Alphabet:
            inv_degens[frozenset(m)] = m
            if add_lower:
                inv_degens[frozenset(''.join(m).lower())] = m.lower()
        for m in self.Gaps:
            inv_degens[frozenset(m)] = m
        self.InverseDegenerates = inv_degens

        # set array type for modeling alphabets
        try:
            self.ArrayType = self.Alphabet.ArrayType
        except AttributeError:
            self.ArrayType = None

        # set modeling sequence
        self.ModelSeq = ModelSeq

    def __repr__(self):
        """String representation of MolType.

        WARNING: This doesn't allow you to reconstruct the object in its
        present incarnation.
        """
        return 'MolType(%s)' % (self.Alphabet,)

    def gettype(self):
        """Returns type, e.g. 'dna', 'rna', 'protein'. Delete?"""
        return self.label

    def makeSequence(self, Seq, Name=None, **kwargs):
        """Returns sequence of correct type. Replace with just self.Sequence?"""
        return self.Sequence(Seq, Name, **kwargs)

    def verifySequence(self, seq, gaps_allowed=True, wildcards_allowed=True):
        """Checks whether sequence is valid on the default alphabet.

        Has special-case handling for gaps and wild-cards. This mechanism is
        probably useful to have in parallel with the validation routines that
        check specifically whether the sequence has gaps, degenerate symbols,
        etc., or that explicitly take an alphabet as input.
        """
        alpha = frozenset(self.Ambiguities)
        if gaps_allowed:
            alpha = alpha.union(self.Gaps)
        if wildcards_allowed:
            alpha = alpha.union(self.Missing)
        try:
            nonalpha = re.compile('[^%s]' % re.escape(''.join(alpha)))
            badchar = nonalpha.search(seq)
            if badchar:
                motif = badchar.group()
                raise AlphabetError(motif)
        except TypeError:  # not alphabetic sequence: try slow method
            for motif in seq:
                if motif not in alpha:
                    raise AlphabetError(motif)

    def isAmbiguity(self, querymotif):
        """Return True if querymotif is an ambiguity character in alphabet.

        Arguments:
            - querymotif: the motif being queried."""
        return len(self.Ambiguities[querymotif]) > 1

    def _whatAmbiguity(self, motifs):
        """The code that represents all of 'motifs', and minimal others.

        Does this duplicate DegenerateFromSequence directly?
        """
        most_specific = len(self.Alphabet) + 1
        result = self.Missing
        for (code, motifs2) in self.Ambiguities.items():
            for c in motifs:
                if c not in motifs2:
                    break
            else:
                if len(motifs2) < most_specific:
                    most_specific = len(motifs2)
                    result = code
        return result

    def whatAmbiguity(self, motifs):
        """The code that represents all of 'motifs', and minimal others.

        Does this duplicate DegenerateFromSequence directly?
        """
        if not hasattr(self, '_reverse_ambiguities'):
            self._reverse_ambiguities = {}
        motifs = frozenset(motifs)
        if motifs not in self._reverse_ambiguities:
            self._reverse_ambiguities[motifs] = self._whatAmbiguity(motifs)
        return self._reverse_ambiguities[motifs]

    def _add_lowercase(self):
        """Adds lowercase versions of keys and vals to each internal dict."""
        for name in ['Alphabet', 'Degenerates', 'Gaps', 'Complements',
                     'Pairs', 'Matches']:
            curr = getattr(self, name)
            # temp hack to get around re-ordering
            if isinstance(curr, Alphabet):
                curr = tuple(curr)
            new = add_lowercase(curr)
            setattr(self, name, new)

    def _make_all(self):
        """Sets self.All, which contains all the symbols self knows about.

        Note that the value of items in self.All will be the string containing
        the possibly degenerate set of symbols that the items expand to.
        """
        all = {}
        for i in self.Alphabet:
            curr = str(i)
            all[i] = i
        for key, val in self.Degenerates.items():
            all[key] = val
        for i in self.Gaps:
            all[i] = i
        self.All = all

    def _make_comp_table(self):
        """Sets self.ComplementTable, which maps items onto their complements.

        Note: self.ComplementTable is only set if self.Complements exists.
        """
        if self.Complements:
            self.ComplementTable = maketrans(''.join(self.Complements.keys()),
                                             ''.join(self.Complements.values()))

    def complement(self, item):
        """Returns complement of item, using data from self.Complements.

        Always tries to return same type as item: if item looks like a dict,
        will return list of keys.
        """
        if not self.Complements:
            raise TypeError, \
                "Tried to complement sequence using alphabet without complements."
        try:
            return item.translate(self.ComplementTable)
        except (AttributeError, TypeError):
            item = iterable(item)
            get = self.Complements.get
            return item.__class__([get(i, i) for i in item])

    def rc(self, item):
        """Returns reverse complement of item w/ data from self.Complements.

        Always returns same type as input.
        """
        comp = list(self.complement(item))
        comp.reverse()
        if isinstance(item, str):
            return item.__class__(''.join(comp))
        else:
            return item.__class__(comp)

    def __contains__(self, item):
        """A MolType contains every character it knows about."""
        return item in self.All

    def __iter__(self):
        """A MolType iterates only over the characters in its Alphabet.."""
        return iter(self.Alphabet)

    def isGap(self, char):
        """Returns True if char is a gap."""
        return char in self.Gaps

    def isGapped(self, sequence):
        """Returns True if sequence contains gaps."""
        return self.firstGap(sequence) is not None

    def isDegenerate(self, sequence):
        """Returns True if sequence contains degenerate characters."""
        return self.firstDegenerate(sequence) is not None

    def isValid(self, sequence):
        """Returns True if sequence contains no items that are not in self."""
        try:
            return self.firstInvalid(sequence) is None
        except:
            return False

    def isStrict(self, sequence):
        """Returns True if sequence contains only items in self.Alphabet."""
        try:
            return (len(sequence) == 0) or (self.firstNonStrict(sequence) is None)
        except:
            return False

    def isValidOnAlphabet(self, sequence, alphabet=None):
        """Returns True if sequence contains only items in alphabet.

        Alphabet can actually be anything that implements __contains__.
        Defaults to self.Alphabet if not supplied.
        """
        if alphabet is None:
            alphabet = self.Alphabet
        return first_index_in_set(sequence, alphabet) is not None

    def firstNotInAlphabet(self, sequence, alphabet=None):
        """Returns index of first item not in alphabet, or None.

        Defaults to self.Alphabet if alphabet not supplied.
        """
        if alphabet is None:
            alphabet = self.Alphabet
        return first_index_in_set(sequence, alphabet)

    def firstGap(self, sequence):
        """Returns the index of the first gap in the sequence, or None."""
        gap = self.Gaps
        for i, s in enumerate(sequence):
            if s in gap:
                return i
        return None

    def firstDegenerate(self, sequence):
        """Returns the index of first degenerate symbol in sequence, or None."""
        degen = self.Degenerates
        for i, s in enumerate(sequence):
            if s in degen:
                return i
        return None

    def firstInvalid(self, sequence):
        """Returns the index of first invalid symbol in sequence, or None."""
        all = self.All
        for i, s in enumerate(sequence):
            if not s in all:
                return i
        return None

    def firstNonStrict(self, sequence):
        """Returns the index of first non-strict symbol in sequence, or None."""
        monomers = self.Alphabet
        for i, s in enumerate(sequence):
            if not s in monomers:
                return i
        return None

    def disambiguate(self, sequence, method='strip'):
        """Returns a non-degenerate sequence from a degenerate one.

        method can be 'strip' (deletes any characters not in monomers or gaps)
        or 'random' (assigns the possibilities at random, using equal
        frequencies).
        """
        if method == 'strip':
            try:
                return sequence.__class__(self.stripDegenerate(sequence))
            except:
                ambi = self.Degenerates

                def not_ambiguous(x):
                    return not x in ambi
                return sequence.__class__(filter(not_ambiguous, sequence))
        elif method == 'random':
            degen = self.Degenerates
            result = []
            for i in sequence:
                if i in degen:
                    result.append(choice(degen[i]))
                else:
                    result.append(i)
            if isinstance(sequence, str):
                return sequence.__class__(''.join(result))
            else:
                return sequence.__class__(result)
        else:
            raise NotImplementedError, "Got unknown method %s" % method

    def degap(self, sequence):
        """Deletes all gap characters from sequence."""
        try:
            return sequence.__class__(sequence.translate( \
                allchars, self.GapString))
        except AttributeError:
            gap = self.Gaps

            def not_gap(x):
                return not x in gap
            return sequence.__class__(filter(not_gap, sequence))

    def gapList(self, sequence):
        """Returns list of indices of all gaps in the sequence, or []."""
        gaps = self.Gaps
        return [i for i, s in enumerate(sequence) if s in gaps]

    def gapVector(self, sequence):
        """Returns list of bool indicating gap or non-gap in sequence."""
        return map(self.isGap, sequence)

    def gapMaps(self, sequence):
        """Returns tuple containing dicts mapping between gapped and ungapped.

        First element is a dict such that d[ungapped_coord] = gapped_coord.
        Second element is a dict such that d[gapped_coord] = ungapped_coord.

        Note that the dicts will be invalid if the sequence changes after the
        dicts are made.

        The gaps themselves are not in the dictionary, so use d.get() or test
        'if pos in d' to avoid KeyErrors if looking up all elements in a gapped
        sequence.
        """
        ungapped = {}
        gapped = {}
        num_gaps = 0
        for i, is_gap in enumerate(self.gapVector(sequence)):
            if is_gap:
                num_gaps += 1
            else:
                ungapped[i] = i - num_gaps
                gapped[i - num_gaps] = i
        return gapped, ungapped

    def countGaps(self, sequence):
        """Counts the gaps in the specified sequence."""
        gaps = self.Gaps
        gap_count = 0
        for s in sequence:
            if s in gaps:
                gap_count += 1
        return gap_count

    def countDegenerate(self, sequence):
        """Counts the degenerate bases in the specified sequence."""
        degen = self.Degenerates
        degen_count = 0
        for s in sequence:
            if s in degen:
                degen_count += 1
        return degen_count

    def possibilities(self, sequence):
        """Counts number of possible sequences matching the sequence.

        Uses self.Degenerates to decide how many possibilities there are at
        each position in the sequence.
""" degen = self.Degenerates count = 1 for s in sequence: if s in degen: count *= len(degen[s]) return count def MW(self, sequence, method='random', delta=None): """Returns the molecular weight of the sequence. If the sequence is ambiguous, uses method (random or strip) to disambiguate the sequence. if delta is present, uses it instead of the standard weight adjustment. """ if not sequence: return 0 try: return self.MWCalculator(sequence, delta) except KeyError: #assume sequence was ambiguous return self.MWCalculator(self.disambiguate(sequence, method), delta) def canMatch(self, first, second): """Returns True if every pos in 1st could match same pos in 2nd. Truncates at length of shorter sequence. Gaps are only allowed to match other gaps. """ m = self.Matches for pair in zip(first, second): if pair not in m: return False return True def canMismatch(self, first, second): """Returns True if any position in 1st could cause a mismatch with 2nd. Truncates at length of shorter sequence. Gaps are always counted as matches. """ m = self.Matches if not first or not second: return False for pair in zip(first, second): if not m.get(pair, None): return True return False def mustMatch(self, first, second): """Returns True if all positions in 1st must match positions in second.""" return not self.canMismatch(first, second) def canPair(self, first, second): """Returns True if first and second could pair. Pairing occurs in reverse order, i.e. last position of second with first position of first, etc. Truncates at length of shorter sequence. Gaps are only allowed to pair with other gaps, and are counted as 'weak' (same category as GU and degenerate pairs). NOTE: second must be able to be reverse """ p = self.Pairs sec = list(second) sec.reverse() for pair in zip(first, sec): if pair not in p: return False return True def canMispair(self, first, second): """Returns True if any position in 1st could mispair with 2nd. Pairing occurs in reverse order, i.e. last position of second with first position of first, etc. Truncates at length of shorter sequence. Gaps are always counted as possible mispairs, as are weak pairs like GU. """ p = self.Pairs if not first or not second: return False sec = list(second) sec.reverse() for pair in zip(first, sec): if not p.get(pair, None): return True return False def mustPair(self, first, second): """Returns True if all positions in 1st must pair with second. Pairing occurs in reverse order, i.e. last position of second with first position of first, etc. """ return not self.canMispair(first, second) def degenerateFromSequence(self, sequence): """Returns least degenerate symbol corresponding to chars in sequence. First tries to look up in self.InverseDegenerates. Then disambiguates and tries to look up in self.InverseDegenerates. Then tries converting the case (tries uppercase before lowercase). Raises TypeError if conversion fails. 
""" symbols = frozenset(sequence) #check if symbols are already known inv_degens = self.InverseDegenerates result = inv_degens.get(symbols, None) if result: return result #then, try converting the symbols degens = self.All converted = set() for sym in symbols: for char in degens[sym]: converted.add(char) symbols = frozenset(converted) result = inv_degens.get(symbols, None) if result: return result #then, try converting case symbols = frozenset([s.upper() for s in symbols]) result = inv_degens.get(symbols, None) if result: return result symbols = frozenset([s.lower() for s in symbols]) result = inv_degens.get(symbols, None) if result: return result #finally, try to find the minimal subset containing the symbols symbols = frozenset([s.upper() for s in symbols]) lengths = {} for i in inv_degens: if symbols.issubset(i): lengths[len(i)] = i if lengths: #found at least some matches sorted = lengths.keys() sorted.sort() return inv_degens[lengths[sorted[0]]] #if we got here, nothing worked raise TypeError, "Cannot find degenerate char for symbols: %s" \ % symbols ASCII = MolType( # A default type for text read from a file etc. when we don't # want to prematurely assume DNA or Protein. Sequence = DefaultSequence, motifset = string.letters, Ambiguities = {}, label = 'text', ModelSeq = ModelSequence, ) DNA = MolType( Sequence = DnaSequence, motifset = IUPAC_DNA_chars, Ambiguities = IUPAC_DNA_ambiguities, label = "dna", MWCalculator = DnaMW, Complements = IUPAC_DNA_ambiguities_complements, Pairs = DnaStandardPairs, make_alphabet_group=True, ModelSeq = ModelDnaSequence, ) RNA = MolType( Sequence = RnaSequence, motifset = IUPAC_RNA_chars, Ambiguities = IUPAC_RNA_ambiguities, label = "rna", MWCalculator = RnaMW, Complements = IUPAC_RNA_ambiguities_complements, Pairs = RnaStandardPairs, make_alphabet_group=True, ModelSeq = ModelRnaSequence, ) PROTEIN = MolType( Sequence = ProteinSequence, motifset = IUPAC_PROTEIN_chars, Ambiguities = IUPAC_PROTEIN_ambiguities, MWCalculator = ProteinMW, make_alphabet_group=True, ModelSeq = ModelProteinSequence, label = "protein") PROTEIN_WITH_STOP = MolType( Sequence = ProteinWithStopSequence, motifset = PROTEIN_WITH_STOP_chars, Ambiguities = PROTEIN_WITH_STOP_ambiguities, MWCalculator = ProteinMW, make_alphabet_group=True, ModelSeq = ModelProteinWithStopSequence, label = "protein_with_stop") BYTES = MolType( # A default type for arbitrary chars read from a file etc. when we don't # want to prematurely assume _anything_ about the data. 
Sequence = ByteSequence, motifset = map(chr, range(256)), Ambiguities = {}, ModelSeq = ModelSequence, label = 'bytes') #following is a two-state MolType useful for testing AB = MolType( Sequence = ABSequence, motifset = 'ab', Ambiguities={}, ModelSeq = ModelSequence, label='ab') class _CodonAlphabet(Alphabet): """Codon alphabets are DNA TupleAlphabets with a genetic code attribute and some codon-specific methods""" def _with(self, motifs): a = Alphabet._with(self, motifs) a.__class__ = type(self) a._gc = self._gc return a def isCodingCodon(self, codon): return not self._gc.isStop(codon) def isStopCodon(self, codon): return self._gc.isStop(codon) def getGeneticCode(self): return self._gc def CodonAlphabet(gc=DEFAULT_GENETIC_CODE, include_stop_codons=False): if isinstance(gc, (int, basestring)): gc = GeneticCodes[gc] if include_stop_codons: motifset = list(gc.Codons) else: motifset = list(gc.SenseCodons) motifset = [codon.upper().replace('U', 'T') for codon in motifset] a = _CodonAlphabet(motifset, MolType=DNA) a._gc = gc return a def _method_codon_alphabet(ignore, *args, **kwargs): """If CodonAlphabet is set as a property, it gets self as extra 1st arg.""" return CodonAlphabet(*args, **kwargs) STANDARD_CODON = CodonAlphabet() #Modify NucleicAcidSequence to avoid circular import NucleicAcidSequence.CodonAlphabet = _method_codon_alphabet NucleicAcidSequence.PROTEIN = PROTEIN ModelRnaSequence.MolType = RNA ModelRnaSequence.Alphabet = RNA.Alphabets.DegenGapped ModelDnaSequence.MolType = DNA ModelDnaSequence.Alphabet = DNA.Alphabets.DegenGapped ModelProteinSequence.MolType = PROTEIN ModelProteinSequence.Alphabet = PROTEIN.Alphabets.DegenGapped ModelProteinWithStopSequence.MolType = PROTEIN_WITH_STOP ModelProteinWithStopSequence.Alphabet = PROTEIN_WITH_STOP.Alphabets.DegenGapped ModelSequence.Alphabet = BYTES.Alphabet DenseAlignment.Alphabet = BYTES.Alphabet DenseAlignment.MolType = BYTES ModelDnaCodonSequence.Alphabet = DNA.Alphabets.Base.Triples ModelRnaCodonSequence.Alphabet = RNA.Alphabets.Base.Triples #Modify Alignment to avoid circular import Alignment.MolType = ASCII SequenceCollection.MolType = BYTES
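# Usage sketch (illustrative, not part of the module above). It exercises the
# DNA MolType defined near the end of the file; the import path
# cogent.core.moltype is an assumption, since that is where PyCogent ships
# this code.
from cogent.core.moltype import DNA

assert DNA.complement('ACTG') == 'TGAC'            # per-character complement table
assert DNA.rc('AACG') == 'CGTT'                    # reverse complement
assert DNA.isDegenerate('ACGR')                    # 'R' is an IUPAC degenerate code
assert DNA.degap('A-C?G') == 'ACG'                 # '-' and '?' both count as gaps
assert DNA.disambiguate('ACGR', 'strip') == 'ACG'  # drop degenerate symbols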
# --- end of previous file (license: mit) ---
# --- next file: typesupply/vanilla / Lib/vanilla/vanillaPopover.py ---
import weakref from Foundation import NSObject, NSRect, NSMakeRect, NSZeroRect from AppKit import NSView, NSViewController, NSPopover, NSMinXEdge, NSMaxXEdge, \ NSMinYEdge, NSMaxYEdge, NSPopoverBehaviorApplicationDefined, NSPopoverBehaviorTransient, \ NSPopoverBehaviorSemitransient from vanilla.vanillaBase import VanillaBaseObject, _breakCycles, _addAutoLayoutRules from vanilla.nsSubclasses import getNSSubclass _edgeMap = { "left": NSMinXEdge, "right": NSMaxXEdge, "top": NSMinYEdge, "bottom": NSMaxYEdge } try: NSPopoverBehaviorApplicationDefined except NameError: NSPopoverBehaviorApplicationDefined = 0 NSPopoverBehaviorTransient = 1 NSPopoverBehaviorSemitransient = 2 _behaviorMap = { "applicationDefined": NSPopoverBehaviorApplicationDefined, "transient": NSPopoverBehaviorTransient, "semitransient": NSPopoverBehaviorSemitransient } class VanillaPopoverContentView(NSView): def _getContentView(self): return self class VanillaPopoverDelegate(NSObject): def popoverWillShow_(self, notification): self.vanillaWrapper()._alertBindings("will show") def popoverDidShow_(self, notification): self.vanillaWrapper()._alertBindings("did show") def popoverWillClose_(self, notification): self.vanillaWrapper()._alertBindings("will close") def popoverDidClose_(self, notification): self.vanillaWrapper()._alertBindings("did close") class Popover(VanillaBaseObject): """ A popover capable of containing controls. .. image:: /_images/Popover.png :: from vanilla import Window, List, Popover, TextBox class PopoverExample: def __init__(self): self.w = Window((120, 120)) self.w.list = List((0, 0, -0, -0), ['A', 'B', 'C'], selectionCallback=self.showPopoverCallback) self.w.open() def showPopoverCallback(self, sender): selection = sender.getSelection() if not selection: return index = sender.getSelection()[0] relativeRect = sender.getNSTableView().rectOfRow_(index) self.pop = Popover((140, 80)) self.pop.text = TextBox((10, 10, -10, -10), 'This is a popover.') self.pop.open(parentView=sender.getNSTableView(), preferredEdge='right', relativeRect=relativeRect) PopoverExample() **size** Tuple of form *(width, height)* representing the size of the content in the popover. **parentView** The parent view that the popover should pop out from. This can be either a vanilla object or an instance of `NSView`_ or `NSView_` subclass. **preferredEdge** The edge of the parent view that you want the popover to pop out from. These are the options: +------------+ | *"left"* | +------------+ | *"right"* | +------------+ | *"top"* | +------------+ | *"bottom"* | +------------+ **behavior** The desired behavior of the popover. These are the options: +------------------------+-----------------------------------------------------+ | *"applicationDefined"* | Corresponds to NSPopoverBehaviorApplicationDefined. | +------------------------+-----------------------------------------------------+ | *"transient"* | Corresponds to NSPopoverBehaviorTransient. | +------------------------+-----------------------------------------------------+ | *"semitransient"* | Corresponds to NSPopoverBehaviorSemitransient. | +------------------------+-----------------------------------------------------+ .. 
_NSView: https://developer.apple.com/documentation/appkit/nsview?language=objc """ nsPopoverClass = NSPopover contentViewClass = VanillaPopoverContentView contentViewControllerClass = NSViewController def __init__(self, size, parentView=None, preferredEdge="top", behavior="semitransient"): if isinstance(parentView, VanillaBaseObject): parentView = parentView._getContentView() self._parentView = parentView self._preferredEdge = preferredEdge # content view and controller self._nsObject = getNSSubclass(self.contentViewClass).alloc().initWithFrame_(((0, 0), size)) self._contentViewController = self.contentViewControllerClass.alloc().init() self._contentViewController.setView_(self._nsObject) # popover cls = getNSSubclass(self.nsPopoverClass) self._popover = cls.alloc().init() self._popover.setContentViewController_(self._contentViewController) self._popover.setBehavior_(_behaviorMap[behavior]) # delegate self._delegate = VanillaPopoverDelegate.alloc().init() self._delegate.vanillaWrapper = weakref.ref(self) self._popover.setDelegate_(self._delegate) self._bindings = {} self._autoLayoutViews = {} def __del__(self): self._breakCycles() def _breakCycles(self): super(Popover, self)._breakCycles() view = self._getContentView() if view is not None: _breakCycles(view) self._contentViewController = None self._popover = None self._parentView = None self._delegate = None def open(self, parentView=None, preferredEdge=None, relativeRect=None): """ Open the popover. If desired, the *parentView* may be specified. If not, the values assigned during init will be used. Additionally, a rect of form `(x, y, width, height)` may be specified to indicate where the popover should pop out from. If not provided, the parent view's bounds will be used. """ if isinstance(parentView, VanillaBaseObject): parentView = parentView._getContentView() if parentView is None: parentView = self._parentView if relativeRect is not None: if not isinstance(relativeRect, NSRect): x, y, w, h = relativeRect relativeRect = NSMakeRect(x, y, w, h) else: relativeRect = NSZeroRect if preferredEdge is None: preferredEdge = self._preferredEdge preferredEdge = _edgeMap[preferredEdge] self._popover.showRelativeToRect_ofView_preferredEdge_(relativeRect, parentView, preferredEdge) def close(self): """ Close the popover. Once a popover has been closed it can not be re-opened. """ self._popover.close() def resize(self, width, height): """ Change the size of the popover to *width* and *height*. """ self._popover.setContentSize_((width, height)) def bind(self, event, callback): """ Bind a callback to an event. **event** A string representing the desired event. The options are: +----------------+-----------------------------------------------+ | *"will show"* | Called immediately before the popover shows. | +----------------+-----------------------------------------------+ | *"did show"* | Called immediately after the popover shows. | +----------------+-----------------------------------------------+ | *"will close"* | Called immediately before the popover closes. | +----------------+-----------------------------------------------+ | *"did close"* | Called immediately after the popover closes. | +----------------+-----------------------------------------------+ """ if event not in self._bindings: self._bindings[event] = [] self._bindings[event].append(callback) def unbind(self, event, callback): """ Unbind a callback from an event. **event** A string representing the desired event. Refer to :meth:`Popover.bind` for the options. 
**callback** The callback that has been bound to the event. """ self._bindings[event].remove(callback) def _alertBindings(self, key): if hasattr(self, "_bindings"): if key in self._bindings: for callback in self._bindings[key]: # XXX why return? there could be more than one binding. return callback(self) def addAutoPosSizeRules(self, rules, metrics=None): """ Add auto layout rules for controls/view in this view. **rules** must by a list of rule definitions. Rule definitions may take two forms: * strings that follow the `Visual Format Language`_ * dictionaries with the following key/value pairs: +---------------------------+-------------------------------------------------------------------------+ | key | value | +===========================+=========================================================================+ | *"view1"* | The vanilla wrapped view for the left side of the rule. | +---------------------------+-------------------------------------------------------------------------+ | *"attribute1"* | The attribute of the view for the left side of the rule. | | | See below for options. | +---------------------------+-------------------------------------------------------------------------+ | *"relation"* (optional) | The relationship between the left side of the rule | | | and the right side of the rule. See below for options. | | | The default value is `"=="`. | +---------------------------+-------------------------------------------------------------------------+ | *"view2"* | The vanilla wrapped view for the right side of the rule. | +---------------------------+-------------------------------------------------------------------------+ | *"attribute2"* | The attribute of the view for the right side of the rule. | | | See below for options. | +---------------------------+-------------------------------------------------------------------------+ | *"multiplier"* (optional) | The constant multiplied with the attribute on the right side of | | | the rule as part of getting the modified attribute. | | | The default value is `1`. | +---------------------------+-------------------------------------------------------------------------+ | *"constant"* (optional) | The constant added to the multiplied attribute value on the right | | | side of the rule to yield the final modified attribute. | | | The default value is `0`. 
| +---------------------------+-------------------------------------------------------------------------+ The `attribute1` and `attribute2` options are: +-------------------+--------------------------------+ | value | AppKit equivalent | +===================+================================+ | *"left"* | NSLayoutAttributeLeft | +-------------------+--------------------------------+ | *"right"* | NSLayoutAttributeRight | +-------------------+--------------------------------+ | *"top"* | NSLayoutAttributeTop | +-------------------+--------------------------------+ | *"bottom"* | NSLayoutAttributeBottom | +-------------------+--------------------------------+ | *"leading"* | NSLayoutAttributeLeading | +-------------------+--------------------------------+ | *"trailing"* | NSLayoutAttributeTrailing | +-------------------+--------------------------------+ | *"width"* | NSLayoutAttributeWidth | +-------------------+--------------------------------+ | *"height"* | NSLayoutAttributeHeight | +-------------------+--------------------------------+ | *"centerX"* | NSLayoutAttributeCenterX | +-------------------+--------------------------------+ | *"centerY"* | NSLayoutAttributeCenterY | +-------------------+--------------------------------+ | *"baseline"* | NSLayoutAttributeBaseline | +-------------------+--------------------------------+ | *"lastBaseline"* | NSLayoutAttributeLastBaseline | +-------------------+--------------------------------+ | *"firstBaseline"* | NSLayoutAttributeFirstBaseline | +-------------------+--------------------------------+ Refer to the `NSLayoutAttribute documentation`_ for the information about what each of these do. The `relation` options are: +--------+------------------------------------+ | value | AppKit equivalent | +========+====================================+ | *"<="* | NSLayoutRelationLessThanOrEqual | +--------+------------------------------------+ | *"=="* | NSLayoutRelationEqual | +--------+------------------------------------+ | *">="* | NSLayoutRelationGreaterThanOrEqual | +--------+------------------------------------+ Refer to the `NSLayoutRelation documentation`_ for the information about what each of these do. **metrics** may be either *None* or a dict containing key value pairs representing metrics keywords used in the rules defined with strings. .. _Visual Format Language: https://developer.apple.com/library/archive/documentation/UserExperience/Conceptual/AutolayoutPG/VisualFormatLanguage.html#//apple_ref/doc/uid/TP40010853-CH27-SW1 .. _NSLayoutAttribute documentation: https://developer.apple.com/documentation/uikit/nslayoutattribute?language=objc .. _NSLayoutRelation documentation: https://developer.apple.com/documentation/uikit/nslayoutrelation?language=objc """ _addAutoLayoutRules(self, rules, metrics)
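# Usage sketch (illustrative; `parentControl` is a hypothetical view, not
# defined in the file above). It shows the bind()/open() flow on top of the
# Popover class.
from vanilla import Popover, TextBox

def popoverClosedCallback(sender):
    print("popover closed")

pop = Popover((160, 60), behavior="transient")
pop.text = TextBox((10, 10, -10, -10), "Hello")
pop.bind("did close", popoverClosedCallback)
# Pop out from the bottom edge of some parent view:
# pop.open(parentView=parentControl, preferredEdge="bottom",
#          relativeRect=(0, 0, 40, 20))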
# --- end of previous file (license: mit) ---
# --- next file: eamy-org/imi / tests/handlers.py ---
#!/usr/bin/env python
import json
import unittest
from unittest.mock import patch
from urllib.request import Request

import imi.handlers

__all__ = ['TestCustomHandler']


class TestCustomHandler(unittest.TestCase):

    def setUp(self):
        self.handler = imi.handlers.CustomHandler()

    def test_noop_open(self):
        data = json.dumps({'a': 'b'}).encode('utf-8')
        req = Request('noop://example?b=c', data=data)
        res = json.loads(self.handler.noop_open(req).read().decode('utf-8'))
        self.assertEqual({'a': 'b', 'b': 'c'}, res)

    @patch('imi.handlers.subprocess.check_output')
    def test_command_open(self, check_output):
        data = json.dumps({'a': 'b'}).encode('utf-8')
        check_output.return_value = json.dumps({'b': 'c'}).encode('utf-8')
        req = Request('command://example', data=data)
        res = json.loads(self.handler.command_open(req).read().decode('utf-8'))
        self.assertEqual({'b': 'c'}, res)

    def tearDown(self):
        pass


if __name__ == '__main__':
    unittest.main()
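# General-pattern note (standard-library behavior, not imi-specific): urllib
# routes each URL scheme to a handler method named "<scheme>_open", so a
# handler like the one under test is normally installed through an opener:
from urllib.request import build_opener

def _opener_demo():
    opener = build_opener(imi.handlers.CustomHandler())
    return opener.open(Request('noop://example?b=c', data=b'{"a": "b"}'))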
# --- end of previous file (license: mpl-2.0) ---
# --- next file: molski/pmag / utils.py ---
from functools import reduce
import os
import struct

import numpy as np


class LABEL:
    NONE = 0
    VESSEL = 1
    BACKGROUND = 2
    PADDING = 3


class IMG:
    BACKGROUND = 0
    VESSEL = 1
    NONE = 255


is_eq = lambda a: lambda b: a == b
is_neq = lambda a: lambda b: a != b


def read_image(filename, dtype=np.float32):
    # Returns a 3D array for given OCT scan file ('.bin' file)
    with open(filename, 'rb') as openfile:
        # First, extract 3D lengths from file header
        shape = np.fromfile(
            openfile,
            dtype=np.int32,
            count=3,
        )
        image_size = int(np.prod(shape))
        # Then, read extracted amount of bytes and reshape data to 3D array
        image = np.fromfile(
            openfile,
            dtype=dtype,
            count=image_size,
        ).reshape(*shape)
    return image


def read_labels(filename):
    # Returns a 3D array for given OCT scan labels file ('.bin.labels' file)
    # Same as `read_image`, but the labels data type is int32
    return read_image(filename=filename, dtype=np.int32)


def write_image(image, filename, dtype=np.float32):
    image = image.astype(dtype)
    voxels_count = np.prod(image.shape)
    label_descriptions = (
        # (NAME, (R, G, B)),
        ('NONE', (0x00, 0x00, 0x00)),
        ('VESSEL', (0xff, 0x26, 0x26)),
        ('BACKGROUND', (0x00, 0x33, 0xff)),
    )
    with open(filename, 'wb') as openfile:
        openfile.write(struct.pack('<%sI' % image.ndim, *image.shape))
        if dtype == np.float32:
            openfile.write(struct.pack('<%sf' % voxels_count, *image.flatten()))
        else:
            openfile.write(struct.pack('<%sI' % voxels_count, *image.flatten()))
        openfile.write(struct.pack('<I', len(label_descriptions)))
        for i, (name, rgb) in enumerate(label_descriptions):
            openfile.write(struct.pack('<I', i))
            openfile.write(struct.pack('<I', len(name)))
            openfile.write(name.encode())
            openfile.write(struct.pack('<%sB' % len(rgb), *reversed(rgb)))
            openfile.write(struct.pack('<B', 0xff))


def write_labels(labels, filename):
    return write_image(image=labels, filename=filename, dtype=np.int32)


def cache_targets(targets, filename):
    # Input: (n, 3) shaped array
    targets = np.transpose(targets)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'wb') as openfile:
        openfile.write(struct.pack('<I', targets.shape[1]))
        openfile.write(struct.pack('<%sh' % targets.size, *targets.flatten()))


def read_cached_targets(filename):
    with open(filename, 'rb') as openfile:
        length = int(np.fromfile(
            openfile,
            dtype=np.int32,
            count=1,
        ))
        targets = np.fromfile(
            openfile,
            dtype=np.int16,
            count=3 * length,
        ).reshape(3, length).astype(np.int32)
    # Output: (n, 3) shaped array
    return np.transpose(targets)


def path(filepath):
    return os.path.join(
        os.path.normpath(
            os.path.dirname(__file__),
        ),
        filepath,
    )


def preprocess_patch(patch):
    # Performs ZMUV normalization on patch
    return (patch - patch.mean()) / patch.std()


def preprocess_batch(batch):
    # Performs ZMUV normalization on whole batch of patches
    return (
        batch - np.mean(
            batch,
            axis=tuple(range(1, batch.ndim)),
        ).reshape(
            -1,
            *(1,) * (batch.ndim - 1),
        )
    ) / np.std(
        batch,
        axis=tuple(range(1, batch.ndim)),
    ).reshape(
        -1,
        *(1,) * (batch.ndim - 1),
    )


def merge_labels(labels):
    first_labels = labels[0]
    assert all(label.shape == first_labels.shape for label in labels)
    assert all(label.dtype == first_labels.dtype for label in labels)
    merged_labels = np.zeros(
        shape=first_labels.shape,
        dtype=first_labels.dtype,
    )
    # Initially, set all voxels as belonging to the background
    merged_labels[...] = LABEL.BACKGROUND
    # Mark voxels, for which none of the labels belong to the
    # background, as vessels
    merged_labels[
        reduce(
            np.logical_and,
            map(is_neq(LABEL.BACKGROUND), labels),
        )
    ] = LABEL.VESSEL
    # Mark voxels, for which all the labels are unassigned or
    # belong to both vessel and background classes, as unassigned
    merged_labels[
        np.logical_or(
            reduce(
                np.logical_and,
                map(is_eq(LABEL.NONE), labels),
            ),
            np.logical_and(
                reduce(
                    np.logical_or,
                    map(is_eq(LABEL.VESSEL), labels),
                ),
                reduce(
                    np.logical_or,
                    map(is_eq(LABEL.BACKGROUND), labels),
                ),
            ),
        ),
    ] = LABEL.NONE
    return merged_labels
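# Quick numeric check of the ZMUV helpers above (illustrative only; the batch
# shape is arbitrary).
def _zmuv_demo():
    batch = np.random.rand(4, 8, 8, 8).astype(np.float32) * 100
    normed = preprocess_batch(batch)
    # Every patch in the batch ends up zero-mean, unit-variance:
    assert np.allclose(normed.mean(axis=(1, 2, 3)), 0, atol=1e-4)
    assert np.allclose(normed.std(axis=(1, 2, 3)), 1, atol=1e-3)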
# --- end of previous file (license: mit) ---
# --- next file: tsroten/ticktock / setup.py ---
from setuptools import setup

with open('README.rst') as f:
    long_description = f.read()

setup(
    name='ticktock',
    version='0.1.2',
    author='Thomas Roten',
    author_email='[email protected]',
    url='https://github.com/tsroten/ticktock',
    description="adds least-recently-used cache management and automatic data "
                "timeout to Python's Shelf class.",
    long_description=long_description,
    platforms='any',
    classifiers=[
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: System :: Filesystems',
        'Topic :: Database :: Front-Ends',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
    keywords=['shelf', 'dictionary-like', 'dict-like', 'cache', 'lru',
              'least-recently-used', 'timeout', 'persistent'],
    py_modules=['ticktock'],
    test_suite='test',
)
# --- end of previous file (license: mit) ---
# --- next file: aaalgo/fcns / fcn-val.py ---
#!/usr/bin/env python from __future__ import absolute_import from __future__ import division from __future__ import print_function import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import time from tqdm import tqdm import numpy as np import cv2 from skimage import measure # RESNET: import these for slim version of resnet import tensorflow as tf import picpac from stitcher import Stitcher from gallery import Gallery class Model: def __init__ (self, path, name='logits:0', prob=False): """applying tensorflow image model. path -- path to model name -- output tensor name prob -- convert output (softmax) to probability """ graph = tf.Graph() with graph.as_default(): saver = tf.train.import_meta_graph(path + '.meta') if False: for op in graph.get_operations(): for v in op.values(): print(v.name) inputs = graph.get_tensor_by_name("images:0") outputs = graph.get_tensor_by_name(name) if prob: shape = tf.shape(outputs) # (?, ?, ?, 2) # softmax outputs = tf.reshape(outputs, (-1, 2)) outputs = tf.nn.softmax(outputs) outputs = tf.reshape(outputs, shape) # keep prob of 1 only outputs = tf.slice(outputs, [0, 0, 0, 1], [-1, -1, -1, -1]) # remove trailing dimension of 1 outputs = tf.squeeze(outputs, axis=[3]) pass self.prob = prob self.path = path self.graph = graph self.inputs = inputs self.outputs = outputs self.saver = saver self.sess = None pass def __enter__ (self): assert self.sess is None config = tf.ConfigProto() config.gpu_options.allow_growth=True self.sess = tf.Session(config=config, graph=self.graph) #self.sess.run(init) self.saver.restore(self.sess, self.path) return self def __exit__ (self, eType, eValue, eTrace): self.sess.close() self.sess = None def apply (self, images, batch=32): if self.sess is None: raise Exception('Model.apply must be run within context manager') if len(images.shape) == 3: # grayscale images = images.reshape(images.shape + (1,)) pass return self.sess.run(self.outputs, feed_dict={self.inputs: images}) pass flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_string('db', 'db', '') flags.DEFINE_string('model', 'model', 'Directory to put the training data.') flags.DEFINE_integer('channels', 3, '') flags.DEFINE_integer('patch', None, '') flags.DEFINE_string('out', None, '') flags.DEFINE_integer('max', 100, '') flags.DEFINE_string('name', 'logits:0', '') flags.DEFINE_float('cth', 0.5, '') flags.DEFINE_integer('stride', 0, '') flags.DEFINE_integer('max_size', None, '') def save (path, images, prob): image = images[0, :, :, :] prob = prob[0] contours = measure.find_contours(prob, FLAGS.cth) prob *= 255 cv2.normalize(image, image, 0, 255, cv2.NORM_MINMAX) prob = cv2.cvtColor(prob, cv2.COLOR_GRAY2BGR) H = max(image.shape[0], prob.shape[0]) both = np.zeros((H, image.shape[1]*2 + prob.shape[1], 3)) both[0:image.shape[0],0:image.shape[1],:] = image off = image.shape[1] for contour in contours: tmp = np.copy(contour[:,0]) contour[:, 0] = contour[:, 1] contour[:, 1] = tmp contour = contour.reshape((1, -1, 2)).astype(np.int32) cv2.polylines(image, contour, True, (0, 255,0)) cv2.polylines(prob, contour, True, (0,255,0)) both[0:image.shape[0],off:(off+image.shape[1]), :] = image off += image.shape[1] both[0:prob.shape[0],off:(off+prob.shape[1]), :] = prob cv2.imwrite(path, both) def main (_): assert FLAGS.out assert FLAGS.db and os.path.exists(FLAGS.db) picpac_config = dict(seed=2016, #loop=True, shuffle=True, reshuffle=True, #resize_width=256, #resize_height=256, round_div = FLAGS.stride, batch=1, split=1, split_fold=0, annotate='json', channels=FLAGS.channels, stratify=True, 
pert_color1=20, pert_angle=20, pert_min_scale=0.8, pert_max_scale=1.2, #pad=False, pert_hflip=True, pert_vflip=True, channel_first=False # this is tensorflow specific # Caffe's dimension order is different. ) stream = picpac.ImageStream(FLAGS.db, perturb=False, loop=False, **picpac_config) gal = Gallery(FLAGS.out) cc = 0 with Model(FLAGS.model, name=FLAGS.name, prob=True) as model: for images, _, _ in stream: #images *= 600.0/1500 #images -= 800 #images *= 3000 /(2000-800) _, H, W, _ = images.shape if FLAGS.max_size: if max(H, W) > FLAGS.max_size: continue if FLAGS.patch: stch = Stitcher(images, FLAGS.patch) probs = stch.stitch(model.apply(stch.split())) else: probs = model.apply(images) cc += 1 save(gal.next(), images, probs) if FLAGS.max and cc >= FLAGS.max: break gal.flush() pass if __name__ == '__main__': tf.app.run()
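# Usage sketch for the Model context manager above (illustrative; the
# checkpoint path 'snapshots/model-10000' is a placeholder, not from this
# repo). With prob=True the raw logits are converted into a per-pixel
# foreground-probability map.
def _model_demo():
    with Model('snapshots/model-10000', name='logits:0', prob=True) as model:
        images = np.zeros((1, 256, 256, 3), dtype=np.float32)  # dummy batch
        return model.apply(images)  # shape (1, 256, 256): probabilities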
# --- end of previous file (license: bsd-2-clause) ---
# --- next file: onjin/ntv / setup.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='ntv',
    version='0.4.4',
    description='n.tv api',
    long_description=readme + '\n\n' + history,
    author='Marek Wywiał',
    author_email='[email protected]',
    url='https://github.com/onjin/ntv',
    packages=[
        'ntv',
    ],
    package_dir={'ntv': 'ntv'},
    include_package_data=True,
    install_requires=[
        'requests',
        'requests_cache',
        'simplejson',
        'clint',
        'baker',
    ],
    license="BSD",
    zip_safe=False,
    keywords='ntv',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
    ],
    test_suite='tests',
    scripts=['ntv/ntv-cli'],
)
# --- end of previous file (license: bsd-3-clause) ---
# --- next file: wbond/ocspbuilder / ocspbuilder/__init__.py ---
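# Usage sketch for the OCSPRequestBuilder defined in the module below
# (illustrative; the certificate file names are placeholders):
#
#     from oscrypto import asymmetric
#     from ocspbuilder import OCSPRequestBuilder
#
#     subject_cert = asymmetric.load_certificate('server.crt')
#     issuer_cert = asymmetric.load_certificate('issuer.crt')
#     ocsp_request = OCSPRequestBuilder(subject_cert, issuer_cert).build()
#     der_bytes = ocsp_request.dump()  # DER-encoded asn1crypto.ocsp.OCSPRequest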
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function from datetime import datetime, timedelta import inspect import re import sys import textwrap from asn1crypto import x509, keys, core, ocsp from asn1crypto.util import timezone from oscrypto import asymmetric, util from .version import __version__, __version_info__ if sys.version_info < (3,): int_types = (int, long) # noqa str_cls = unicode # noqa byte_cls = str else: int_types = (int,) str_cls = str byte_cls = bytes __all__ = [ '__version__', '__version_info__', 'OCSPRequestBuilder', 'OCSPResponseBuilder', ] def _writer(func): """ Decorator for a custom writer, but a default reader """ name = func.__name__ return property(fget=lambda self: getattr(self, '_%s' % name), fset=func) class OCSPRequestBuilder(object): _certificate = None _issuer = None _hash_algo = None _key_hash_algo = None _nonce = True _request_extensions = None _tbs_request_extensions = None def __init__(self, certificate, issuer): """ :param certificate: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object to create the request for :param issuer: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object for the issuer of the certificate """ self.certificate = certificate self.issuer = issuer self._key_hash_algo = 'sha1' self._hash_algo = 'sha256' self._request_extensions = {} self._tbs_request_extensions = {} @_writer def certificate(self, value): """ An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate to create the request for. """ is_oscrypto = isinstance(value, asymmetric.Certificate) if not is_oscrypto and not isinstance(value, x509.Certificate): raise TypeError(_pretty_message( ''' certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(value) )) if is_oscrypto: value = value.asn1 self._certificate = value @_writer def issuer(self, value): """ An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the issuer. """ is_oscrypto = isinstance(value, asymmetric.Certificate) if not is_oscrypto and not isinstance(value, x509.Certificate): raise TypeError(_pretty_message( ''' issuer must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(value) )) if is_oscrypto: value = value.asn1 self._issuer = value @_writer def hash_algo(self, value): """ A unicode string of the hash algorithm to use when signing the request - "sha1", "sha256" (default) or "sha512". """ if value not in set(['sha1', 'sha256', 'sha512']): raise ValueError(_pretty_message( ''' hash_algo must be one of "sha1", "sha256", "sha512", not %s ''', repr(value) )) self._hash_algo = value @_writer def key_hash_algo(self, value): """ A unicode string of the hash algorithm to use when creating the certificate identifier - "sha1" (default), or "sha256". """ if value not in set(['sha1', 'sha256']): raise ValueError(_pretty_message( ''' hash_algo must be one of "sha1", "sha256", not %s ''', repr(value) )) self._key_hash_algo = value @_writer def nonce(self, value): """ A bool - if the nonce extension should be used to prevent replay attacks. """ if not isinstance(value, bool): raise TypeError(_pretty_message( ''' nonce must be a boolean, not %s ''', _type_name(value) )) self._nonce = value def set_extension(self, name, value): """ Sets the value for an extension using a fully constructed asn1crypto.core.Asn1Value object. 
Normally this should not be needed, and the convenience attributes should be sufficient. See the definition of asn1crypto.ocsp.TBSRequestExtension and asn1crypto.ocsp.RequestExtension to determine the appropriate object type for a given extension. Extensions are marked as critical when RFC 6960 indicates so. :param name: A unicode string of an extension id name from asn1crypto.ocsp.TBSRequestExtensionId or asn1crypto.ocsp.RequestExtensionId. If the extension is not one defined in those classes, this must be an instance of one of the classes instead of a unicode string. :param value: A value object per the specs defined by asn1crypto.ocsp.TBSRequestExtension or asn1crypto.ocsp.RequestExtension """ if isinstance(name, str_cls): request_extension_oids = set([ 'service_locator', '1.3.6.1.5.5.7.48.1.7' ]) tbs_request_extension_oids = set([ 'nonce', 'acceptable_responses', 'preferred_signature_algorithms', '1.3.6.1.5.5.7.48.1.2', '1.3.6.1.5.5.7.48.1.4', '1.3.6.1.5.5.7.48.1.8' ]) if name in request_extension_oids: name = ocsp.RequestExtensionId(name) elif name in tbs_request_extension_oids: name = ocsp.TBSRequestExtensionId(name) else: raise ValueError(_pretty_message( ''' name must be a unicode string from asn1crypto.ocsp.TBSRequestExtensionId or asn1crypto.ocsp.RequestExtensionId, not %s ''', repr(name) )) if isinstance(name, ocsp.RequestExtensionId): extension = ocsp.RequestExtension({'extn_id': name}) elif isinstance(name, ocsp.TBSRequestExtensionId): extension = ocsp.TBSRequestExtension({'extn_id': name}) else: raise TypeError(_pretty_message( ''' name must be a unicode string or an instance of asn1crypto.ocsp.TBSRequestExtensionId or asn1crypto.ocsp.RequestExtensionId, not %s ''', _type_name(name) )) # We use native here to convert OIDs to meaningful names name = extension['extn_id'].native spec = extension.spec('extn_value') if not isinstance(value, spec) and value is not None: raise TypeError(_pretty_message( ''' value must be an instance of %s, not %s ''', _type_name(spec), _type_name(value) )) if isinstance(extension, ocsp.TBSRequestExtension): extn_dict = self._tbs_request_extensions else: extn_dict = self._request_extensions if value is None: if name in extn_dict: del extn_dict[name] else: extn_dict[name] = value def build(self, requestor_private_key=None, requestor_certificate=None, other_certificates=None): """ Validates the request information, constructs the ASN.1 structure and then optionally signs it. The requestor_private_key, requestor_certificate and other_certificates params are all optional and only necessary if the request needs to be signed. Signing a request is uncommon for OCSP requests related to web TLS connections. :param requestor_private_key: An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey object for the private key to sign the request with :param requestor_certificate: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate associated with the private key :param other_certificates: A list of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate objects that may be useful for the OCSP server to verify the request signature. Intermediate certificates would be specified here. 
:return: An asn1crypto.ocsp.OCSPRequest object of the request """ def _make_extension(name, value): return { 'extn_id': name, 'critical': False, 'extn_value': value } tbs_request_extensions = [] request_extensions = [] has_nonce = False for name, value in self._tbs_request_extensions.items(): if name == 'nonce': has_nonce = True tbs_request_extensions.append(_make_extension(name, value)) if self._nonce and not has_nonce: tbs_request_extensions.append( _make_extension('nonce', util.rand_bytes(16)) ) if not tbs_request_extensions: tbs_request_extensions = None for name, value in self._request_extensions.items(): request_extensions.append(_make_extension(name, value)) if not request_extensions: request_extensions = None tbs_request = ocsp.TBSRequest({ 'request_list': [ { 'req_cert': { 'hash_algorithm': { 'algorithm': self._key_hash_algo }, 'issuer_name_hash': getattr(self._certificate.issuer, self._key_hash_algo), 'issuer_key_hash': getattr(self._issuer.public_key, self._key_hash_algo), 'serial_number': self._certificate.serial_number, }, 'single_request_extensions': request_extensions } ], 'request_extensions': tbs_request_extensions }) signature = None if requestor_private_key or requestor_certificate or other_certificates: is_oscrypto = isinstance(requestor_private_key, asymmetric.PrivateKey) if not isinstance(requestor_private_key, keys.PrivateKeyInfo) and not is_oscrypto: raise TypeError(_pretty_message( ''' requestor_private_key must be an instance of asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey, not %s ''', _type_name(requestor_private_key) )) cert_is_oscrypto = isinstance(requestor_certificate, asymmetric.Certificate) if not isinstance(requestor_certificate, x509.Certificate) and not cert_is_oscrypto: raise TypeError(_pretty_message( ''' requestor_certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(requestor_certificate) )) if other_certificates is not None and not isinstance(other_certificates, list): raise TypeError(_pretty_message( ''' other_certificates must be a list of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate objects, not %s ''', _type_name(other_certificates) )) if cert_is_oscrypto: requestor_certificate = requestor_certificate.asn1 tbs_request['requestor_name'] = x509.GeneralName( name='directory_name', value=requestor_certificate.subject ) certificates = [requestor_certificate] for other_certificate in other_certificates: other_cert_is_oscrypto = isinstance(other_certificate, asymmetric.Certificate) if not isinstance(other_certificate, x509.Certificate) and not other_cert_is_oscrypto: raise TypeError(_pretty_message( ''' other_certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(other_certificate) )) if other_cert_is_oscrypto: other_certificate = other_certificate.asn1 certificates.append(other_certificate) signature_algo = requestor_private_key.algorithm if signature_algo == 'ec': signature_algo = 'ecdsa' signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo) if requestor_private_key.algorithm == 'rsa': sign_func = asymmetric.rsa_pkcs1v15_sign elif requestor_private_key.algorithm == 'dsa': sign_func = asymmetric.dsa_sign elif requestor_private_key.algorithm == 'ec': sign_func = asymmetric.ecdsa_sign if not is_oscrypto: requestor_private_key = asymmetric.load_private_key(requestor_private_key) signature_bytes = sign_func(requestor_private_key, tbs_request.dump(), self._hash_algo) 
signature = ocsp.Signature({ 'signature_algorithm': {'algorithm': signature_algorithm_id}, 'signature': signature_bytes, 'certs': certificates }) return ocsp.OCSPRequest({ 'tbs_request': tbs_request, 'optional_signature': signature }) class OCSPResponseBuilder(object): _response_status = None _certificate = None _certificate_status = None _revocation_date = None _certificate_issuer = None _hash_algo = None _key_hash_algo = None _nonce = None _this_update = None _next_update = None _response_data_extensions = None _single_response_extensions = None def __init__(self, response_status, certificate=None, certificate_status=None, revocation_date=None): """ Unless changed, responses will use SHA-256 for the signature, and will be valid from the moment created for one week. :param response_status: A unicode string of OCSP response type: - "successful" - when the response includes information about the certificate - "malformed_request" - when the request could not be understood - "internal_error" - when an internal error occured with the OCSP responder - "try_later" - when the OCSP responder is temporarily unavailable - "sign_required" - when the OCSP request must be signed - "unauthorized" - when the responder is not the correct responder for the certificate :param certificate: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate the response is about. Only required if the response_status is "successful". :param certificate_status: A unicode string of the status of the certificate. Only required if the response_status is "successful". - "good" - when the certificate is in good standing - "revoked" - when the certificate is revoked without a reason code - "key_compromise" - when a private key is compromised - "ca_compromise" - when the CA issuing the certificate is compromised - "affiliation_changed" - when the certificate subject name changed - "superseded" - when the certificate was replaced with a new one - "cessation_of_operation" - when the certificate is no longer needed - "certificate_hold" - when the certificate is temporarily invalid - "remove_from_crl" - only delta CRLs - when temporary hold is removed - "privilege_withdrawn" - one of the usages for a certificate was removed - "unknown" - the responder doesn't know about the certificate being requested :param revocation_date: A datetime.datetime object of when the certificate was revoked, if the response_status is "successful" and the certificate status is not "good" or "unknown". """ self.response_status = response_status self.certificate = certificate self.certificate_status = certificate_status self.revocation_date = revocation_date self._key_hash_algo = 'sha1' self._hash_algo = 'sha256' self._response_data_extensions = {} self._single_response_extensions = {} @_writer def response_status(self, value): """ The overall status of the response. Only a "successful" response will include information about the certificate. Other response types are for signaling info about the OCSP responder. 
Valid values include: - "successful" - when the response includes information about the certificate - "malformed_request" - when the request could not be understood - "internal_error" - when an internal error occured with the OCSP responder - "try_later" - when the OCSP responder is temporarily unavailable - "sign_required" - when the OCSP request must be signed - "unauthorized" - when the responder is not the correct responder for the certificate """ if not isinstance(value, str_cls): raise TypeError(_pretty_message( ''' response_status must be a unicode string, not %s ''', _type_name(value) )) valid_response_statuses = set([ 'successful', 'malformed_request', 'internal_error', 'try_later', 'sign_required', 'unauthorized' ]) if value not in valid_response_statuses: raise ValueError(_pretty_message( ''' response_status must be one of "successful", "malformed_request", "internal_error", "try_later", "sign_required", "unauthorized", not %s ''', repr(value) )) self._response_status = value @_writer def certificate(self, value): """ An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate the response is about. """ if value is not None: is_oscrypto = isinstance(value, asymmetric.Certificate) if not is_oscrypto and not isinstance(value, x509.Certificate): raise TypeError(_pretty_message( ''' certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(value) )) if is_oscrypto: value = value.asn1 self._certificate = value @_writer def certificate_status(self, value): """ A unicode string of the status of the certificate. Valid values include: - "good" - when the certificate is in good standing - "revoked" - when the certificate is revoked without a reason code - "key_compromise" - when a private key is compromised - "ca_compromise" - when the CA issuing the certificate is compromised - "affiliation_changed" - when the certificate subject name changed - "superseded" - when the certificate was replaced with a new one - "cessation_of_operation" - when the certificate is no longer needed - "certificate_hold" - when the certificate is temporarily invalid - "remove_from_crl" - only delta CRLs - when temporary hold is removed - "privilege_withdrawn" - one of the usages for a certificate was removed - "unknown" - when the responder doesn't know about the certificate being requested """ if value is not None: if not isinstance(value, str_cls): raise TypeError(_pretty_message( ''' certificate_status must be a unicode string, not %s ''', _type_name(value) )) valid_certificate_statuses = set([ 'good', 'revoked', 'key_compromise', 'ca_compromise', 'affiliation_changed', 'superseded', 'cessation_of_operation', 'certificate_hold', 'remove_from_crl', 'privilege_withdrawn', 'unknown', ]) if value not in valid_certificate_statuses: raise ValueError(_pretty_message( ''' certificate_status must be one of "good", "revoked", "key_compromise", "ca_compromise", "affiliation_changed", "superseded", "cessation_of_operation", "certificate_hold", "remove_from_crl", "privilege_withdrawn", "unknown" not %s ''', repr(value) )) self._certificate_status = value @_writer def revocation_date(self, value): """ A datetime.datetime object of when the certificate was revoked, if the status is not "good" or "unknown". 
""" if value is not None and not isinstance(value, datetime): raise TypeError(_pretty_message( ''' revocation_date must be an instance of datetime.datetime, not %s ''', _type_name(value) )) self._revocation_date = value @_writer def certificate_issuer(self, value): """ An asn1crypto.x509.Certificate object of the issuer of the certificate. This should only be set if the OCSP responder is not the issuer of the certificate, but instead a special certificate only for OCSP responses. """ if value is not None: is_oscrypto = isinstance(value, asymmetric.Certificate) if not is_oscrypto and not isinstance(value, x509.Certificate): raise TypeError(_pretty_message( ''' certificate_issuer must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(value) )) if is_oscrypto: value = value.asn1 self._certificate_issuer = value @_writer def hash_algo(self, value): """ A unicode string of the hash algorithm to use when signing the request - "sha1", "sha256" (default) or "sha512". """ if value not in set(['sha1', 'sha256', 'sha512']): raise ValueError(_pretty_message( ''' hash_algo must be one of "sha1", "sha256", "sha512", not %s ''', repr(value) )) self._hash_algo = value @_writer def key_hash_algo(self, value): """ A unicode string of the hash algorithm to use when creating the certificate identifier - "sha1" (default), or "sha256". """ if value not in set(['sha1', 'sha256']): raise ValueError(_pretty_message( ''' hash_algo must be one of "sha1", "sha256", not %s ''', repr(value) )) self._key_hash_algo = value @_writer def nonce(self, value): """ The nonce that was provided during the request. """ if not isinstance(value, byte_cls): raise TypeError(_pretty_message( ''' nonce must be a byte string, not %s ''', _type_name(value) )) self._nonce = value @_writer def this_update(self, value): """ A datetime.datetime object of when the response was generated. """ if not isinstance(value, datetime): raise TypeError(_pretty_message( ''' this_update must be an instance of datetime.datetime, not %s ''', _type_name(value) )) self._this_update = value @_writer def next_update(self, value): """ A datetime.datetime object of when the response may next change. This should only be set if responses are cached. If responses are generated fresh on every request, this should not be set. """ if not isinstance(value, datetime): raise TypeError(_pretty_message( ''' next_update must be an instance of datetime.datetime, not %s ''', _type_name(value) )) self._next_update = value def set_extension(self, name, value): """ Sets the value for an extension using a fully constructed asn1crypto.core.Asn1Value object. Normally this should not be needed, and the convenience attributes should be sufficient. See the definition of asn1crypto.ocsp.SingleResponseExtension and asn1crypto.ocsp.ResponseDataExtension to determine the appropriate object type for a given extension. Extensions are marked as critical when RFC 6960 indicates so. :param name: A unicode string of an extension id name from asn1crypto.ocsp.SingleResponseExtensionId or asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one defined in those classes, this must be an instance of one of the classes instead of a unicode string. 
:param value: A value object per the specs defined by asn1crypto.ocsp.SingleResponseExtension or asn1crypto.ocsp.ResponseDataExtension """ if isinstance(name, str_cls): response_data_extension_oids = set([ 'nonce', 'extended_revoke', '1.3.6.1.5.5.7.48.1.2', '1.3.6.1.5.5.7.48.1.9' ]) single_response_extension_oids = set([ 'crl', 'archive_cutoff', 'crl_reason', 'invalidity_date', 'certificate_issuer', '1.3.6.1.5.5.7.48.1.3', '1.3.6.1.5.5.7.48.1.6', '2.5.29.21', '2.5.29.24', '2.5.29.29' ]) if name in response_data_extension_oids: name = ocsp.ResponseDataExtensionId(name) elif name in single_response_extension_oids: name = ocsp.SingleResponseExtensionId(name) else: raise ValueError(_pretty_message( ''' name must be a unicode string from asn1crypto.ocsp.ResponseDataExtensionId or asn1crypto.ocsp.SingleResponseExtensionId, not %s ''', repr(name) )) if isinstance(name, ocsp.ResponseDataExtensionId): extension = ocsp.ResponseDataExtension({'extn_id': name}) elif isinstance(name, ocsp.SingleResponseExtensionId): extension = ocsp.SingleResponseExtension({'extn_id': name}) else: raise TypeError(_pretty_message( ''' name must be a unicode string or an instance of asn1crypto.ocsp.SingleResponseExtensionId or asn1crypto.ocsp.ResponseDataExtensionId, not %s ''', _type_name(name) )) # We use native here to convert OIDs to meaningful names name = extension['extn_id'].native spec = extension.spec('extn_value') if name == 'nonce': raise ValueError(_pretty_message( ''' The nonce value should be set via the .nonce attribute, not the .set_extension() method ''' )) if name == 'crl_reason': raise ValueError(_pretty_message( ''' The crl_reason value should be set via the certificate_status parameter of the OCSPResponseBuilder() constructor, not the .set_extension() method ''' )) if name == 'certificate_issuer': raise ValueError(_pretty_message( ''' The certificate_issuer value should be set via the .certificate_issuer attribute, not the .set_extension() method ''' )) if not isinstance(value, spec) and value is not None: raise TypeError(_pretty_message( ''' value must be an instance of %s, not %s ''', _type_name(spec), _type_name(value) )) if isinstance(extension, ocsp.ResponseDataExtension): extn_dict = self._response_data_extensions else: extn_dict = self._single_response_extensions if value is None: if name in extn_dict: del extn_dict[name] else: extn_dict[name] = value def build(self, responder_private_key=None, responder_certificate=None): """ Validates the request information, constructs the ASN.1 structure and signs it. The responder_private_key and responder_certificate parameters are only required if the response_status is "successful". 
:param responder_private_key: An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey object for the private key to sign the response with :param responder_certificate: An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate object of the certificate associated with the private key :return: An asn1crypto.ocsp.OCSPResponse object of the response """ if self._response_status != 'successful': return ocsp.OCSPResponse({ 'response_status': self._response_status }) is_oscrypto = isinstance(responder_private_key, asymmetric.PrivateKey) if not isinstance(responder_private_key, keys.PrivateKeyInfo) and not is_oscrypto: raise TypeError(_pretty_message( ''' responder_private_key must be an instance of asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey, not %s ''', _type_name(responder_private_key) )) cert_is_oscrypto = isinstance(responder_certificate, asymmetric.Certificate) if not isinstance(responder_certificate, x509.Certificate) and not cert_is_oscrypto: raise TypeError(_pretty_message( ''' responder_certificate must be an instance of asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate, not %s ''', _type_name(responder_certificate) )) if cert_is_oscrypto: responder_certificate = responder_certificate.asn1 if self._certificate is None: raise ValueError(_pretty_message( ''' certificate must be set if the response_status is "successful" ''' )) if self._certificate_status is None: raise ValueError(_pretty_message( ''' certificate_status must be set if the response_status is "successful" ''' )) def _make_extension(name, value): return { 'extn_id': name, 'critical': False, 'extn_value': value } response_data_extensions = [] single_response_extensions = [] for name, value in self._response_data_extensions.items(): response_data_extensions.append(_make_extension(name, value)) if self._nonce: response_data_extensions.append( _make_extension('nonce', self._nonce) ) if not response_data_extensions: response_data_extensions = None for name, value in self._single_response_extensions.items(): single_response_extensions.append(_make_extension(name, value)) if self._certificate_issuer: single_response_extensions.append( _make_extension( 'certificate_issuer', [ x509.GeneralName( name='directory_name', value=self._certificate_issuer.subject ) ] ) ) if not single_response_extensions: single_response_extensions = None responder_key_hash = getattr(responder_certificate.public_key, self._key_hash_algo) if self._certificate_status == 'good': cert_status = ocsp.CertStatus( name='good', value=core.Null() ) elif self._certificate_status == 'unknown': cert_status = ocsp.CertStatus( name='unknown', value=core.Null() ) else: status = self._certificate_status reason = status if status != 'revoked' else 'unspecified' cert_status = ocsp.CertStatus( name='revoked', value={ 'revocation_time': self._revocation_date, 'revocation_reason': reason, } ) issuer = self._certificate_issuer if self._certificate_issuer else responder_certificate if issuer.subject != self._certificate.issuer: raise ValueError(_pretty_message( ''' responder_certificate does not appear to be the issuer for the certificate. Perhaps set the .certificate_issuer attribute? 
''' )) produced_at = datetime.now(timezone.utc) if self._this_update is None: self._this_update = produced_at if self._next_update is None: self._next_update = self._this_update + timedelta(days=7) response_data = ocsp.ResponseData({ 'responder_id': ocsp.ResponderId(name='by_key', value=responder_key_hash), 'produced_at': produced_at, 'responses': [ { 'cert_id': { 'hash_algorithm': { 'algorithm': self._key_hash_algo }, 'issuer_name_hash': getattr(self._certificate.issuer, self._key_hash_algo), 'issuer_key_hash': getattr(issuer.public_key, self._key_hash_algo), 'serial_number': self._certificate.serial_number, }, 'cert_status': cert_status, 'this_update': self._this_update, 'next_update': self._next_update, 'single_extensions': single_response_extensions } ], 'response_extensions': response_data_extensions }) signature_algo = responder_private_key.algorithm if signature_algo == 'ec': signature_algo = 'ecdsa' signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo) if responder_private_key.algorithm == 'rsa': sign_func = asymmetric.rsa_pkcs1v15_sign elif responder_private_key.algorithm == 'dsa': sign_func = asymmetric.dsa_sign elif responder_private_key.algorithm == 'ec': sign_func = asymmetric.ecdsa_sign if not is_oscrypto: responder_private_key = asymmetric.load_private_key(responder_private_key) signature_bytes = sign_func(responder_private_key, response_data.dump(), self._hash_algo) certs = None if self._certificate_issuer: certs = [responder_certificate] return ocsp.OCSPResponse({ 'response_status': self._response_status, 'response_bytes': { 'response_type': 'basic_ocsp_response', 'response': { 'tbs_response_data': response_data, 'signature_algorithm': {'algorithm': signature_algorithm_id}, 'signature': signature_bytes, 'certs': certs } } }) def _pretty_message(string, *params): """ Takes a multi-line string and does the following: - dedents - converts newlines with text before and after into a single line - strips leading and trailing whitespace :param string: The string to format :param *params: Params to interpolate into the string :return: The formatted string """ output = textwrap.dedent(string) # Unwrap lines, taking into account bulleted lists, ordered lists and # underlines consisting of = signs if output.find('\n') != -1: output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output) if params: output = output % params output = output.strip() return output def _type_name(value): """ :param value: A value to get the object name of :return: A unicode string of the object name """ if inspect.isclass(value): cls = value else: cls = value.__class__ if cls.__module__ in set(['builtins', '__builtin__']): return cls.__name__ return '%s.%s' % (cls.__module__, cls.__name__)
License: mit
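A usage sketch for the builder above. It only needs signing material when response_status is "successful"; any other status makes build() return a bare OCSPResponse immediately. The constructor call below follows the library's documented OCSPResponseBuilder(response_status, certificate, certificate_status) form, and the file paths are placeholders:

    from oscrypto import asymmetric
    from ocspbuilder import OCSPResponseBuilder

    # Error statuses need no key material: build() short-circuits into
    # an OCSPResponse that carries only the response_status field.
    error_response = OCSPResponseBuilder('malformed_request').build()
    print(error_response.dump())  # DER-encoded bytes, ready to serve

    # A "successful" response must be signed by the responder
    # (placeholder paths; these come from your CA setup in practice):
    subject_cert = asymmetric.load_certificate('/path/to/subject.crt')
    responder_cert = asymmetric.load_certificate('/path/to/issuer.crt')
    responder_key = asymmetric.load_private_key('/path/to/issuer.key')

    builder = OCSPResponseBuilder('successful', subject_cert, 'good')
    ocsp_response = builder.build(responder_key, responder_cert)

If responses are cached, set this_update and next_update explicitly; otherwise build() stamps this_update with the current time and defaults next_update to one week later.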
Repository: mitschabaude/nanopores
File: nanopores/models/randomwalk.py
# (c) 2017 Gregor Mitscha-Baude "random walk of many particles in cylindrical pore" # TODO: bug: walldist = 2 works like what would be expected for = 1 # have hardcoded *2 in .contains_point # => properly investigate matplotlib.path.contains_point # and make code transparent import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as mpatches from matplotlib import animation from matplotlib import collections from scipy.stats import poisson, gamma import dolfin import nanopores from nanopores import get_pore from nanopores.tools.polygons import Polygon, Ball, isempty from nanopores.models import nanopore from nanopores.tools.poreplots import streamlines from nanopores.tools import fields dolfin.parameters["allow_extrapolation"] = True #False #TODO: params = nanopores.user_params( # general params geoname = "wei", dim = 2, rMolecule = 6., h = 5., Nmax = 4e4, Qmol = -1., bV = -0.2, posDTarget = True, x0 = None, # random walk params N = 1000, # number of (simultaneous) random walks dt = 10., # time step [ns] walldist = 1., # in multiples of radius, should be >= 1 margtop = 40., margbot = 20., initial = "disc", # oder "sphere" or "bottom-sphere" or "bottom-disc" ) # domains are places where molecule can bind # and/or be reflected after collision domain_params = dict( cyl = False, # determines whether rz or xyz coordinates are passed to .inside walldist = 1., # multiple of radius that determines what counts as collision exclusion = True, minsize = 0.01, # accuracy when performing reflection binding = False, bind_type = "collision", # or "zone" # "collision" parameters: eps = 1., # margin in addition to walldist, determines re-attempting p = 0.1, # binding probability for one attempt # "zone" parameters: ka = 1e5, # (bulk) association rate constant [1/Ms] ra = 1, # radius of the association zone (w/o rMolecule) [nm] # in collect_stats_mode, which applies only to "zone" binding, we separate # the random walk and sampling phases; during rw, only stats about attempt # time and (if use_force = True) position are collected collect_stats_mode = False, t = 1e6, # mean of exponentially distributed binding duration [ns] dx = 0.4, # width of bond energy barrier [nm] use_force = True, # if True, t_mean = t*exp(-|F|*dx/kT) ) class Domain(object): """based on existing domain object which need only support the methods .inside and .inside_single, which get either xyz or rz coordinates depending on the cyl attribute set in init.""" def __init__(self, domain, **params): self.domain = domain self.__dict__.update(domain_params) self.__dict__.update(params) if isinstance(domain, Polygon): self.cyl = True def initialize_binding_zone(self, rw): if not self.binding or not self.bind_type == "zone": return # calculate binding rate in binding zone self.rbind = rw.params.rMolecule + self.ra Vbind = 4./3.*np.pi*(self.rbind**3 - rw.params.rMolecule**3) # [nm**3] Vbind *= (1e-8)**3 * nanopores.mol # [dm**3/mol = 1/M] kbind = 1e-9 * self.ka / Vbind # [1/ns] # mean no. 
bindings during this step self.nbind = kbind * rw.dt self.Vbind = Vbind self.kbind = kbind def collide(self, rw): "compute collisions and consequences with RandomWalk instance" radius = rw.params.rMolecule * self.walldist # determine collisions and then operate only on collided particles X = rw.rz if self.cyl else rw.x[rw.alive] if self.exclusion or (self.binding and self.bind_type == "collision"): collided = self.domain.inside(X, radius=radius) # "reflect" particles by shortening last step if self.exclusion: X0, X1 = rw.xold[rw.alive], rw.x[rw.alive] for i in np.nonzero(collided)[0]: x = self.binary_search_inside(X0[i], X1[i], radius) rw.update_one(i, x) # attempt binding for particles that can bind if self.binding: if self.bind_type == "collision": can_bind = rw.can_bind[rw.alive] attempt = collided & can_bind # bind with probability p bind = np.random.rand(np.sum(attempt)) <= self.p # draw exponentially distributed binding time duration = self.draw_binding_durations(attempt, bind, rw) # update can_bind and bind_times of random walk iattempt = rw.i[rw.alive][attempt] ibind = iattempt[bind] rw.can_bind[iattempt] = False rw.bind_times[ibind] += duration # some statistics rw.attempts[rw.i[rw.alive][attempt]] += 1 rw.bindings[rw.i[rw.alive][attempt][bind]] += 1 # unbind particles that can not bind and are out of nobind zone X_can_not_bind = X[~can_bind] rnobind = radius + self.eps unbind = ~self.domain.inside(X_can_not_bind, radius=rnobind) iunbind = rw.i[rw.alive][~can_bind][unbind] rw.can_bind[iunbind] = True elif self.bind_type == "zone": rzone = self.rbind - self.domain.r if isinstance( self.domain, Ball) else self.rbind # determine particles in binding zone attempt = self.domain.inside(X, radius=rzone) iattempt = rw.i[rw.alive][attempt] rw.attempt_times[iattempt] += rw.dt if not self.collect_stats_mode: # draw poisson distributed number of bindings bindings = np.random.poisson(self.nbind, size=np.sum(attempt)) # draw gamma distributed binding durations and add to time duration = self.draw_zone_binding_durations(bindings, rw) #duration = np.random.gamma(bindings, scale=self.t) rw.bind_times[iattempt] += duration # statistics rw.bindings[iattempt] += bindings elif self.use_force: self.collect_forces(attempt, iattempt, rw) # update can_bind for video # actually can_bind should be called can_not_bind here rw.can_bind[iattempt] = False can_bind = rw.can_bind[rw.alive] X_can_not_bind = X[~can_bind] unbind = ~self.domain.inside(X_can_not_bind, radius=rzone) iunbind = rw.i[rw.alive][~can_bind][unbind] rw.can_bind[iunbind] = True def binary_search_inside(self, x0, x1, radius): if self.domain.inside_single(x0, radius=radius): #print self.rw.domains #print self print self.domain print "ERROR: x0 is in domain despite having been excluded before." 
print "x0", x0, "x1", x1 raise Exception if np.sum((x0 - x1)**2) < self.minsize**2: return x0 x05 = .5*(x0 + x1) if self.domain.inside_single(x05, radius=radius): x1 = x05 else: x0 = x05 return self.binary_search_inside(x0, x1, radius) def draw_binding_durations(self, attempt, bind, rw): if self.use_force and np.sum(bind) > 0: # evaluate force magnitude at binding particles ibind = np.nonzero(attempt)[0][bind] F = np.array([rw.F(x) for x in rw.rz[ibind]]) F = np.sqrt(np.sum(F**2, 1)) # create array of mean times kT = rw.phys.kT dx = 1e-9*self.dx t = self.t * np.exp(-F*dx/kT) else: t = self.t return np.random.exponential(t, np.sum(bind)) def draw_zone_binding_durations(self, bindings, rw): if self.use_force and np.sum(bindings) > 0: # evaluate force magnitude at binding particles ibind = np.nonzero(bindings)[0] F = np.array([rw.F(x) for x in rw.rz[ibind]]) F = np.sqrt(np.sum(F**2, 1)) # create array of mean times kT = rw.phys.kT dx = 1e-9*self.dx t = np.zeros(bindings.shape) t[ibind] = self.t * np.exp(-F*dx/kT) else: t = self.t return np.random.gamma(bindings, scale=t) def collect_forces(self, attempt, iattempt, rw): if not hasattr(rw, "binding_zone_forces"): rw.binding_zone_forces = [[] for i in range(rw.N)] if np.any(attempt): F = np.array([rw.F(x) for x in rw.rz[attempt]]) F = np.sqrt(np.sum(F**2, 1)) for i, f in enumerate(F): rw.binding_zone_forces[iattempt[i]].append(f) # external forces def load_externals(**params): return nanopore.force_diff(**params) class RandomWalk(object): def __init__(self, pore, N=10, dt=1., walldist=2., margtop=20., margbot=10., xstart=None, zstart=None, rstart=None, record_positions=False, initial="sphere", **params): # dt is timestep in nanoseconds self.pore = pore self.params = pore.params self.params.update(params, margtop=margtop, margbot=margbot, walldist=walldist, dt=dt, rstart=rstart, xstart=xstart, zstart=zstart, initial=initial) self.sim_params = params # initialize some parameters and create random walkers at entrance self.rtop = pore.protein.radiustop() - self.params.rMolecule self.ztop = pore.protein.zmax()[1] self.rbot = pore.protein.radiusbottom() - self.params.rMolecule self.zbot = pore.protein.zmin()[1] self.zmid = .5*(self.ztop + self.zbot) if isempty(pore.membrane) else pore.params["zmem"] self.N = N x, r, z = self.initial() self.x = x self.xold = x self.rz = np.column_stack([r, z]) self.dt = dt self.t = 0. 
self.i = np.arange(N) # load force and diffusivity fields F, D, divD = load_externals(**params) self.F = F #self.ood_evaluation(F) self.D = D #self.ood_evaluation(D) self.divD = divD #self.ood_evaluation(divD) self.phys = nanopores.Physics("pore_mol", **params) self.alive = np.full((N,), True, dtype=bool) self.success = np.full((N,), False, dtype=bool) self.fail = np.full((N,), False, dtype=bool) self.can_bind = np.full((N,), True, dtype=bool) self.times = np.zeros(N) self.bind_times = np.zeros(N) self.attempts = np.zeros(N, dtype=int) self.attempt_times = np.zeros(N) self.bindings = np.zeros(N, dtype=int) self.domains = [] self.add_domain(pore.protein, binding=False, exclusion=True, walldist=walldist) if not isempty(pore.membrane): self.add_domain(pore.membrane, binding=False, exclusion=True, walldist=walldist) self.record_positions = record_positions if self.record_positions: self.timetraces = [[] for i in range(N)] self.positions = [[] for i in range(N)] self.update_positions_record() # initial positions: uniformly distributed over disc def initial(self): if self.params.initial == "sphere": return self.initial_half_sphere() if self.params.initial == "bottom-sphere": return self.initial_half_sphere(True) if self.params.initial == "bottom-disc": return self.initial_disc(True) else: return self.initial_disc() def initial_disc(self, bottom=False): rstart = self.params.rstart xstart = self.params.xstart zstart = self.params.zstart if rstart is None: if bottom: rstart = self.rbot - self.params.rMolecule*self.params.walldist else: rstart = self.rtop - self.params.rMolecule*self.params.walldist if xstart is None: xstart = 0. if zstart is None: if bottom: zstart = self.zbot else: zstart = self.ztop self.rstart = rstart # create uniform polar coordinates r, theta r = rstart * np.sqrt(np.random.rand(self.N)) theta = 2.*np.pi * np.random.rand(self.N) x = np.zeros((self.N, 3)) x[:, 0] = xstart + r*np.cos(theta) x[:, 1] = r*np.sin(theta) x[:, 2] = zstart return x, np.sqrt(x[:, 0]**2 + x[:, 1]**2), x[:, 2] def initial_half_sphere(self, bottom=False): rstart = self.params.rstart xstart = self.params.xstart zstart = self.params.zstart if rstart is None: if bottom: rstart = 2.*self.rbot else: rstart = 2.*self.rtop if xstart is None: xstart = 0. if zstart is None: if bottom: zstart = self.zbot - self.params.rMolecule*self.params.walldist else: zstart = self.ztop + self.params.rMolecule*self.params.walldist self.rstart = rstart # draw 3D gaussian points, project to half-sphere and # only accept if above channel x = np.random.randn(self.N, 3) sign = -1 if bottom else 1 x[:, 2] = sign*np.abs(x[:, 2]) R = np.sqrt(np.sum(x**2, 1)) m = np.array([[xstart, 0., zstart]]) x = m + rstart*x/R[:, None] return x, np.sqrt(x[:, 0]**2 + x[:, 1]**2), x[:, 2] def add_domain(self, domain, **params): """add domain where particles can bind and/or are excluded from. domain only has to implement the .inside(x, radius) method. 
params can be domain_params""" dom = Domain(domain, **params) self.domains.append(dom) dom.initialize_binding_zone(self) def add_wall_binding(self, **params): dom = self.domains[0] dom.__dict__.update(params, binding=True) dom.initialize_binding_zone(self) def set_stopping_criteria(self, success=None, fail=None): if success is not None: self.is_success = success.__get__(self) if fail is not None: self.is_fail = fail.__get__(self) was_ood = False def ood_evaluation(self, f): dim = self.params.dim def newf(x): try: return f(x) except RuntimeError: if not self.was_ood: print "\nFirst particle out of domain:", x self.was_ood = True return np.zeros(dim) return newf def evaluate(self, function): return np.array([function(rz) for rz in self.rz]) def evaluate_vector_cyl(self, function): r = self.rz[:, 0] + 1e-30 R = self.x[self.alive] / r[:, None] F = self.evaluate(function) return np.column_stack([F[:, 0]*R[:, 0], F[:, 0]*R[:, 1], F[:, 1]]) def evaluate_D_cyl_matrix(self): # approximation based on Dn \sim Dt D = self.evaluate(self.D) Dn = D[:, 0] Dt = D[:, 1] r = self.rz[:, 0] + 1e-30 xbar = self.x[:, 0]/r ybar = self.x[:, 1]/r Dmatrix = np.zeros((self.N, 3, 3)) Dmatrix[:, 0, 0] = Dn*xbar**2 + Dt*(1.-xbar**2) Dmatrix[:, 1, 1] = Dn*ybar**2 + Dt*(1.-ybar**2) Dmatrix[:, 2, 2] = Dt return Dmatrix def evaluate_D_cyl(self): # approximation based on Dn \sim Dt D = self.evaluate(self.D) Dn = D[:, 0] Dt = D[:, 1] r = self.rz[:, 0] xbar = self.x[self.alive, 0]/r ybar = self.x[self.alive, 1]/r Dx = Dn*xbar**2 + Dt*(1.-xbar**2) Dy = Dn*ybar**2 + Dt*(1.-ybar**2) return np.column_stack([Dx, Dy, Dt]) def evaluate_D_simple(self): # just take D = Dzz D = self.evaluate(self.D) return D[:, 1, None] def brownian(self, D): n = np.count_nonzero(self.alive) zeta = np.random.randn(n, 3) return np.sqrt(2.*self.dt*1e9*D) * zeta def update(self, dx): self.xold = self.x.copy() #self.rzold = self.rz.copy() self.x[self.alive] = self.x[self.alive] + dx self.update_alive() r = np.sqrt(np.sum(self.x[self.alive, :2]**2, 1)) self.rz = np.column_stack([r, self.x[self.alive, 2]]) # def is_success(self, r, z): # return (z < self.zbot - self.params.margbot) | ( # (r > self.rbot + self.params.margbot) & (z < self.zbot)) # # def is_fail(self, r, z): # return (z > self.ztop + self.params.margtop) | ( # (r > self.rtop + self.params.margtop) & (z > self.ztop)) def is_success(self, r, z): return (r**2 + (z - self.zbot)**2 > self.params.margbot**2) & ( self.below_channel(r, z)) def is_fail(self, r, z): return (r**2 + (z - self.ztop)**2 > self.params.margtop**2) & ( self.above_channel(r, z)) def in_channel(self, r, z): if not hasattr(self, "_channel"): self._channel = self.pore.get_subdomain("pore") return self._channel.inside_winding(r, z) def above_channel(self, r, z): if not hasattr(self, "_above_channel"): self._above_channel = self.pore.get_subdomain( {"bulkfluid_top", "poreregion_top"}) return self._above_channel.inside_winding(r, z) def below_channel(self, r, z): if not hasattr(self, "_below_channel"): self._below_channel = self.pore.get_subdomain( {"bulkfluid_bottom", "poreregion_bottom"}) return self._below_channel.inside_winding(r, z) def update_alive(self): alive = self.alive z = self.x[alive, 2] r = np.sqrt(np.sum(self.x[alive, :2]**2, 1)) self.success[alive] = self.is_success(r, z) self.fail[alive] = self.is_fail(r, z) died = self.fail[alive] | self.success[alive] self.alive[alive] = ~died self.times[alive] = self.t def update_one(self, i, xnew): self.x[np.nonzero(self.alive)[0][i]] = xnew self.rz[i, 0] = np.sqrt(xnew[0]**2 + 
xnew[1]**2) self.rz[i, 1] = xnew[2] def update_positions_record(self): for i in range(self.N): if self.alive[i]: t = self.times[i] + self.bind_times[i] x = np.copy(self.x[i]) self.timetraces[i].append(t) self.positions[i].append(x) def step(self): "one step of random walk" # evaluate F and D D = self.evaluate_D_cyl() F = self.evaluate_vector_cyl(self.F) divD = 1e9*self.evaluate_vector_cyl(self.divD) kT = self.phys.kT dt = self.dt self.t += self.dt # get step dW = self.brownian(D) dx = dW + dt*divD + dt*D/kT*F #print "%.2f (dx) = %.2f (dW) + %.2f (divD) + %.2f (F)" % ( # abs(dx[0, 2]), abs(dW[0, 2]), abs(dt*divD[0, 2]), abs((dt*D/kT*F)[0, 2])) #print ("t = %.2f microsec" % (self.t*1e-3)) # update position and time and determine which particles are alive self.update(dx) # correct particles that collided with pore wall #self.simple_reflect() for domain in self.domains: domain.collide(self) if self.record_positions: self.update_positions_record() def walk(self): with nanopores.Log("Running..."): yield self.t while np.any(self.alive): #with nanopores.Log("%.0f ns, cpu time:" % self.t): self.step() yield self.t self.finalize() def draw_bindings(self, idomain=None, **bind_params): "(re-)draw bindings after running or loading rw" # get either specified domain or unique binding domain if idomain is None: domains = [dom for dom in self.domains if dom.binding] assert len(domains) == 1 domain = domains[0] else: domain = self.domains[idomain] domain.__dict__.update(bind_params) # draw number of bindings if domain.bind_type == "zone": domain.initialize_binding_zone(self) ta = self.attempt_times ka = domain.kbind self.bindings = np.random.poisson(ka*ta) else: raise NotImplementedError("currently only for zone binding") # draw binding durations t = self.t if domain.use_force: raise NotImplementedError("currently no force dependency") self.bind_times = 1e-9*np.random.gamma(self.bindings, scale=t) self.times = self.walk_times + self.bind_times def finalize(self): print "finished!" #print "mean # of attempts:", self.attempts.mean() tdwell = self.times.mean() tbind = self.bind_times.mean() ta = self.attempt_times.mean() #print "mean attempt time: %s ns (fraction of total time: %s)" % ( # ta, ta/tdwell) #print "mean # of bindings:", self.bindings.mean() #print "mean dwell time with binding: %.3f mus"%(1e-3*(tbind + tdwell)) #print "mean dwell time without binding: %.3f mus" % (1e-3*tdwell) self.walk_times = np.copy(self.times) self.times += self.bind_times for domain in self.domains: if not domain.binding or not domain.bind_type == "zone": continue # calculate effective association rate in pore phys = self.phys Dbulk = phys.DTargetBulk r = 1e-9*self.rstart # radius of arrival zone karr = 2.*self.phys.pi*r*Dbulk*1e3*phys.mol # events/Ms ka = domain.ka # bulk association rate [1/Ms] Vbind = domain.Vbind # binding volume per mole [1/M] cchar = 1./Vbind # [M], receptor concentration at which all targets # are in binding zone, so that ka * cchar = kbind kbind = ka * cchar # binding zone assoc. rate [1/s] ta_ = 1e-9*ta # mean attempt time := time in binding zone [s] nbind = ta_ * kbind # mean no. bindings per event keff = karr * ta_ * cchar * ka # = karr * nbind = bindings / Ms = # effective association rate frac = karr * ta_ / Vbind # fraction of time spent in binding zone # in simulation, relative to bulk = keff / ka #print("Dbulk", Dbulk) #print("karr", karr) #print("ta", ta) #print("Vbind", Vbind) #print "kbind", kbind #print("nbind: %.3f (bindings per event)" % nbind) #print("ka [1/Ms]. 
Effective: %.3g, bulk: %.3g, fraction: %.3g" % ( # keff, ka, frac)) print('\n==============\n\n') if self.record_positions: for i in range(self.N): self.positions[i] = np.array(self.positions[i]) self.timetraces[i] = np.array(self.timetraces[i]) if hasattr(self, "binding_zone_forces"): for i in range(self.N): self.binding_zone_forces[i] = np.array(self.binding_zone_forces[i]) def save(self, name="rw"): if "N" in self.params: self.params.pop("N") optional = dict() if self.record_positions: optional.update(positions = self.positions, timetraces = self.timetraces) if hasattr(self, "binding_zone_forces"): optional.update(binding_zone_forces = self.binding_zone_forces) fields.save_fields(name, self.params, times = self.times, success = self.success, fail = self.fail, bind_times = self.bind_times, attempts = self.attempts, bindings = self.bindings, attempt_times = self.attempt_times, **optional) fields.update() def ellipse_collection(self, ax): "for matplotlib plotting" xz = self.x[:, [0,2]] #xz = self.rz sizes = 2.*self.params.rMolecule*np.ones(self.N) colors = ["b"]*self.N coll = collections.EllipseCollection(sizes, sizes, np.zeros_like(sizes), offsets=xz, units="xy", facecolors=colors, transOffset=ax.transData, alpha=0.7) return coll def move_ellipses(self, coll, cyl=False): xz = self.x[:, ::2] if not cyl else np.column_stack( [np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]]) coll.set_offsets(xz) #inside = self.inside_wall() #margin = np.nonzero(self.alive)[0][self.inside_wall(2.)] colors = np.full((self.N,), "b", dtype=str) #colors[margin] = "r" colors[self.success] = "k" colors[self.fail] = "k" colors[self.alive & ~self.can_bind] = "r" #colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)] coll.set_facecolors(colors) #y = self.x[:, 1] #d = 50. #sizes = self.params.rMolecule*(1. 
+ y/d) #coll.set(widths=sizes, heights=sizes) def polygon_patches(self, cyl=False): poly_settings = dict(closed=True, facecolor="#eeeeee", linewidth=.75, edgecolor="k") ball_settings = dict(facecolor="#aaaaaa", linewidth=1., edgecolor="k", alpha=0.5) ball_bind_zone_settings = dict(facecolor="#ffaaaa", linewidth=0., alpha=0.5) patches = [] for dom in self.domains: domp = dom.domain if isinstance(domp, Polygon): polygon = domp.nodes polygon = np.array(polygon) patches.append(mpatches.Polygon(polygon, **poly_settings)) if not cyl: polygon_m = np.column_stack([-polygon[:,0], polygon[:,1]]) patches.append(mpatches.Polygon(polygon_m, **poly_settings)) elif isinstance(domp, Ball): xy = domp.x0[0], domp.x0[2] if dom.binding and dom.bind_type == "zone": p1 = mpatches.Circle(xy, domp.r + dom.ra, **ball_bind_zone_settings) p1.set_zorder(-100) patches.append(p1) p = mpatches.Circle(xy, domp.r, **ball_settings) p.set_zorder(200) patches.append(p) return patches def plot_streamlines(self, both=False, Hbot=None, Htop=None, R=None, **params): R = self.params.R if R is None else R Htop = self.params.Htop if Htop is None else Htop Hbot = self.params.Hbot if Hbot is None else Hbot #ax = plt.axes(xlim=(-R, R), ylim=(-Hbot, Htop)) dolfin.parameters["allow_extrapolation"] = True if both: Fel, Fdrag = fields.get_functions("force_pointsize", "Fel", "Fdrag", **self.sim_params) streamlines(patches=[self.polygon_patches(), self.polygon_patches()], R=R, Htop=Htop, Hbot=Hbot, Nx=100, Ny=100, Fel=Fel, Fdrag=Fdrag, **params) else: streamlines(patches=[self.polygon_patches()], R=R, Htop=Htop, Hbot=Hbot, Nx=100, Ny=100, F=self.F, **params) dolfin.parameters["allow_extrapolation"] = False # for p in patches: # p.set_zorder(100) # plt.gca().add_patch(p) plt.xlim(-R, R) plt.ylim(-Hbot, Htop) def plot_pore(self, cyl=False): R = self.params.R Htop = self.params.Htop Hbot = self.params.Hbot xlim = (-R*1.01, R*1.01) if not cyl else (0., R) ax = plt.axes(xlim=xlim, ylim=(-Hbot, Htop)) patches = self.polygon_patches(cyl) for p in patches: ax.add_patch(p) return ax def plot_path(self, i=0, cyl=False, **plot_params): self.plot_pore(cyl) path = self.positions[i] x = np.sqrt(path[:, 0]**2 + path[:, 1]**2) if cyl else path[:, 0] z = path[:, 2] plt.plot(x, z, **plot_params) def setup_default(params): pore = get_pore(**params) return RandomWalk(pore, **params) def _load(a): return a.load() if isinstance(a, fields.NpyFile) else a def load_results(name, **params): data = fields.get_fields(name, **params) data = nanopores.Params({k: _load(data[k]) for k in data}) print "Found %d simulated events." % len(data.times) return data def get_results(name, params, setup=setup_default, calc=True): # setup is function rw = setup(params) that sets up rw # check existing saved rws data = None if fields.exists(name, **params): data = load_results(name, **params) N = len(data.times) else: N = 0 # determine number of missing rws and run N_missing = params["N"] - N if N_missing > 0 and calc: new_params = nanopores.Params(params, N=N_missing) rw = setup(new_params) run(rw, name) rw.save(name) data = load_results(name, **params) # return results elif data is None: data = load_results(name, **params) return data def reconstruct_rw(data, params, setup=setup_default, finalize=True, **setup_args): """if positions were recorded, the complete random walk instance can IN PRINCIPLE be reconstructed and restarted. 
this is a simple first attempt.""" # TODO: recreate rw.rz, rw.t, rw.can_bind rw = setup(params, **setup_args) rw.__dict__.update( times = data.times - data.bind_times, success = data.success, fail = data.fail, bind_times = data.bind_times, attempts = data.attempts, bindings = data.bindings, attempt_times = data.attempt_times, ) if rw.record_positions: rw.positions = [[x for x in _load(X)] for X in data.positions] rw.timetraces = [[t for t in _load(T)] for T in data.timetraces] rw.x = np.array([X[-1] for X in data.positions]) rw.alive = ~(data.success | data.fail) if hasattr(data, "binding_zone_forces"): rw.binding_zone_forces = [list(_load(X)) for X in data.binding_zone_forces] if finalize: rw.finalize() return rw def get_rw(name, params, setup=setup_default, calc=True, finalize=True, **setup_args): # setup_args are only relevant to reconstructed rw # everything that influences data have to be in params data = get_results(name, params, setup, calc) return reconstruct_rw(data, params, setup, finalize, **setup_args) def video(rw, cyl=False, **aniparams): R = rw.params.R Htop = rw.params.Htop Hbot = rw.params.Hbot #fig = plt.figure() #fig.set_size_inches(6, 6) #ax = plt.axes([0,0,1,1], autoscale_on=False, xlim=(-R, R), ylim=(-H, H)) xlim = (-R, R) if not cyl else (0., R) ax = plt.axes(xlim=xlim, ylim=(-Hbot, Htop)) #streamlines(rx=R, ry=Htop, Nx=100, Ny=100, maxvalue=None, F=rw.F) coll = rw.ellipse_collection(ax) patches = rw.polygon_patches(cyl) def init(): return () def animate(t): if t == 0: ax.add_collection(coll) for p in patches: ax.add_patch(p) rw.move_ellipses(coll, cyl=cyl) return tuple([coll] + patches) aniparams = dict(dict(interval=10, blit=True, save_count=5000), **aniparams) ani = animation.FuncAnimation(ax.figure, animate, frames=rw.walk(), init_func=init, **aniparams) return ani def integrate_hist(hist, cutoff): n, bins, _ = hist I, = np.nonzero(bins > cutoff) return np.dot(n[I[:-1]], np.diff(bins[I])) def integrate_values(T, fT, cutoff): values = 0.5*(fT[:-1] + fT[1:]) I, = np.nonzero(T > cutoff) return np.dot(values[I[:-1]], np.diff(T[I])) def exponential_hist(times, a, b, **params): cutoff = 0.03 # cutoff frequency in ms if len(times) == 0: return bins = np.logspace(a, b, 100) hist = plt.hist(times, bins=bins, alpha=0.5, **params) plt.xscale("log") params.pop("label") color = params.pop("color") total = integrate_hist(hist, cutoff) if sum(times > cutoff) == 0: return tmean = times[times > cutoff].mean() T = np.logspace(a-3, b, 1000) fT = np.exp(-T/tmean)*T/tmean fT *= total/integrate_values(T, fT, cutoff) plt.plot(T, fT, label="exp. fit, mean = %.2f ms" % (tmean,), color="dark" + color, **params) plt.xlim(10**a, 10**b) def histogram(rw, a=-3, b=3, scale=1e-0): t = rw.times * 1e-9 / scale # assuming times are in nanosaconds exponential_hist(t[rw.success], a, b, color="green", label="translocated") exponential_hist(t[rw.fail], a, b, color="red", label="did not translocate") plt.xlabel(r"$\tau$ off [s]") plt.ylabel("count") plt.legend(loc="best") def hist_poisson(rw, name="attempts", ran=None, n=10, pfit=True, mpfit=True, lines=True): attempts = getattr(rw, name) if ran is None: n0 = 0 n1 = n else: n0, n1 = ran k = np.arange(n0, n1 + 1) bins = np.arange(n0 - 0.5, n1 + 1.5) astr = "ap = %.3f" if name == "bindings" else "a = %.1f" plt.hist(attempts, bins=bins, label="Simulated "+name, color="#aaaaff") # poisson fit a = attempts.mean() K = len(attempts) a0 = attempts[attempts >= 1].mean() a1 = poisson_from_positiveK(a0) print "Mod. 
Poisson fit, mean of K>0:", a0 print "Inferred total mean:", a1 print "Standard Poisson fit, mean:", a p1 = a/a1 K1 = len(attempts[attempts > 0])/(1.-np.exp(-a1)) pdf = K*poisson.pmf(k, a) pdf1 = K*p1*poisson.pmf(k, a1) if n0 == 0: pdf1[0] += K*(1. - p1) k0 = np.linspace(n0, n1, 500) if pfit: if lines: plt.plot(k0, K*gamma.pdf(a, k0 + 1), "-", color="C1") plt.plot(k, pdf, "s", label=("Poisson fit, "+astr)%(a), color="C1") if mpfit: if lines: plt.plot(k0, K1*gamma.pdf(a1, k0 + 1), "-", color="C2") plt.plot(k, pdf1, "v", label=("Mod. Poisson fit, "+astr)%(a1), color="C2") plt.xlim(n0 - 0.5, n1 + 0.5) plt.xticks(k, k) plt.yscale("log") plt.ylim(ymin=1.) plt.xlabel("# %s" % name) plt.ylabel("Count") plt.legend() def solve_newton(C, f, f1, x0=1., n=20): "solve f(x) == C" x = x0 # initial value print "Newton iteration:" for i in range(n): dx = -(f(x) - C)/f1(x) x = x + dx res = abs(f(x) - C) print i, "Residual", res, "Value", x if res < 1e-12: break print return x def poisson_from_positiveK(mean): # solve x/(1 - exp(-x)) == mean def f(x): return x/(1. - np.exp(-x)) def f1(x): return (np.expm1(x) - x)/(2.*np.cosh(x) - 2.) x = solve_newton(mean, f, f1, mean, n=10) return x def save(ani, name="rw"): ani.save(nanopores.HOME + "/presentations/nanopores/%s.mp4" % name, fps=30, dpi=200, writer="ffmpeg_file", extra_args=["-vcodec", "libx264"]) # convenience function for interactive experiments def run(rw, name="rw", plot=False, a=-3, b=3, **aniparams): params = nanopores.user_params(video=False, save=False, cyl=False) if params.video: ani = video(rw, cyl=params.cyl, **aniparams) if params.save: save(ani, name=name) else: plt.show() else: for t in rw.walk(): pass if plot and ((not params.video) or (not params.save)): histogram(rw, a, b) #plt.figure() #hist_poisson(rw, "attempts") #plt.figure() #hist_poisson(rw, "bindings") plt.show() if __name__ == "__main__": pore = get_pore(**params) rw = RandomWalk(pore, **params) receptor = Ball([15., 0., 30.], 8.) rw.add_domain(receptor, exclusion=True, walldist=1., binding=True, eps=1., t=1.5e6, p=0.14) run(rw, name="wei")
License: mit
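The heart of RandomWalk.step() above is an overdamped Langevin (Euler-Maruyama) update with a divergence correction for position-dependent diffusivity: dx = dW + dt*divD + dt*(D/kT)*F, where dW ~ N(0, 2*D*dt); the 1e9 factors in the original convert between the nanosecond time step and SI diffusivities. A self-contained NumPy sketch of the same update with constant toy coefficients (the real model evaluates D, F and divD from finite-element fields):

    import numpy as np

    def step(x, dt, D, F, divD, kT):
        """One overdamped Langevin step, mirroring RandomWalk.step():
        dx = dW + dt*divD + dt*(D/kT)*F, with dW ~ N(0, 2*D*dt)."""
        dW = np.sqrt(2.0 * dt * D) * np.random.randn(*x.shape)
        return x + dW + dt * divD + dt * (D / kT) * F

    # Toy, consistent SI values (illustrative only): 1000 particles,
    # uniform diffusivity, a constant downward force, no D gradient.
    x = np.zeros((1000, 3))
    kT = 4.1e-21                      # J, room temperature
    D = 1e-10                         # m^2/s
    F = np.array([0.0, 0.0, -1e-12])  # N
    for _ in range(100):
        x = step(x, dt=1e-9, D=D, F=F, divD=0.0, kT=kT)
    print(x[:, 2].mean())  # mean drift along z after 100 ns

With divD = 0 and constant F, the mean displacement grows linearly at the drift velocity D*F/kT (the Einstein relation), which makes a quick sanity check for the step.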
Repository: acidjunk/django-pages
File: demo_site/pages/templatetags/page.py
from django import template

register = template.Library()

# from django.utils.safestring import mark_safe
from ..models import Page


@register.inclusion_tag('page_navigation.html', takes_context=True)
def page_navigation(context):
    tree = []
    pages = Page.objects.filter(parent=None).order_by('ordering').all()
    for page in pages:
        childs = Page.objects.filter(parent=page.pk).count()
        childs_tree = []
        if childs > 0:
            ch = Page.objects.filter(parent=page.pk).order_by('ordering').all()
            for child in ch:
                chil = Page.objects.filter(parent=child.pk).count()
                chil_tree = []
                if chil > 0:
                    chi = Page.objects.filter(parent=child.pk).order_by('ordering').all()
                    # renamed from "ch" to avoid shadowing the queryset above
                    for grandchild in chi:
                        chil_tree.append({'title': grandchild.name, 'url': grandchild.url})
                childs_tree.append({'title': child.name, 'url': child.url, 'childs': chil_tree})
        tree.append({'title': page.name, 'url': page.url, 'childs': childs_tree})
    return {'tree': tree, 'request': context['request']}


@register.inclusion_tag('page_content_load.html', takes_context=True)
def content_load(context, content_object):
    is_staff = context['request'].user.is_staff
    return {'is_staff': is_staff, 'content_object': content_object}
License: gpl-3.0
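page_navigation above unrolls exactly three levels of the page tree by hand, issuing a count plus a fetch query per node. A recursive sketch that produces the same {'title', 'url', 'childs'} structure to arbitrary depth, meant as a drop-in for the same templatetags module (a hypothetical refactor, not code from the repository):

    from ..models import Page  # same model import as above

    def build_tree(parent=None):
        """Recursively assemble the {'title', 'url', 'childs'} navigation tree."""
        tree = []
        for page in Page.objects.filter(parent=parent).order_by('ordering'):
            tree.append({
                'title': page.name,
                'url': page.url,
                'childs': build_tree(parent=page.pk),
            })
        return tree

This keeps the one-query-per-node cost; if the tree grows, fetching all pages in a single query and grouping them by parent in Python would remove the N+1 pattern.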
Repository: KamilWo/bestja
File: addons/email_confirmation/controllers.py
# -*- coding: utf-8 -*-
import logging

from openerp.addons.auth_signup.res_users import SignupError
from openerp.addons.auth_signup.controllers.main import AuthSignupHome
from openerp import http
from openerp.http import request
from openerp.tools.translate import _

_logger = logging.getLogger(__name__)


class AuthSignupHome(AuthSignupHome):

    @http.route('/web/authenticate', type='http', auth='public', website=True)
    def web_auth_authenticate(self, *args, **kw):
        """After signing up, the user confirms their email."""
        qcontext = self.get_auth_signup_qcontext()
        if not qcontext.get('token') and not qcontext.get('signup_enabled'):
            return http.request.not_found()
        try:
            values = dict((key, qcontext.get(key)) for key in ('login', 'email'))
            request.env['res.users'].sudo()._authenticate_after_confirmation(
                values, qcontext.get('token'))
            request.cr.commit()
            response = super(AuthSignupHome, self).web_login(*args, **kw)
            response.qcontext['message'] = """
                Welcome to our community!
                You have successfully registered in our system.
            """
            return response
        except (SignupError, AssertionError) as e:
            qcontext['error'] = _(e.message)
            return self.web_login(*args, **kw)

    @http.route('/web/signup', type='http', auth='public', website=True)
    def web_auth_signup(self, *args, **kw):
        """Needs to be overridden, as the parent function logs the user in."""
        qcontext = self.get_auth_signup_qcontext()
        if not qcontext.get('token') and not qcontext.get('signup_enabled'):
            return http.request.not_found()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                self.do_signup(qcontext)
                qcontext['message'] = """
                    Thank you for registering!<br/><br/>
                    One small step for you, one giant leap for us!<br/>
                    Our community is growing!<br/><br/>
                    <strong>Confirm your registration by clicking the link
                    you will receive by e-mail.</strong>
                """
                # do not log the user in here
            except (SignupError, AssertionError) as e:
                message = e.message
                if message.startswith('duplicate key value violates unique constraint "res_users_login_key"'):
                    message = "This e-mail address is already in use."
                qcontext['error'] = _(message)
        return request.render('auth_signup.signup', qcontext)

    def do_signup(self, qcontext):
        """Overridden to include redirect."""
        values = {key: qcontext.get(key) for key in ('login', 'name', 'password')}
        assert all(values.values()), "The form was not properly filled in."
        assert values.get('password') == qcontext.get('confirm_password'), \
            "The passwords do not match."
        request.env['res.users'].sudo().with_context(
            redirect=qcontext.get('redirect'),
            no_reset_password=True,
            confirm_signup=True
        ).signup(values, qcontext.get('token'))
        request.cr.commit()
License: agpl-3.0
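The controller above splits signup into two requests: /web/signup creates the user without logging them in, and /web/authenticate activates the account once the emailed token comes back. A stack-independent toy model of that two-step flow (the names here are illustrative, not the module's API):

    import secrets

    class SignupFlow(object):
        """Toy model of confirm-by-email signup: register, then activate by token."""

        def __init__(self):
            self.pending = {}  # token -> signup values, awaiting confirmation
            self.active = {}   # login -> confirmed users

        def signup(self, values):
            assert values.get('password') == values.get('confirm_password'), \
                "The passwords do not match."
            token = secrets.token_urlsafe(16)
            self.pending[token] = values
            return token  # a real system would e-mail this token as a link

        def authenticate(self, token):
            values = self.pending.pop(token, None)
            assert values is not None, "Invalid or already-used confirmation token."
            self.active[values['login']] = values
            return values

    flow = SignupFlow()
    token = flow.signup({'login': 'user@example.com', 'name': 'User',
                         'password': 'pw', 'confirm_password': 'pw'})
    flow.authenticate(token)  # simulates clicking the confirmation link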
Repository: johanfforsberg/elogy
File: test/test_db.py
from operator import attrgetter from .fixtures import db from elogy.db import Entry from elogy.db import Logbook, LogbookRevision # Logbook def test_logbook(db): lb = Logbook.create(name="Logbook1", description="Hello") assert lb.name == "Logbook1" assert lb.description == "Hello" def test_logbook_descendants(db): parent1 = Logbook.create(name="Logbook1") parent2 = Logbook.create(name="Logbook2") child1 = Logbook.create(name="Logbook3", parent=parent1) child2 = Logbook.create(name="Logbook4", parent=parent1) child1child1 = Logbook.create(name="Logbook5", parent=child1) desc_ids = parent1.descendants assert set(desc_ids) == set([child1, child2, child1child1]) def test_logbook_ancestors(db): parent1 = Logbook.create(name="Logbook1") parent2 = Logbook.create(name="Logbook2") child1 = Logbook.create(name="Logbook3", parent=parent1) child2 = Logbook.create(name="Logbook4", parent=parent1) child1child1 = Logbook.create(name="Logbook5", parent=child1) desc_ids = child1child1.ancestors assert set(desc_ids) == set([parent1, child1]) def test_logbook_entries(db): lb = Logbook.create(name="Logbook1", description="Hello") entry2 = Entry.create(logbook=lb, title="Entry1") entry2 = Entry.create(logbook=lb, title="Entry1") assert len(lb.entries) == 2 def test_logbookrevision(db): lb = Logbook.create(name="Logbook1", description="Hello") # to properly update the logbook, use the "make_change" method # which creates a revision. revision = lb.make_change(name="Logbook2") # remember to save both logbook and revision lb.save() revision.save() assert len(lb.changes) == 1 rev = lb.changes[0] assert rev == revision # old value is stored in the revision assert rev.changed["name"] == "Logbook1" def test_logbookrevisionwrapper1(db): lb = Logbook.create(name="Logbook1", description="Hello") lb.make_change(name="Logbook2").save() lb.save() #The wrapper should look like a historical version of a Logbook wrapper = lb.get_revision(version=0) assert wrapper.name == "Logbook1" def test_logbookrevisionwrapper2(db): DESCRIPTION = "Hello" lb = Logbook.create(name="Logbook1", description=DESCRIPTION) lb.make_change(name="Logbook2").save() lb.save() lb.make_change(name="Logbook3").save() lb.save() wrapper = lb.get_revision(version=0) assert isinstance(wrapper, LogbookRevision) assert wrapper.revision_n == 0 assert wrapper.name == "Logbook1" assert wrapper.description == DESCRIPTION wrapper = lb.get_revision(version=1) assert isinstance(wrapper, LogbookRevision) assert wrapper.revision_n == 1 assert wrapper.name == "Logbook2" assert wrapper.description == DESCRIPTION wrapper = lb.get_revision(version=2) assert wrapper == lb # newest revision is just the Logbook assert wrapper.revision_n == 2 assert wrapper.name == "Logbook3" assert wrapper.description == DESCRIPTION def test_logbookrevisionwrapper3(db): NAME1 = "Name1" NAME2 = "Name2" DESCRIPTION1 = "Original description" DESCRIPTION2 = "New description" lb = Logbook.create(name=NAME1, description=DESCRIPTION1) lb.make_change(name=NAME2, description=DESCRIPTION2).save() lb.save() lb.make_change(name=NAME1).save() lb.save() wrapper = lb.get_revision(version=0) assert wrapper.name == NAME1 assert wrapper.description == DESCRIPTION1 wrapper = lb.get_revision(version=1) assert wrapper.name == NAME2 assert wrapper.description == DESCRIPTION2 wrapper = lb.get_revision(version=2) assert wrapper.name == NAME1 assert wrapper.description == DESCRIPTION2 assert wrapper == lb # newest revision is just the Logbook # Entry def test_entry(db): lb = Logbook.create(name="Logbook1", 
description="Hello") entry = Entry(logbook=lb, title="Entry1", content="Some content here") assert entry.logbook == lb assert entry.title == "Entry1" def test_enryrevision(db): lb = Logbook.create(name="Logbook1", description="Hello") entry = Entry.create(logbook=lb, title="Entry1") revision = entry.make_change(title="Entry2") entry.save() revision.save() assert len(entry.changes) == 1 rev = entry.changes[0] assert rev == revision assert rev.changed["title"] == "Entry1" def test_entryrevisionwrapper1(db): lb = Logbook.create(name="Logbook1") entry = Entry.create(logbook=lb, title="Entry1") entry.make_change(title="Entry2").save() entry.save() wrapper = entry.get_revision(version=0) assert wrapper.revision_n == 0 assert wrapper.title == "Entry1" def test_entryrevisionwrapper2(db): lb = Logbook.create(name="Logbook1") entry_v0 = { "logbook": lb, "title": "Some nice title", "content": "Some very neat content." } entry_v1 = { "logbook": lb, "title": "Some really nice title", "content": "Some very neat content." } entry_v2 = { "logbook": lb, "title": "Some really nice title", "content": "Some very neat content but changed." } # create entry and modify it twice entry = Entry.create(**entry_v0) entry.make_change(**entry_v1).save() entry.save() entry.make_change(**entry_v2).save() entry.save() # check that the wrapper reports the correct historical # values for each revision wrapper0 = entry.get_revision(version=0) assert wrapper0.revision_n == 0 assert wrapper0.title == entry_v0["title"] assert wrapper0.content == entry_v0["content"] wrapper1 = entry.get_revision(version=1) assert wrapper1.revision_n == 1 assert wrapper1.title == entry_v1["title"] assert wrapper1.content == entry_v1["content"] wrapper2 = entry.get_revision(version=2) assert wrapper2.revision_n == 2 assert wrapper2.title == entry_v2["title"] assert wrapper2.content == entry_v2["content"] # Search def test_entry_content_search(db): lb1 = Logbook.create(name="Logbook1") lb2 = Logbook.create(name="Logbook2") entries = [ { "logbook": lb1, "title": "First entry", "content": "This content is great!" }, { "logbook": lb1, "title": "Second entry", "content": "Some very neat content." }, { "logbook": lb1, "title": "Third entry", "content": "Not so bad content either." }, { "logbook": lb2, "title": "Fourth entry", "content": "Not so great content, should be ignored." } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # simple search result, = list(Entry.search(logbook=lb1, content_filter="great")) assert result.title == "First entry" # regexp search result, = list(Entry.search(logbook=lb1, content_filter="Not.*content")) assert result.title == "Third entry" def test_entry_content_search_global(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!" }, { "logbook": lb, "title": "Second entry", "content": "Some very neat content." }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either." } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # simple search result, = list(Entry.search(content_filter="great")) assert result.title == "First entry" # regexp search result, = list(Entry.search(content_filter="Not.*content")) assert result.title == "Third entry" def test_entry_title_search(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!" 
}, { "logbook": lb, "title": "Second entry", "content": "Some very neat content." }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either." } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # simple search result, = list(Entry.search(logbook=lb, title_filter="First")) assert result.title == "First entry" # regexp search result, = list(Entry.search(logbook=lb, title_filter="Th.*ry")) assert result.title == "Third entry" def test_entry_search_followups(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!" }, { "logbook": lb, "follows_id": 1, "title": "Second entry", "content": "Some very neat content." }, { "logbook": lb, "follows_id": 2, "title": "Third entry", "content": "Not so bad content either." } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # simple search result, = list(Entry.search(logbook=lb)) assert result.title == "First entry" def test_entry_attribute_search_followups(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!" }, { "logbook": lb, "follows_id": 1, "title": "Second entry", "content": "Some very neat content.", "attributes": {"a": 1} }, { "logbook": lb, "follows_id": 2, "title": "Third entry", "content": "Not so bad content either." } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # simple search result, = list(Entry.search(logbook=lb, followups=True, attribute_filter=[("a", 1)])) assert result.title == "Second entry" def test_entry_authors_search(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!", "authors": [{"name": "alpha"}, {"name": "beta"}] }, { "logbook": lb, "title": "Second entry", "content": "Some very neat content.", "authors": [{"name": "alpha"}] }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either.", "authors": [{"name": "gamma"}, {"name": "beta"}] } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() results = list(Entry.search(logbook=lb, author_filter="alpha")) print(set(map(attrgetter("title"), results))) assert set(map(attrgetter("title"), results)) == set(["First entry", "Second entry"]) # either results = list(Entry.search(logbook=lb, author_filter="alpha|beta")) assert set(map(attrgetter("title"), results)) == set(["First entry", "Second entry", "Third entry"]) def test_entry_attribute_filter(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!", "attributes": {"a": 1, "b": "2"} }, { "logbook": lb, "title": "Second entry", "content": "Some very neat content.", "attributes": {"a": 1, "b": "3"} }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either.", "attributes": {"a": 2, "b": "2"} } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # filter attributes result, = list(Entry.search(logbook=lb, attribute_filter=[("a", 2)])) assert result.title == "Third entry" results = list(Entry.search(logbook=lb, attribute_filter=[("b", "2")])) assert len(results) == 2 assert set([results[0].title, results[1].title]) == set(["First entry", "Third entry"]) # multiple attribute filter results = list(Entry.search(logbook=lb, attribute_filter=[("a", 2), ("b", "2")])) assert len(results) == 1 assert 
results[0].title == "Third entry" def test_entry_attribute_multioption_filter(db): """ All the values given for each attribute must be present in the options selected. """ lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!", "attributes": {"a": ["1", "2", "3"]} }, { "logbook": lb, "title": "Second entry", "content": "Some very neat content.", "attributes": {"a": ["2"], "b": ["7"]} }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either.", "attributes": {"a": ["3", "4"]} } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # filter attributes result, = list(Entry.search(logbook=lb, attribute_filter=[("a", "1")])) assert result.title == "First entry" # one value matching several entries results = list(Entry.search(logbook=lb, attribute_filter=[("a", "2")])) assert len(results) == 2 set([results[0].title, results[0].title]) == set(["First entry", "Second entry"]) # two values for one attribute results = list(Entry.search(logbook=lb, attribute_filter=[("a", "2"), ("a", "3")])) assert len(results) == 1 set([results[0].title, results[0].title]) == set(["First entry"]) # two different attributes results = list(Entry.search(logbook=lb, attribute_filter=[("a", "2"), ("b", "7")])) assert len(results) == 1 set([results[0].title, results[0].title]) == set(["Second entry"]) def test_entry_metadata_filter(db): lb = Logbook.create(name="Logbook1") entries = [ { "logbook": lb, "title": "First entry", "content": "This content is great!", "metadata": {"message": "hello"} }, { "logbook": lb, "title": "Second entry", "content": "Some very neat content.", "metadata": {"message": "yellow"} }, { "logbook": lb, "title": "Third entry", "content": "Not so bad content either.", "metadata": {} } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # filter attributes result, = list(Entry.search(logbook=lb, metadata_filter=[("message", "hello")])) assert result.title == "First entry" results = list(Entry.search(logbook=lb, metadata_filter=[("message", "%ello%")])) assert len(results) == 2 set([results[0].title, results[0].title]) == set(["First entry", "Second entry"]) def test_entry_content_search_child_logbooks(db): """Searching a logbook with 'child_logbooks' should also return hits from all descendant logbooks""" parent_lb = Logbook.create(name="Logbook1") child_lb = Logbook.create(name="Logbook2", parent=parent_lb) grandchild_lb = Logbook.create(name="Logbook2", parent=child_lb) entries = [ { "logbook": parent_lb, "title": "entry1", "content": "This content is great!", }, { "logbook": child_lb, "title": "entry2", "content": "Some very neat content.", }, { "logbook": grandchild_lb, "title": "entry3", "content": "Other stuff.", }, { "logbook": grandchild_lb, "title": "entry4", "content": "Not so bad content either.", }, { "logbook": grandchild_lb, "title": "entry5", "content": "Other stuff.", } ] # create entries for entry in entries: entry = Entry.create(**entry) entry.save() # only parent logbook results = list(Entry.search(logbook=parent_lb, child_logbooks=False, content_filter="content")) assert len(results) == 1 assert results[0].title == "entry1" # include child logbooks results = list(Entry.search(logbook=parent_lb, child_logbooks=True, content_filter="content")) assert len(results) == 3 assert set(r.title for r in results) == {"entry1", "entry2", "entry4"} # more restrictive results = list(Entry.search(logbook=parent_lb, child_logbooks=True, 
content_filter="neat content")) assert len(results) == 1 set([results[0].title]) == "entry2"
License: gpl-3.0
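The tests above pin down elogy's revision semantics: make_change() stores the old values of the fields being changed and returns a revision, and get_revision(version=n) reconstructs the snapshot as of revision n, with the newest version being the live object itself. A minimal, ORM-free sketch of that mechanism (illustrative, not elogy's implementation):

    class Versioned(object):
        """Tiny field-level revision log, mimicking make_change()/get_revision()."""

        def __init__(self, **fields):
            self.fields = fields
            self.changes = []  # one {field: old_value} dict per revision

        def make_change(self, **new_values):
            old = {k: self.fields.get(k) for k in new_values
                   if self.fields.get(k) != new_values[k]}
            self.changes.append(old)
            self.fields.update(new_values)
            return old

        def get_revision(self, version):
            # Undo every change newer than `version`, newest first.
            snapshot = dict(self.fields)
            for change in reversed(self.changes[version:]):
                snapshot.update(change)
            return snapshot

    lb = Versioned(name="Logbook1", description="Hello")
    lb.make_change(name="Logbook2")
    lb.make_change(name="Logbook3")
    assert lb.get_revision(0)["name"] == "Logbook1"
    assert lb.get_revision(1)["name"] == "Logbook2"
    assert lb.get_revision(2)["name"] == "Logbook3"
    assert lb.get_revision(2)["description"] == "Hello"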
Repository: eadains09/scripts
File: conjunctions.py
#! /usr/bin/env python

# Predict planetary visibility in the early evening (sunset to midnight),
# and upcoming conjunctions between two or more planets.
# Copyright 2014 Akkana Peck -- share and enjoy under the GPLv2 or later.

import ephem
import math

verbose = False
output_csv = True

# How low can a planet be at sunset or midnight before it's not interesting?
min_alt = 10. * math.pi / 180.

# How close do two bodies have to be to consider it a conjunction?
max_sep = 3.5 * math.pi / 180.

# How little percent illuminated do we need to consider something a crescent?
crescent_percent = 40

# Start and end times for seeing a crescent phase:
crescents = { "Mercury": [ None, None ], "Venus": [ None, None ] }

sun = ephem.Sun()
planets = [
    ephem.Moon(),
    ephem.Mercury(),
    ephem.Venus(),
    ephem.Mars(),
    ephem.Jupiter(),
    ephem.Saturn()
]

planets_up = {}
for planet in planets:
    planets_up[planet.name] = None

def datestr(d):
    tup = d.tuple()
    return "%d/%d/%d" % (tup[0], tup[1], tup[2])

def sepstr(sep):
    deg = float(sep) * 180. / math.pi
    # if deg < .5:
    #     return "less than a half a degree (%.2f)" % deg
    # if deg < 1.:
    #     return "less than a degree (%.2f)" % deg
    return "%.1f deg" % deg

class ConjunctionPair:
    '''A conjunction between a pair of objects'''
    def __init__(self, b1, b2, date, sep):
        self.bodies = [b1, b2]
        self.date = date
        self.sep = sep

    def __repr__(self):
        return "%s: %s and %s, sep %s" % (datestr(self.date),
                                          self.bodies[0], self.bodies[1],
                                          sepstr(self.sep))

    def __contains__(self, body):
        return body in self.bodies

class Conjunction:
    '''A collection of ConjunctionPairs which may encompass more than
       two bodies and several days.
       The list is not guaranteed to be in date (or any other) order.
    '''
    def __init__(self):
        self.bodies = []
        self.pairs = []

    def __contains__(self, body):
        return body in self.bodies

    def add(self, body1, body2, date, sep):
        self.pairs.append(ConjunctionPair(body1, body2, date, sep))
        if body1 not in self.bodies:
            self.bodies.append(body1)
        if body2 not in self.bodies:
            self.bodies.append(body2)

    def start_date(self):
        date = ephem.date('3000/1/1')
        for pair in self.pairs:
            if pair.date < date:
                date = pair.date
        return date

    def end_date(self):
        date = ephem.date('0001/1/1')
        for pair in self.pairs:
            if pair.date > date:
                date = pair.date
        return date

    def find_min_seps(self):
        # Unused stub: closeout() computes the minimum separations inline.
        return mindate, maxdate, minseps

    def andjoin(self, names):
        '''Join a list together like a, b, c and d'''
        if len(names) == 1:
            return names[0]
        # Works for any length >= 2 (the original fell through and
        # returned None for four or more names).
        return ', '.join(names[:-1]) + ' and ' + names[-1]

    def closeout(self):
        '''Time to figure out what we have and print it.'''
        # Find the list of minimum separations between each pair.
        startdate = ephem.date('3000/1/1')
        enddate = ephem.date('0001/1/1')
        minseps = []
        for i, b1 in enumerate(self.bodies):
            for b2 in self.bodies[i+1:]:
                minsep = 360     # degrees
                closest_date = None
                for pair in self.pairs:
                    if pair.date < startdate:
                        startdate = pair.date
                    if pair.date > enddate:
                        enddate = pair.date
                    if b1 in pair and b2 in pair:
                        if pair.sep < minsep:
                            minsep = pair.sep
                            closest_date = pair.date
                # Not all pairs will be represented. In a triple conjunction,
                # the two outer bodies may never get close enough to register
                # as a conjunction in their own right.
                if minsep < max_sep:
                    minseps.append((closest_date, minsep, b1, b2))
        minseps.sort()

        if output_csv:
            s = '"Conjunction of ' + self.andjoin(self.bodies) + '",'
            s += datestr(startdate) + "," + datestr(enddate) + ",,"
            s += "\""
            for m in minseps:
                s += " %s and %s will be closest on %s (%s)." % \
                     (m[2], m[3], datestr(m[0]), sepstr(m[1]))
            s += "\",,http://upload.wikimedia.org/wikipedia/commons/thumb/4/47/Sachin_Nigam_-_starry_moon_%28by-sa%29.jpg/320px-Sachin_Nigam_-_starry_moon_%28by-sa%29.jpg,240,169,\"<a href='http://commons.wikimedia.org/wiki/File:Sachin_Nigam_-_starry_moon_%28by-sa%29.jpg'>starry moon on Wikimedia Commons</a>\""
            print s
        else:
            print "Conjunction of", self.andjoin(self.bodies),
            print "lasts from %s to %s." % (datestr(startdate), datestr(enddate))
            for m in minseps:
                print "  %s and %s are closest on %s (%s)." % \
                      (m[2], m[3], datestr(m[0]), sepstr(m[1]))

    def merge(self, conj):
        '''Merge in another Conjunction -- it must be that the two
           sets of pairs have bodies in common.
        '''
        for p in conj.pairs:
            self.pairs.append(p)
        for body in conj.bodies:
            if body not in self.bodies:
                self.bodies.append(body)

class ConjunctionList:
    '''A collection of Conjunctions -- no bodies should be shared
       between any of the conjunctions we contain.
    '''
    def __init__(self):
        self.clist = []

    def add(self, b1, b2, date, sep):
        for i, c in enumerate(self.clist):
            if b1 in c or b2 in c:
                c.add(b1, b2, date, sep)
                # But what if one of the bodies is already in one of our
                # other Conjunctions? In that case, we have to merge.
                for cc in self.clist[i+1:]:
                    if b1 in cc or b2 in cc:
                        c.merge(cc)
                        # was self.clist.delete(cc): lists have no delete()
                        self.clist.remove(cc)
                return
        # It's new, so just add it
        c = Conjunction()
        c.add(b1, b2, date, sep)
        self.clist.append(c)

    def closeout(self):
        '''When we have a day with no conjunctions, check the list
           and close out any pending conjunctions.
        '''
        for c in self.clist:
            c.closeout()
        self.clist = []

oneday = ephem.hour * 24

web_image = {
    "Moon" : ("http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Phase-088.jpg/240px-Phase-088.jpg",
              '''"<a href='http://commons.wikimedia.org/wiki/User:JayTanner/gallery'>Jay Tanner</a>"''',
              240, 240),
    "Mercury" : ("../resources/astronomy/mercury.jpg", "", 240, 182),
    "Venus" : ("../resources/astronomy/venus.jpg", "", 240, 192),
    "Mars" : ("http://imgsrc.hubblesite.org/hu/db/images/hs-2001-24-a-small_web.jpg,200,200",
              "Hubble Space Telescope", 200, 200),
    "Jupiter" : ("http://upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Jupiter.jpg/240px-Jupiter.jpg",
                 '"USGS, JPL and NASA"', 240, 240),
    "Saturn" : ("http://upload.wikimedia.org/wikipedia/commons/thumb/b/b4/Saturn_%28planet%29_large.jpg/384px-Saturn_%28planet%29_large.jpg",
                "Voyager 2", 192, 240)
}

descriptions = {
    "Mars": "Mars is visible as a bright, reddish \"star\".",
    "Saturn": "Saturn is visible. A small telescope will show its rings.",
    "Jupiter": "Jupiter is visible. With binoculars you can see its four brightest moons."
}

def quotecsv(s):
    if ',' in s or '"' in s:
        return '"' + s.replace('"', '""') + '"'
    return s

def finish_planet(p, d):
    if not planets_up[p]:
        return

    if p in descriptions.keys():
        if output_csv:
            isvis = quotecsv(descriptions[p])
        else:
            isvis = descriptions[p]
    elif p == "Venus" or p == "Mercury":
        isvis = p + " is visible in the early evening sky."
    else:
        isvis = p + " is visible."

    # How about crescent info?
    if p in crescents.keys():
        if crescents[p][0]:
            isvis += " A telescope will show a crescent from " \
                     + datestr(crescents[p][0])
            if crescents[p][1]:
                isvis += " to " + datestr(crescents[p][1])
            isvis += '.'
        crescents[p] = [ None, None ]

    if output_csv:
        if p != 'Moon':
            if web_image[p]:
                img = web_image[p][0]
                cred = web_image[p][1]
                w = web_image[p][2]
                h = web_image[p][3]
            else:
                img = ""
                cred = ""
                w = ""
                h = ""
            print "%s,%s,%s,,%s,,%s,%s,%s,%s" % \
                (p, datestr(planets_up[p]), datestr(d), isvis, img, w, h, cred)
    else:
        print datestr(planets_up[p]), "to", datestr(d), ":", isvis

    planets_up[p] = None

def run(start, end, observer, toolate):
    '''Find planetary visibility between dates start and end,
       for an observer whose location has been set,
       between sunset and "toolate" on each date, where toolate is a
       GMT hour, e.g. toolate=7 means we'll stop at 0700 GMT or midnight MDT.
    '''
    d = start
    conjunctions = ConjunctionList()
    if output_csv:
        print 'name,start,end,time,longname,URL,image,image width,image height,image credit'
    else:
        print "Looking for planetary events between %s and %s:\n" % \
            (datestr(d), datestr(end))

    def check_if_planet_up(planet, d):
        '''If the planet is currently up, do housekeeping to remember
           that status, then return True if it's up, False otherwise.
        '''
        global crescents, planets_up

        if planet.alt < min_alt:     # planet is not up
            return False

        if not planets_up[planet.name]:
            planets_up[planet.name] = d
        visible_planets.append(planet)

        if planet.name not in crescents.keys():
            return True

        # Is it a crescent? Update its crescent dates.
        if planet.phase <= crescent_percent:     # It's a crescent now
            if not crescents[planet.name][0]:
                crescents[planet.name][0] = d
            else:
                crescents[planet.name][1] = d

        return True

    while d < end:
        observer.date = d
        sunset = observer.previous_setting(sun)
        # sunrise = observer.next_rising(sun)
        # print "Sunset:", sunset, " Sunrise:", sunrise

        midnight = list(observer.date.tuple())
        midnight[3:6] = [toolate, 0, 0]
        midnight = ephem.date(tuple(midnight))

        # We have two lists of planets: planets_up and visible_planets.
        # planets_up is a dictionary of the time we first saw each planet
        # in its current apparition. It's global, and used by finish_planet.
        # visible_planets is a list of planets currently visible.
        visible_planets = []
        for planet in planets:
            # A planet is observable this evening (not morning)
            # if its altitude at sunset OR its altitude at midnight
            # is greater than a threshold, which we'll set at 10 degrees.
            observer.date = sunset
            planet.compute(observer)
            # print planet.name, "alt at sunset:", planet.alt
            if not check_if_planet_up(planet, observer.date):
                # If it's not up at sunset, try midnight
                observer.date = midnight
                if observer.date < sunset:
                    observer.date += oneday
                planet.compute(observer)
                if not check_if_planet_up(planet, observer.date):
                    # Planet is not up. Was it up yesterday?
                    if planets_up[planet.name]:
                        finish_planet(planet.name, observer.date)

        # print datestr(d), "visible planets:", \
        #     ' '.join([p.name for p in visible_planets])
        # print "planets_up:", planets_up

        # Done with computing visible_planets.
        # Now look for conjunctions, anything closer than 5 degrees.
        # Split the difference, use a time halfway between sunset and midnight.
        saw_conjunction = False
        observer.date = ephem.date((sunset + midnight)/2)
        if len(visible_planets) > 1:
            for p, planet in enumerate(visible_planets):
                for planet2 in visible_planets[p+1:]:
                    sep = ephem.separation(planet, planet2)
                    if sep <= max_sep:
                        # print datestr(observer.date), planet.name, \
                        #     planet2.name, sepstr(sep)
                        conjunctions.add(planet.name, planet2.name,
                                         observer.date, sep)
                        saw_conjunction = True
        if not saw_conjunction:
            conjunctions.closeout()

        # Add a day:
        d = ephem.date(d + oneday)

    for p in visible_planets:
        finish_planet(p.name, d)

if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == "-c":
        output_csv = True
        sys.argv = sys.argv[1:]
    else:
        output_csv = False

    if len(sys.argv) > 1:
        start = ephem.date(sys.argv[1])
    else:
        start = ephem.date('2014/8/15 04:00')

    if len(sys.argv) > 2:
        end = ephem.date(sys.argv[2])
    else:
        end = ephem.date('2017/1/1')

    # Loop from start date to end date,
    # using a time of 10pm MST, which is 4am GMT the following day.
    # end = ephem.date('2016/1/1')
    # For testing, this spans a Mars/Moon/Venus conjunction:
    # d = ephem.date('2015/2/10 04:00')
    # end = ephem.date('2015/3/10')

    observer = ephem.Observer()
    observer.name = "Los Alamos"
    observer.lon = '-106.2978'
    observer.lat = '35.8911'
    observer.elevation = 2286   # meters, though the docs don't actually say

    # What hour GMT corresponds to midnight here?
    # Note: we're not smart about time zones. This will calculate
    # a time based on the time zone offset right now, whether we're
    # currently in DST or not.
    # And for now we don't even calculate it, just hardwire it.
    midnight = 7

    try:
        run(start, end, observer, midnight)
    except KeyboardInterrupt:
        print "Interrupted"
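The conjunction test in run() above boils down to a single PyEphem call; here is a minimal standalone sketch of the same check (PyEphem assumed installed; the date, site, and bodies are illustrative, not output from the script):

import math
import ephem

obs = ephem.Observer()
obs.lon, obs.lat = '-106.2978', '35.8911'      # Los Alamos, as in the script
obs.date = ephem.date('2015/2/21 02:00')

venus = ephem.Venus(obs)                       # computed for obs date/site
mars = ephem.Mars(obs)
sep = ephem.separation(venus, mars)            # an ephem.Angle, in radians
print "Venus-Mars separation: %.2f deg" % (float(sep) * 180. / math.pi)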
gpl-2.0
-6355210371146874000
34.821782
312
0.551686
false
k----n/InformationRetrievalSystem
data_parser.py
1
4279
#
# Copyright 2015 Kalvin Eng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import re
import sys

def parseData(main_file):
    count = 1
    reviews = list()
    pterms = list()
    rterms = list()
    scores = list()
    quote = '&' + 'quot' + ';'
    replace_punctuation = re.compile('[%s]' % re.escape('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~'))

    for x in main_file:
        if "product/productId: " in x:
            reviews.append(str(count)+",")
            str_entry = x.replace("product/productId: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
        elif "product/title: " in x:
            str_entry = x.replace("product/title: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            terms = replace_punctuation.sub(" ", str_entry)
            terms = terms.split(" ")
            for term in terms:
                if len(term)>2:
                    pterms.append(term.lower()+",")
                    pterms.append(str(count)+"\n")
            reviews.append('"'+str_entry+'",')
        elif "product/price: " in x:
            str_entry = x.replace("product/price: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
        elif "review/userId: " in x:
            str_entry = x.replace("review/userId: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
        elif "review/profileName: " in x:
            str_entry = x.replace("review/profileName: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append('"'+str_entry+'",')
        elif "review/helpfulness: " in x:
            str_entry = x.replace("review/helpfulness: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
        elif "review/score: " in x:
            str_entry = x.replace("review/score: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
            scores.append(str(str_entry)+",")
            scores.append(str(count)+"\n")
        elif "review/time: " in x:
            str_entry = x.replace("review/time: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append(str_entry+",")
        elif "review/summary: " in x:
            str_entry = x.replace("review/summary: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            terms = replace_punctuation.sub(" ", str_entry)
            terms = terms.split(" ")
            for term in terms:
                if len(term)>2:
                    rterms.append(term.lower()+",")
                    rterms.append(str(count)+"\n")
            reviews.append('"'+str_entry+'",')
        elif "review/text: " in x:
            str_entry = x.replace("review/text: ","").replace("\\","\\\\").replace('"',quote).strip('\n')
            reviews.append('"'+str_entry+'"\n')
            terms = replace_punctuation.sub(" ", str_entry)
            terms = terms.split(" ")
            for term in terms:
                if len(term)>2:
                    rterms.append(term.lower()+",")
                    rterms.append(str(count)+"\n")
            count += 1

    with open("pterms.txt","a") as pterms_file:
        for entry in pterms:
            pterms_file.write(entry)
    with open("scores.txt","a") as scores_file:
        for entry in scores:
            scores_file.write(str(entry))
    with open("rterms.txt","a") as rterms_file:
        for entry in rterms:
            rterms_file.write(str(entry))
    with open("reviews.txt","a") as reviews_file:
        for entry in reviews:
            reviews_file.write(entry)

if __name__ == "__main__":
    parseData(sys.stdin)
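A quick way to exercise parseData() outside its stdin pipeline; the record below is hypothetical but follows the "field: value" lines the branches above match on (the module is assumed importable as data_parser):

import io
from data_parser import parseData   # assumes the file is on the import path

sample = io.StringIO(
    "product/productId: B000TEST00\n"
    "product/title: An Example Title\n"
    "review/score: 5.0\n"
    "review/summary: Short and sweet\n"
    "review/text: Would read again.\n")
parseData(sample)   # appends rows to pterms/scores/rterms/reviews .txt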
apache-2.0
6690061454957329000
37.909091
112
0.525123
false
boada/ICD
sandbox/legacy_plot_code/plot_sersic_vs_icd_vs_colorgrad.py
1
2047
#!/usr/bin/env python
# File: plot_sersic_vs_icd_vs_colorgrad.py
# Created on: Thu 27 Sep 2012 11:09:10 AM CDT
# Last Change: Fri Sep 28 16:28:36 2012
# Purpose of script: <+INSERT+>
# Author: Steven Boada

import pylab as pyl
from mk_galaxy_struc import mk_galaxy_struc

def plot_sersic_vs_icd_vs_colorgrad():
    galaxies = mk_galaxy_struc()

    f1 = pyl.figure(1, figsize=(8,8))
    f1s1 = f1.add_subplot(221)
    f1s2 = f1.add_subplot(222)
    f1s3 = f1.add_subplot(223)
    f1s4 = f1.add_subplot(224)

    for galaxy in galaxies:
        if galaxy.ston_I > 30. and galaxy.sersic != None:
            if 1.5 < galaxy.z and galaxy.z < 2.:
                col1 = f1s1.scatter(galaxy.sersic, galaxy.ICD_IH, s=50, c='r',
                                    edgecolor='w', zorder=0, lw='1',
                                    label='1.5 < z < 2')
            if 2. < galaxy.z and galaxy.z < 2.5:
                col2 = f1s2.scatter(galaxy.sersic, galaxy.ICD_IH, s=50, c='g',
                                    edgecolor='w', zorder=1)
            if 2.5 < galaxy.z and galaxy.z < 3.:
                col3 = f1s3.scatter(galaxy.sersic, galaxy.ICD_IH, s=50, c='b',
                                    edgecolor='w', zorder=2)
            if 3. < galaxy.z and galaxy.z < 3.5:
                col4 = f1s4.scatter(galaxy.sersic, galaxy.ICD_IH, s=50, c='k',
                                    edgecolor='w', zorder=3)

    f1s1.set_xlim(-0.1, 8.5)
    f1s1.set_ylim(-0.01, 0.3)
    f1s2.set_xlim(-0.1, 8.5)
    f1s2.set_ylim(-0.01, 0.3)
    f1s3.set_xlim(-0.1, 8.5)
    f1s3.set_ylim(-0.01, 0.3)
    f1s4.set_xlim(-0.1, 8.5)
    f1s4.set_ylim(-0.01, 0.3)

    f1s1.legend([col1], ['1.5 < z < 2'], scatterpoints=1)
    f1s2.legend([col2], ['2 < z < 2.5'], scatterpoints=1)
    f1s3.legend([col3], ['2.5 < z < 3'], scatterpoints=1)
    f1s4.legend([col4], ['3 < z < 3.5'], scatterpoints=1)

    f1s1.set_xlabel('Sersic Index, n')
    f1s1.set_ylabel(r'$\xi[I,H]$')

    #pyl.subplots_adjust(left=0.15,bottom=0.15)
    # was bbox='tight', which savefig does not recognize
    pyl.savefig('sersic_vs_icd_vs_colorgrad.eps', bbox_inches='tight')
    pyl.show()

if __name__ == '__main__':
    plot_sersic_vs_icd_vs_colorgrad()
mit
-1664582735678096600
33.694915
77
0.562775
false
lpfann/fri
fri/model/classification.py
1
4248
import cvxpy as cvx
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import fbeta_score, classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_X_y
from sklearn.utils.multiclass import unique_labels

from fri.model.base_cvxproblem import Relevance_CVXProblem
from fri.model.base_initmodel import InitModel
from .base_type import ProblemType


class Classification(ProblemType):
    @classmethod
    def parameters(cls):
        return ["C"]

    @property
    def get_initmodel_template(cls):
        return Classification_SVM

    @property
    def get_cvxproblem_template(cls):
        return Classification_Relevance_Bound

    def relax_factors(cls):
        return ["loss_slack", "w_l1_slack"]

    def preprocessing(self, data, **kwargs):
        X, y = data

        # Check that X and y have correct shape
        X, y = check_X_y(X, y)

        # Store the classes seen during fit
        classes_ = unique_labels(y)

        if len(classes_) > 2:
            raise ValueError("Only binary class data supported")

        # Negative class is set to -1 for decision surface
        y = preprocessing.LabelEncoder().fit_transform(y)
        y[y == 0] = -1

        return X, y


class Classification_SVM(InitModel):
    def __init__(self, C=1):
        super().__init__()
        self.C = C

    def fit(self, X, y, **kwargs):
        (n, d) = X.shape

        C = self.get_params()["C"]

        w = cvx.Variable(shape=(d), name="w")
        slack = cvx.Variable(shape=(n), name="slack")
        b = cvx.Variable(name="bias")

        objective = cvx.Minimize(cvx.norm(w, 1) + C * cvx.sum(slack))
        constraints = [cvx.multiply(y.T, X @ w + b) >= 1 - slack, slack >= 0]

        # Solve problem.
        problem = cvx.Problem(objective, constraints)
        problem.solve(**self.SOLVER_PARAMS)

        w = w.value
        b = b.value
        slack = np.asarray(slack.value).flatten()
        self.model_state = {"w": w, "b": b, "slack": slack}

        loss = np.sum(slack)
        w_l1 = np.linalg.norm(w, ord=1)
        self.constraints = {"loss": loss, "w_l1": w_l1}
        return self

    def predict(self, X):
        w = self.model_state["w"]
        b = self.model_state["b"]

        y = np.dot(X, w) + b >= 0
        y = y.astype(int)
        y[y == 0] = -1
        return y

    def score(self, X, y, **kwargs):
        prediction = self.predict(X)

        # Negative class is set to -1 for decision surface
        y = LabelEncoder().fit_transform(y)
        y[y == 0] = -1

        # Using weighted f1 score to have a stable score for imbalanced datasets
        score = fbeta_score(y, prediction, beta=1, average="weighted")
        if "verbose" in kwargs:
            return classification_report(y, prediction)
        return score


class Classification_Relevance_Bound(Relevance_CVXProblem):
    def init_objective_UB(self, sign=None, **kwargs):
        self.add_constraint(
            self.feature_relevance <= sign * self.w[self.current_feature]
        )
        self._objective = cvx.Maximize(self.feature_relevance)

    def init_objective_LB(self, **kwargs):
        self.add_constraint(
            cvx.abs(self.w[self.current_feature]) <= self.feature_relevance
        )
        self._objective = cvx.Minimize(self.feature_relevance)

    def _init_constraints(self, parameters, init_model_constraints):
        # Upper constraints from initial model
        l1_w = init_model_constraints["w_l1"]
        init_loss = init_model_constraints["loss"]
        C = parameters["C"]

        # New Variables
        self.w = cvx.Variable(shape=(self.d), name="w")
        self.b = cvx.Variable(name="b")
        self.slack = cvx.Variable(shape=(self.n), nonneg=True, name="slack")

        # New Constraints
        distance_from_plane = cvx.multiply(self.y, self.X @ self.w + self.b)
        self.loss = cvx.sum(self.slack)
        self.weight_norm = cvx.norm(self.w, 1)

        self.add_constraint(distance_from_plane >= 1 - self.slack)
        self.add_constraint(self.weight_norm <= l1_w)
        self.add_constraint(C * self.loss <= C * init_loss)

        self.feature_relevance = cvx.Variable(nonneg=True, name="Feature Relevance")
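The fit above poses an L1-regularized soft-margin SVM in cvxpy; here is a self-contained sketch of the same optimization on random toy data (cvxpy and numpy assumed installed; this is not part of the fri package):

import cvxpy as cvx
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(40, 5))
y = np.sign(X[:, 0] + 0.1 * rng.normal(size=40))   # labels in {-1, +1}

C = 1.0
w = cvx.Variable(5)
b = cvx.Variable()
slack = cvx.Variable(40, nonneg=True)

# hinge-loss constraints with an L1 penalty on w, as in Classification_SVM.fit
prob = cvx.Problem(cvx.Minimize(cvx.norm(w, 1) + C * cvx.sum(slack)),
                   [cvx.multiply(y, X @ w + b) >= 1 - slack])
prob.solve()
print(w.value, b.value)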
mit
701556549206578400
30.235294
84
0.606403
false
Kamalheib/senty
senty/tools/run_multi.py
1
2953
#!/usr/bin/env python
"""
Senty Project
Copyright(c) 2017 Senty.

This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.

This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.

You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.

The full GNU General Public License is included in this distribution in
the file called "COPYING".

Contact Information:
Kamal Heib <[email protected]>
"""

import sys
from argparse import ArgumentParser
from senty.modules.host import Host
from senty.utils.logger import Logger


class RunMulti(object):

    def get_logger(self):
        if not hasattr(self, "_logger"):
            self._logger = Logger(self.__class__.__name__, self.verbose)
        return self._logger

    def get_parser(self):
        if not hasattr(self, "_parser"):
            self._parser = ArgumentParser(self.__class__.__name__)
        return self._parser

    def get_hosts(self):
        if not hasattr(self, '_hosts'):
            self._hosts = set()
            for ip in self.ips:
                self._hosts.add(Host(ip, self.Logger, 0))
        return self._hosts

    def run_commands(self):
        self._hostToPid = {}
        for host in self.Hosts:
            self._hostToPid[host] = host.run_process(self.command)

    def validate_commands(self):
        for host in self.Hosts:
            (rc, out) = host.wait_process(self._hostToPid[host])
            if rc:
                self.Logger.pr_err("-E- Failed to run %s on %s" % (self.command, host.IP))
            self.Logger.pr_info("-" * 60)
            self.Logger.pr_info("--==%s==--" % host.IP)
            self.Logger.pr_info("-" * 60)
            self.Logger.pr_info("%s" % out)
            self.Logger.pr_info("-" * 60)

    def parse_args(self, args):
        self.Parser.add_argument('-v', '--verbose', help='log message level',
                                 default='info', choices=['info', 'debug'])
        self.Parser.add_argument('-c', '--command',
                                 help='command to run on multi hosts')
        self.Parser.add_argument('-i', '--ips',
                                 help='list of ip addresses or hosts', nargs='+')
        self.Parser.parse_args(namespace=self, args=args)

    def execute(self, args):
        self.parse_args(args)
        self.run_commands()
        self.validate_commands()

    Logger = property(get_logger)
    Parser = property(get_parser)
    Hosts = property(get_hosts)


if __name__ == '__main__':
    run_multi = RunMulti()
    run_multi.execute(sys.argv[1:])
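An invocation sketch, assuming the senty package is installed and the hosts are reachable (the addresses are placeholders):

from senty.tools.run_multi import RunMulti

# equivalent to: python run_multi.py -c 'uname -r' -i 10.0.0.1 10.0.0.2
RunMulti().execute(['-c', 'uname -r', '-i', '10.0.0.1', '10.0.0.2'])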
gpl-2.0
308224784992616700
32.179775
90
0.604809
false
the0forge/sp
frontend/views/myob.py
1
4311
from decimal import Decimal

from django.http import HttpResponse
from django.views.generic import ListView

from db_settings.models import Settings as db_s
from ..mixins import MyobMixin
from ..models import Customer, Order


class ServiceSaleList(MyobMixin, ListView):
    model = Order
    fields = (
        'Co./Last Name', 'First Name', 'Addr 1 - Line 1', '- Line 2',
        '- Line 3', '- Line 4', 'Invoice #', 'Date', 'Customer PO',
        'Account #', 'Amount', 'Inc-Tax Amount', 'Tax Code',
        'Non-GST Amount', 'GST Amount', 'Freight Amount', 'Card ID',
        'Record ID'
    )

    def get(self, request, *args, **kwargs):
        ret = ['\t'.join(self.fields)]

        myob_account = db_s.objects.get(key='myob_account').value
        myob_tax_code = db_s.objects.get(key='myob_tax_code').value
        myob_use_addr_only = db_s.objects.get(key='myob_use_addr_only').value
        myob_use_name_in_addr = db_s.objects.get(key='myob_use_name_in_addr').value

        for ob in self.get_queryset():
            data = [
                ob.customer.parsed_name['l'],
                ob.customer.parsed_name['f'],
                '',  # 2 see below
                '',  # 3 ...
                '',  # 4 ...
                '',  # 5 see below
                getattr(ob.last_invoice, 'number', 'no-invoice'),
                ob.order_date.strftime("%d.%m.%Y"),
                ob.id,
                myob_account,
                '$%s' % ob.summary['gross_price'],
                '$%s' % ob.summary['net_price'],
                myob_tax_code,
                '$%s' % ob.summary['gross_price'],
                '$%s' % ob.summary['tax'],
                '$%s' % ob.shipping_cost,
                "*None",
                "*None",
            ]
            if bool(myob_use_addr_only):
                if bool(myob_use_name_in_addr):
                    data[2] = ob.customer.name
                    data[3] = ob.customer.address_line_1
                    data[4] = ob.customer.address_line_2
                else:
                    data[2] = ob.customer.address_line_1
                    data[3] = ob.customer.address_line_2
            else:
                if bool(myob_use_name_in_addr):
                    data[2] = ob.customer.name
                    data[3] = ob.customer.address_line_1
                    data[4] = '%s %s %s' % (ob.customer.suburb, ob.customer.state,
                                            ob.customer.postcode)
                    data[5] = ob.customer.address_line_2
                else:
                    data[2] = '%s, %s' % (ob.customer.address_line_1,
                                          ob.customer.address_line_2)
                    data[3] = ob.customer.suburb
                    data[4] = ob.customer.state
                    data[5] = ob.customer.postcode

            data = map(lambda x: str(x) if isinstance(x, (int, Decimal)) else x, data)
            ret.append('\t'.join(data))

        return HttpResponse('\n\n'.join(ret), mimetype='text/plain')

service_sale_list = ServiceSaleList.as_view()


class CustomerList(MyobMixin, ListView):
    model = Customer
    fields = (
        'Co./Last Name', 'First Name', 'Card ID', 'Addr 1 - Line 1',
        '- Line 2', '- Line 3', '- Line 4', '- City', '- State',
        '- Postcode', '- Country', '- Phone # 1', '- Email',
        '- Contact Name'
    )

    def get(self, request, *args, **kwargs):
        ret = ['\t'.join(self.fields)]
        for ob in self.get_queryset():
            data = [
                ob.parsed_name['l'],
                ob.parsed_name['f'],
                ob.from_src_company_id or "*None",
                ob.address_line_1,
                ob.address_line_2,
                '',
                '',
                ob.suburb,
                ob.state,
                ob.postcode,
                ob.country,
                ob.telephone,
                ob.email,
                ob.contacts_data
            ]
            data = map(lambda x: str(x) if isinstance(x, (int, Decimal)) else x, data)
            ret.append('\t'.join(data))
        return HttpResponse('\n\n'.join(ret), mimetype='text/plain')

customer_list = CustomerList.as_view()
gpl-3.0
1502494154253292500
31.421053
104
0.46161
false
glormph/msstitch
src/app/lookups/sqlite/base.py
1
23651
import sqlite3

# Each table maps to a list of column/constraint definitions; create_tables()
# joins the list with ', ', so every clause (including each FOREIGN KEY)
# must be its own element for the generated SQL to be valid.
mslookup_tables = {
    'biosets': ['set_id INTEGER PRIMARY KEY', 'set_name TEXT'],
    'mzmlfiles': ['mzmlfile_id INTEGER PRIMARY KEY', 'mzmlfilename TEXT',
                  'set_id INTEGER',
                  'FOREIGN KEY(set_id) REFERENCES biosets'],
    'mzml': ['spectra_id TEXT PRIMARY KEY', 'mzmlfile_id INTEGER',
             'scan_sid TEXT', 'charge INTEGER', 'mz DOUBLE',
             'retention_time DOUBLE', 'ion_injection_time DOUBLE',
             'FOREIGN KEY(mzmlfile_id) REFERENCES mzmlfiles'],
    'ioninjtime': ['spectra_id TEXT', 'ion_injection_time DOUBLE',
                   'FOREIGN KEY(spectra_id) REFERENCES mzml'],
    'ionmob': ['spectra_id TEXT', 'ion_mobility DOUBLE',
               'FOREIGN KEY(spectra_id) REFERENCES mzml'],
    'isobaric_channels': ['channel_id INTEGER PRIMARY KEY', 'channel_name TEXT'],
    'isobaric_quant': ['spectra_id TEXT', 'channel_id INTEGER', 'intensity REAL',
                       'FOREIGN KEY(spectra_id) REFERENCES mzml',
                       'FOREIGN KEY(channel_id) REFERENCES isobaric_channels'],
    # ms1_quant has no spectra_id reference since it contains
    # features and Im not sure if they can be linked to
    # spectra_ids or that retention time is averaged
    'ms1_quant': ['feature_id INTEGER PRIMARY KEY', 'mzmlfile_id INTEGER',
                  'retention_time REAL', 'mz REAL', 'charge INTEGER',
                  'intensity REAL',
                  'FOREIGN KEY(mzmlfile_id) REFERENCES mzmlfiles'],
    'ms1_fwhm': ['feature_id INTEGER PRIMARY KEY', 'fwhm REAL',
                 'FOREIGN KEY(feature_id) REFERENCES ms1_quant'],
    'ms1_align': ['spectra_id TEXT', 'feature_id INTEGER',
                  'FOREIGN KEY(spectra_id) REFERENCES mzml',
                  'FOREIGN KEY(feature_id) REFERENCES ms1_quant'],
    'peptide_sequences': ['pep_id INTEGER PRIMARY KEY', 'sequence TEXT'],
    'psms': ['psm_id TEXT PRIMARY KEY NOT NULL', 'pep_id INTEGER',
             'score TEXT', 'spectra_id TEXT',
             'FOREIGN KEY(pep_id) REFERENCES peptide_sequences',
             'FOREIGN KEY(spectra_id) REFERENCES mzml'],
    'psmrows': ['psm_id TEXT', 'rownr INTEGER',
                'FOREIGN KEY(psm_id) REFERENCES psms(psm_id)'],
    'proteins': ['pacc_id INTEGER PRIMARY KEY', 'protein_acc TEXT UNIQUE'],
    'gene_tables': ['genetable_id INTEGER PRIMARY KEY', 'set_id INTEGER',
                    'filename TEXT',
                    'FOREIGN KEY(set_id) REFERENCES biosets'],
    'protein_tables': ['prottable_id INTEGER PRIMARY KEY', 'set_id INTEGER',
                       'filename TEXT',
                       'FOREIGN KEY(set_id) REFERENCES biosets'],
    'peptide_tables': ['peptable_id INTEGER PRIMARY KEY', 'set_id INTEGER',
                       'filename TEXT',
                       'FOREIGN KEY(set_id) REFERENCES biosets'],
    'pepquant_channels': ['channel_id INTEGER PRIMARY KEY',
                          'peptable_id INTEGER', 'channel_name TEXT',
                          'amount_psms_name TEXT',
                          'FOREIGN KEY(peptable_id) REFERENCES peptide_tables(peptable_id)'],
    'peptide_iso_quanted': ['peptidequant_id INTEGER PRIMARY KEY',
                            'pep_id INTEGER', 'channel_id INTEGER',
                            'quantvalue REAL', 'amount_psms INTEGER',
                            'FOREIGN KEY(pep_id) REFERENCES peptide_sequences(pep_id)',
                            'FOREIGN KEY(channel_id) REFERENCES pepquant_channels(channel_id)'],
    'peptide_precur_quanted': ['pep_precquant_id INTEGER PRIMARY KEY',
                               'pep_id INTEGER', 'peptable_id INTEGER',
                               'quant REAL',
                               'FOREIGN KEY(pep_id) REFERENCES peptide_sequences(pep_id)',
                               'FOREIGN KEY(peptable_id) REFERENCES peptide_tables(peptable_id)'],
    'peptide_fdr': ['pep_id INTEGER', 'peptable_id INTEGER', 'fdr DOUBLE',
                    'FOREIGN KEY(pep_id) REFERENCES peptide_sequences(pep_id)',
                    'FOREIGN KEY(peptable_id) REFERENCES peptide_tables(peptable_id)'],
    'protein_precur_quanted': ['prot_precquant_id INTEGER PRIMARY KEY',
                               'pacc_id INTEGER', 'prottable_id INTEGER',
                               'quant REAL',
                               'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)',
                               'FOREIGN KEY(prottable_id) REFERENCES protein_tables(prottable_id)'],
    'gene_precur_quanted': ['gene_precquant_id INTEGER PRIMARY KEY',
                            'gene_id INTEGER', 'genetable_id INTEGER',
                            'quant REAL',
                            'FOREIGN KEY(gene_id) REFERENCES genes(gene_id)',
                            'FOREIGN KEY(genetable_id) REFERENCES gene_tables(genetable_id)'],
    'assoc_precur_quanted': ['gene_precquant_id INTEGER PRIMARY KEY',
                             'gn_id INTEGER', 'genetable_id INTEGER',
                             'quant REAL',
                             'FOREIGN KEY(gn_id) REFERENCES associated_ids(gn_id)',
                             'FOREIGN KEY(genetable_id) REFERENCES gene_tables(genetable_id)'],
    'protein_iso_quanted': ['proteinquant_id INTEGER PRIMARY KEY',
                            'pacc_id INTEGER', 'channel_id INTEGER',
                            'quantvalue REAL', 'amount_psms INTEGER',
                            'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)',
                            'FOREIGN KEY(channel_id) REFERENCES protquant_channels(channel_id)'],
    'gene_iso_quanted': ['genequant_id INTEGER PRIMARY KEY',
                         'gene_id INTEGER', 'channel_id INTEGER',
                         'quantvalue REAL', 'amount_psms INTEGER',
                         'FOREIGN KEY(gene_id) REFERENCES genes(gene_id)',
                         'FOREIGN KEY(channel_id) REFERENCES genequant_channels(channel_id)'],
    'assoc_iso_quanted': ['genequant_id INTEGER PRIMARY KEY',
                          'gn_id INTEGER', 'channel_id INTEGER',
                          'quantvalue REAL', 'amount_psms INTEGER',
                          'FOREIGN KEY(gn_id) REFERENCES associated_ids(gn_id)',
                          'FOREIGN KEY(channel_id) REFERENCES genequant_channels(channel_id)'],
    'genequant_channels': ['channel_id INTEGER PRIMARY KEY',
                           'genetable_id INTEGER', 'channel_name TEXT',
                           'amount_psms_name TEXT',
                           'FOREIGN KEY(genetable_id) REFERENCES gene_tables(genetable_id)'],
    'protquant_channels': ['channel_id INTEGER PRIMARY KEY',
                           'prottable_id INTEGER', 'channel_name TEXT',
                           'amount_psms_name TEXT',
                           'FOREIGN KEY(prottable_id) REFERENCES protein_tables(prottable_id)'],
    'gene_fdr': ['gene_id INTEGER', 'genetable_id INTEGER', 'fdr DOUBLE',
                 'FOREIGN KEY(gene_id) REFERENCES genes(gene_id)',
                 'FOREIGN KEY(genetable_id) REFERENCES gene_tables(genetable_id)'],
    'assoc_fdr': ['gn_id INTEGER', 'genetable_id INTEGER', 'fdr DOUBLE',
                  'FOREIGN KEY(gn_id) REFERENCES associated_ids(gn_id)',
                  'FOREIGN KEY(genetable_id) REFERENCES gene_tables(genetable_id)'],
    'protein_fdr': ['pacc_id INTEGER', 'prottable_id INTEGER', 'fdr DOUBLE',
                    'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)',
                    'FOREIGN KEY(prottable_id) REFERENCES protein_tables(prottable_id)'],
    'protein_psm': ['protein_acc TEXT', 'psm_id TEXT',
                    'FOREIGN KEY(protein_acc) REFERENCES proteins(protein_acc)',
                    'FOREIGN KEY(psm_id) REFERENCES psms(psm_id)'],
    'protein_evidence': ['protein_acc TEXT', 'evidence_lvl REAL',
                         'FOREIGN KEY(protein_acc) REFERENCES proteins(protein_acc)'],
    'protein_seq': ['protein_acc TEXT', 'sequence TEXT',
                    'FOREIGN KEY(protein_acc) REFERENCES proteins(protein_acc)'],
    'protein_coverage': ['protein_acc TEXT', 'coverage REAL',
                         'FOREIGN KEY(protein_acc) REFERENCES proteins(protein_acc)'],
    'protein_group_master': ['master_id INTEGER PRIMARY KEY', 'pacc_id INTEGER',
                             'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)'],
    'protein_group_content': ['protein_acc TEXT', 'master_id INTEGER',
                              'peptide_count INTEGER', 'psm_count INTEGER',
                              'protein_score INTEGER',
                              'FOREIGN KEY(protein_acc) REFERENCES proteins(protein_acc)',
                              'FOREIGN KEY(master_id) REFERENCES protein_group_master(master_id)'],
    'psm_protein_groups': ['psm_id TEXT', 'master_id INTEGER',
                           'FOREIGN KEY(psm_id) REFERENCES psms(psm_id)',
                           'FOREIGN KEY(master_id) REFERENCES protein_group_master(master_id)'],
    'genes': ['gene_id INTEGER PRIMARY KEY', 'gene_acc TEXT'],
    'associated_ids': ['gn_id INTEGER PRIMARY KEY', 'assoc_id TEXT'],
    'ensg_proteins': ['gene_id INTEGER', 'pacc_id INTEGER',
                      'FOREIGN KEY(gene_id) REFERENCES genes(gene_id)',
                      'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)'],
    'genename_proteins': ['gn_id INTEGER', 'pacc_id INTEGER',
                          'FOREIGN KEY(gn_id) REFERENCES genes(gene_id)',
                          'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)'],
    'prot_desc': ['pacc_id INTEGER', 'description TEXT',
                  'FOREIGN KEY(pacc_id) REFERENCES proteins(pacc_id)'],
    'known_searchspace': ['seqs TEXT UNIQUE'],
    'protein_peptides': ['seq TEXT', 'protid TEXT', 'pos INTEGER'],
}

"""
PGM: master, gene, assoc, pgc, nrpgc, cov (all not pooled),
put it in a dict lookup?

SELECT pgm.protein_acc, g.gene_acc, aid.assoc_id, cov.coverage,
       sub.pgc, sub.pgcnr
from protein_group_master as pgm
left outer join (select master_id, group_concat(protein_acc) as pgc,
                 count(protein_acc) as pgcnr
                 from protein_group_content group by master_id) as sub
    on sub.master_id=pgm.master_id
left outer join protein_coverage as cov on pgm.protein_acc=cov.protein_acc
left outer join genes as g on pgm.protein_acc=g.protein_acc
left outer join associated_ids as aid on aid.protein_acc=pgm.protein_acc;

nr_psm, nrpep nrunipep: (FIXME add FDR, quant
-----
SELECT master_id, bs.set_id, COUNT(DISTINCT ppg.psm_id),
       COUNT(DISTINCT ps.pep_id), COUNT(DISTINCT uni.pep_id)
FROM psm_protein_groups AS ppg
INNER JOIN psms ON ppg.psm_id=psms.psm_id
INNER JOIN peptide_sequences AS ps ON psms.pep_id=ps.pep_id
INNER JOIN mzml ON mzml.spectra_id=psms.spectra_id
INNER JOIN mzmlfiles AS mzf ON mzf.mzmlfile_id=mzml.mzmlfile_id
INNER JOIN biosets AS bs ON bs.set_id=mzf.set_id
LEFT OUTER JOIN (
    SELECT ppg.pep_id AS pep_id from (
        SELECT psms.pep_id AS pep_id,
               COUNT(DISTINCT ppg.master_id) AS nrpep
        FROM psm_protein_groups AS ppg INNER JOIN psms USING(psm_id)
        GROUP BY psms.pep_id
    ) AS ppg WHERE ppg.nrpep==1
) AS uni ON uni.pep_id=ps.pep_id
GROUP BY master_id, bs.set_id

select * from (select master_id, bs.set_id, count(ppg.psm_id) as nrpsm,
    COUNT(distinct ps.pep_id) AS nrpep
    from psm_protein_groups AS ppg
    inner join psms ON ppg.psm_id=psms.psm_id
    inner join peptide_sequences AS ps on psms.pep_id=ps.pep_id
    inner join mzml on mzml.spectra_id=psms.spectra_id
    inner join mzmlfiles as mzf on mzf.mzmlfile_id=mzml.mzmlfile_id
    inner join biosets as bs on bs.set_id=mzf.set_id
    group by master_id, bs.set_id) as sub
where sub.nrpsm>2;

nr_unipep, fdr, ms1, iso

select pgm.protein_acc from protein_group_master as pgm
inner join (select psm_id from group_concat(protein_psm)
            group_by protein_acc) as pp
    on pgm.protein_acc=pp.protein_acc;
"""


class DatabaseConnection(object):
    def __init__(self, fn=None):
        """SQLite connecting when given filename"""
        self.fn = fn
        if self.fn is not None:
            self.connect(self.fn)

    def get_fn(self):
        """Returns lookup filename"""
        return self.fn

    def create_tables(self, tables):
        """Creates database tables in sqlite lookup db"""
        cursor = self.get_cursor()
        for table in tables:
            columns = mslookup_tables[table]
            try:
                cursor.execute('CREATE TABLE {0}({1})'.format(
                    table, ', '.join(columns)))
            except sqlite3.OperationalError as error:
                print('WARNING: Table {} already exists in database, will '
                      'add to existing tables instead of creating '
                      'new.'.format(table))
            else:
                self.conn.commit()

    def connect(self, fn):
        """SQLite connect method initialize db"""
        self.conn = sqlite3.connect(fn)
        cur = self.get_cursor()
        cur.execute('PRAGMA page_size=4096')
        cur.execute('PRAGMA FOREIGN_KEYS=ON')
        cur.execute('PRAGMA cache_size=10000')
        cur.execute('PRAGMA journal_mode=MEMORY')
        cur.execute('PRAGMA synchronous=OFF')

    def get_cursor(self):
        """Quickly get cursor, abstracting connection"""
        return self.conn.cursor()

    def close_connection(self):
        """Close connection to db, abstracts connection object"""
        self.conn.close()

    def index_column(self, index_name, table, column, unique=False):
        """Called by interfaces to index specific column in table"""
        cursor = self.get_cursor()
        unique = 'unique' if unique else ''
        try:
            cursor.execute(
                'CREATE {3} INDEX {0} on {1}({2})'.format(index_name, table,
                                                          column, unique))
        except sqlite3.OperationalError as error:
            # Skipping index creation and assuming it exists already
            pass
        else:
            self.conn.commit()

    def get_inclause(self, inlist):
        """Returns SQL IN clauses"""
        return 'IN ({0})'.format(', '.join('?' * len(inlist)))

    def get_sql_select(self, columns, table, distinct=False):
        """Creates and returns an SQL SELECT statement"""
        sql = 'SELECT {0} {1} FROM {2}'
        dist = {True: 'DISTINCT', False: ''}[distinct]
        return sql.format(dist, ', '.join(columns), table)

    def store_many(self, sql, values):
        """Abstraction over executemany method"""
        cursor = self.get_cursor()
        cursor.executemany(sql, values)
        self.conn.commit()

    def execute_sql(self, sql):
        """Executes SQL and returns cursor for it"""
        cursor = self.get_cursor()
        cursor.execute(sql)
        return cursor


class ResultLookupInterface(DatabaseConnection):
    """Connection subclass shared by result lookup interfaces that need to
    query the database when storing to get e.g. spectra id"""

    def get_mzmlfile_map(self):
        """Returns dict of mzmlfilenames and their db ids"""
        cursor = self.get_cursor()
        cursor.execute('SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles')
        return {fn: fnid for fnid, fn in cursor.fetchall()}

    def get_spectra_id(self, fn_id, retention_time=None, scan_nr=None):
        """Returns spectra id for spectra filename and retention time"""
        cursor = self.get_cursor()
        sql = 'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? '
        values = [fn_id]
        if retention_time is not None:
            sql = '{0} AND retention_time=?'.format(sql)
            values.append(retention_time)
        if scan_nr is not None:
            sql = '{0} AND scan_nr=?'.format(sql)
            values.append(scan_nr)
        cursor.execute(sql, tuple(values))
        return cursor.fetchone()[0]
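A minimal exercise of DatabaseConnection against a throwaway in-memory database; the import path is assumed from this repository's src/ layout, and the table names come from mslookup_tables:

from app.lookups.sqlite.base import DatabaseConnection   # assumed path

db = DatabaseConnection(':memory:')
db.create_tables(['biosets', 'mzmlfiles'])
db.store_many('INSERT INTO biosets(set_name) VALUES(?)',
              [('setA',), ('setB',)])
for row in db.execute_sql('SELECT set_id, set_name FROM biosets'):
    print(row)
db.close_connection()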
mit
-7248918378110086000
51.792411
507
0.394825
false
navjeet0211/Mara
Biology.py
1
4649
#!/usr/bin/python
# -*- coding: ISO-8859-1 -*-

"""
Mara.Biology (v. 2.1): Biologically involved general algorithms.

Requirements: Python 2.2->

Author: Martti Louhivuori ([email protected])
Date: 13.2.2006

---

Copyright (C) 2006  Martti Louhivuori

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

The full license text can be found in the file LICENSE.
"""

from Mara import loki
from Mara.Library import omniTranslator, libLingua
import os, sys

def parse_sequence(args):
    """
    Parse an amino acid sequence.

    Arguments:
        args -- a list of sequence items or a name of a file containing
                them, e.g. 'GLU PRO GLU CYS' or 'EPEC GLK C EEK'
    Returns:
        sequence -- a list of 3-letter amino acid symbols
    """
    loki.debug('parse_sequence < %s' % repr(args))
    if isinstance(args, str) and os.path.isfile(args):
        fname = args
    elif len(args) == 1 and isinstance(args[0], str):
        if os.path.isfile(args[0]):
            fname = args[0]
        else:
            if args[0].count(' '):
                args = args[0].split()
            else:
                args = args[0]
            fname = None
    else:
        fname = None
    if fname:
        f = open(fname)
        seq = f.read()
        f.close()
        loki.info("Read sequence from file '%s'." % fname)
        args = seq.strip().split()
    loki.debug('args=%s' % repr(args))
#    sequence = []
#    for aa in seq.strip().split():
#        try:
#            sequence.append(omniTranslator(aa.capitalize(), \
#                    '3-letter-aminoacids'))
#        except KeyError:
#            loki.warn("Discarding unknown aminoacid '%s'." % repr(aa))
#    else:
    # check whether all the sequence items are separated from each other
    args = [x.capitalize() for x in args]
    separated = True
    for a in args:
        if not (a in libLingua.dictionaryAmino1 or \
                a in libLingua.dictionaryAmino3):
            separated = False
    loki.debug('separated=%s' % repr(separated))
    sequence = []
    if separated:
        # append each item after converting it to a 3-letter symbol
        for a in args:
            try:
                sequence.append(omniTranslator(a.capitalize(), \
                        '3-letter-aminoacids'))
            except KeyError:
                loki.warn("Discarding unknown aminoacid '%s'." % repr(a))
    else:
        # jam all symbols together (hope they are all 1-letter symbols)
        aa = ''
        for a in args:
            aa += str(a)
        aa = aa.replace(' ', '')
        loki.debug('aa=%s' % repr(aa))
        # append each item after converting it to a 3-letter symbol
        for a in list(aa):
            try:
                sequence.append(omniTranslator(a, '3-letter-aminoacids'))
            except KeyError:
                loki.warn("Discarding unknown aminoacid '%s'." % repr(a))
    loki.debug('parse_sequence > %s' % repr(sequence))
    return sequence

def write_sequence(seq, output=None, format='long', use_translator=True):
    """
    Write amino acid sequence in the desired format either to a stream
    given or to STDOUT.

    TODO:
     - add some flexibility to the output formats
    """
    if output is None:
        output = sys.stdout
    if format == 'long':
        l = []
        while seq:
            s = seq.pop(0)
            if use_translator:
                s = omniTranslator(s.capitalize(), '3-letter-aminoacids')
            l.append(s.upper())
            if len(l) == 15:
                output.write(' '.join(l) + '\n')
                l = []
        output.write(' '.join(l) + '\n')
    elif format == 'full':
        l = []
        while seq:
            s = seq.pop(0)
            if use_translator:
                s = omniTranslator(s.capitalize(), 'english')
            l.append(s)
        output.write(' '.join(l) + '\n')
    else:
        l = []
        w = ''
        while seq:
            s = seq.pop(0)
            if use_translator:
                s = omniTranslator(s.capitalize(), '1-letter-aminoacids')
            w += s.upper()
            if len(w) == 10:
                l.append(w)
                w = ''
            if len(l) == 6:
                output.write(' '.join(l) + '\n')
                l = []
        if w:
            l.append(w)
        output.write(' '.join(l) + '\n')
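An illustrative call, assuming the Mara package and its omniTranslator tables are importable (the exact output casing depends on those tables):

from Mara.Biology import parse_sequence, write_sequence

seq = parse_sequence(['EPEC'])   # 1-letter input, converted to 3-letter symbols
write_sequence(seq, format='long')   # prints e.g. "GLU PRO GLU CYS"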
gpl-2.0
-230963870419947460
30.842466
76
0.524844
false
antevens/listen
listen/__init__.py
1
1470
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
The MIT License (MIT)

Copyright (c) 2014 Antonia Stevens

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

# Prepare for deprecation of versions < 2.7
#from __future__ import print_function

# This API requires Python 2.7 or more recent
#import sys
#if sys.version < "2.7.0":
#    print("listen requires Python 2.7 or more recent")
#    sys.exit(1)

from listen.signal_handler import SignalHandler

__all__ = ["listen"]
__version__ = "0.1.7"
mit
-3473648762597402000
35.75
85
0.762585
false
jeffbuttars/upkg
upkg/cmds/install.py
1
1925
import logging
logger = logging.getLogger('upkg')

import os

from upkg.cmds.base import BaseCmd
from upkg.conf import settings
from upkg.lib import Repo


class Cmd(BaseCmd):
    """Docstring for Search """

    name = 'install'
    help_text = ("install upkg")
    aliases = ['in', 'ins', 'inst', 'insta', 'instal']

    def build(self):
        """todo: Docstring for build
        :return:
        :rtype:
        """
        self._cmd_parser.add_argument(
            'install',
            type=str,
            default=None,
            nargs="+",
            help=(""),
        )
        self._cmd_parser.add_argument(
            '-l', '--location',
            default=None,
            help=("Specify the installation location. ")
        )
        return super(Cmd, self).build()

    def install_repo(self, repo, location=''):
        """todo: Docstring for install_repo
        :param repo: arg description
        :type repo: type description
        :return:
        :rtype:
        """
        dst = location or settings.upkg_destdir

        # make sure the destination dir exists.
        if not os.path.exists(dst):
            os.makedirs(dst)

        # repo = Repo(url=repo, repo_dir=dst)
        repo = Repo(url=repo)
        repo.install()
        return repo

    def exec(self, args):
        """todo: Docstring for exec
        :param args: arg description
        :type args: type description
        :return:
        :rtype:
        """
        logger.debug("install %s, location %s", args.install, args.location)

        location = args.location and os.path.abspath(args.location)
        # args is an argparse Namespace, which has no len(); check the
        # package list itself (the original called len(args) here).
        if location and len(args.install) > 1:
            raise Exception(("You cannot specify multiple install packages when "
                             "using the --location option."))

        for repo in args.install:
            self.install_repo(repo, location)
gpl-2.0
-6470076270604769000
23.367089
81
0.532468
false
kensho-technologies/graphql-compiler
graphql_compiler/tests/test_schema_fingerprint.py
1
17562
# Copyright 2020-present Kensho Technologies, LLC.
import unittest

from graphql import build_ast_schema, parse, print_schema
import pytest

from ..schema import compute_schema_fingerprint
from .test_helpers import compare_graphql


def _compute_schema_text_fingerprint(schema_text: str):
    """Parse the schema text and compute the fingerprint of the GraphQLSchema."""
    return compute_schema_fingerprint(build_ast_schema(parse(schema_text)))


def _assert_equal_fingerprints(test_case: unittest.TestCase, schema_text1: str, schema_text2: str):
    """Parse the schema texts and assert that the schemas have the same fingerprint."""
    fingerprint1 = _compute_schema_text_fingerprint(schema_text1)
    fingerprint2 = _compute_schema_text_fingerprint(schema_text2)
    test_case.assertEqual(fingerprint1, fingerprint2)


def _assert_not_equal_fingerprints(
    test_case: unittest.TestCase, schema_text1: str, schema_text2: str
):
    """Parse the schema texts and assert that the schemas do not have the same fingerprint."""
    fingerprint1 = _compute_schema_text_fingerprint(schema_text1)
    fingerprint2 = _compute_schema_text_fingerprint(schema_text2)
    test_case.assertNotEqual(fingerprint1, fingerprint2)


class SchemaFingerprintTests(unittest.TestCase):
    def test_schema_fingerprint_basic(self):
        schema_text = """
            type Object{
                field2: String
                field1: String
                field4: String
                field3: String
            }
        """
        schema = build_ast_schema(parse(schema_text))
        fingerprint = compute_schema_fingerprint(schema)

        # Assert that compute_schema_fingerprint does not modify the original schema.
        compare_graphql(self, schema_text, print_schema(schema))

        # Assert that compute_schema_fingerprint disregards field order.
        reordered_schema_text = """
            type Object{
                field1: String
                field3: String
                field4: String
                field2: String
            }
        """
        self.assertEqual(_compute_schema_text_fingerprint(reordered_schema_text), fingerprint)

        # Assert that the computed fingerprint is not the same if we add a new field.
        schema_text_with_added_field = """
            type Object{
                field1: String
                field3: String
                field4: String
                field2: String
                field5: String
            }
        """
        schema_with_added_field_fingerprint = _compute_schema_text_fingerprint(
            schema_text_with_added_field
        )
        self.assertNotEqual(schema_with_added_field_fingerprint, fingerprint)

    def test_different_field_type(self):
        schema_text1 = """
            type Object { field: String }
        """
        schema_text2 = """
            type Object { field: Int }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_field_argument_order(self):
        schema_text1 = """
            type Object { field(a: Int, b: String): String }
        """
        schema_text2 = """
            type Object { field(b: String, a: Int): String }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_field_arguments(self):
        schema_text1 = """
            type Object { field(a: Int, b: String): String }
        """
        schema_text2 = """
            type Object { field(a: Int): Int }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_enum_order(self):
        schema_text1 = """
            enum Enum { A B }
        """
        schema_text2 = """
            enum Enum { B A }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_enum(self):
        schema_text1 = """
            enum Enum { A B }
        """
        schema_text2 = """
            enum Enum { A }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_input_order(self):
        schema_text1 = """
            input Input { a: Int, b: Int }
        """
        schema_text2 = """
            input Input { b: Int, a: Int }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_input(self):
        schema_text1 = """
            input Input { a: Int, b: Int }
        """
        schema_text2 = """
            input Input { a: Int }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_field_equivalency_with_type_extension(self):
        schema_text1 = """
            type Object { a: Int }
            extend type Object { b: Int }
        """
        schema_text2 = """
            type Object { a: Int, b: Int }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_interface_equivalency_with_type_extension(self):
        schema_text1 = """
            type Object { a: Int }
            extend type Object { b: Int }
        """
        schema_text2 = """
            type Object { a: Int, b: Int }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_type_extensions(self):
        schema_text1 = """
            type Object { a: Int }
            extend type Object { b: Int }
        """
        schema_text2 = """
            type Object { a: Int }
            extend type Object { c: Int }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_interface_implementation_order(self):
        schema_text1 = """
            type Object implements Interface1 & Interface2 {
                field1: String
                field2: String
            }
            interface Interface1 { field1: String }
            interface Interface2 { field1: String }
        """
        schema_text2 = """
            type Object implements Interface2 & Interface1 {
                field1: String
                field2: String
            }
            interface Interface1 { field1: String }
            interface Interface2 { field1: String }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_interface_implementation(self):
        schema_text1 = """
            type Object implements Interface1 & Interface2 {
                field1: String
                field2: String
            }
            interface Interface1 { field1: String }
            interface Interface2 { field1: String }
        """
        schema_text2 = """
            type Object implements Interface1 {
                field1: String
                field2: String
            }
            interface Interface1 { field1: String }
            interface Interface2 { field1: String }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_union_definition_order(self):
        schema_text1 = """
            type Object1 { field1: String }
            type Object2 { field2: String }
            union UnionType = Object1 | Object2
        """
        schema_text2 = """
            type Object1 { field1: String }
            type Object2 { field2: String }
            union UnionType = Object2 | Object1
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_union_definitions(self):
        schema_text1 = """
            type Object1 { field1: String }
            type Object2 { field2: String }
            union UnionType = Object1 | Object2
        """
        schema_text2 = """
            type Object1 { field1: String }
            type Object2 { field2: String }
            union UnionType = Object1
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_top_level_type_order(self):
        schema_text1 = """
            scalar Date
            directive @output on FIELD
            type Object { field1: String }
            union UnionType = Object
        """
        schema_text2 = """
            directive @output on FIELD
            scalar Date
            union UnionType = Object
            type Object { field1: String }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_top_level_types(self):
        schema_text1 = """
            scalar Date
            directive @output on FIELD
            type Object { field1: String }
            union UnionType = Object
        """
        schema_text2 = """
            scalar Date
            directive @output on FIELD
            type Object { field1: String }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_schema_operation_order(self):
        schema_text1 = """
            schema {
                query: RootSchemaQuery
                mutation: RootSchemaMutation
            }
            type Object1 { field1: String }
            type RootSchemaQuery { Object1: [Object1] }
            type RootSchemaMutation { Object1: [Object1] }
        """
        schema_text2 = """
            schema {
                mutation: RootSchemaMutation
                query: RootSchemaQuery
            }
            type Object1 { field1: String }
            type RootSchemaQuery { Object1: [Object1] }
            type RootSchemaMutation { Object1: [Object1] }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_schema_operations(self):
        schema_text1 = """
            schema {
                query: RootSchemaQuery
                mutation: RootSchemaMutation
            }
            type Object1 { field1: String }
            type RootSchemaQuery { Object1: [Object1] }
            type RootSchemaMutation { Object1: [Object1] }
        """
        schema_text2 = """
            schema {
                query: RootSchemaQuery
            }
            type Object1 { field1: String }
            type RootSchemaQuery { Object1: [Object1] }
            type RootSchemaMutation { Object1: [Object1] }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_description_change(self):
        schema_text1 = """
            \"\"\"
            Description 1
            \"\"\"
            scalar Date
        """
        schema_text2 = """
            \"\"\"
            Description 2
            \"\"\"
            scalar Date
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_deprecation_reason(self):
        schema_text1 = """
            type Object { field: String @deprecated(reason: "Reason 1") }
        """
        schema_text2 = """
            type Object { field: String @deprecated(reason: "Reason 2") }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_directive_definition_argument_order(self):
        schema_text1 = """
            directive @filter(
                op_name: String!
                value: [String!]
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        schema_text2 = """
            directive @filter(
                value: [String!]
                op_name: String!
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_directive_arguments(self):
        schema_text1 = """
            directive @filter(
                op_name: String!
                value: [String!]
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        schema_text2 = """
            directive @filter(
                op_name: String!
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_directive_definition_location_order(self):
        schema_text1 = """
            directive @filter(
                op_name: String!
                value: [String!]
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        schema_text2 = """
            directive @filter(
                value: [String!]
                op_name: String!
            ) repeatable on INLINE_FRAGMENT | FIELD
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_different_directive_locations(self):
        schema_text1 = """
            directive @filter(
                op_name: String!
                value: [String!]
            ) repeatable on FIELD | INLINE_FRAGMENT
        """
        schema_text2 = """
            directive @filter(
                op_name: String!
                value: [String!]
            ) repeatable on FIELD
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)

    def test_argument_order_of_directives_at_field_definitions(self):
        schema_text1 = """
            directive @custom_directive(a: Int, b: String) on FIELD_DEFINITION
            type Object { field: String @custom_directive(b: "a", a: 1) }
        """
        schema_text2 = """
            directive @custom_directive(a: Int, b: String) on FIELD_DEFINITION
            type Object { field: String @custom_directive(a: 1, b: "a") }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_reordered_directives_at_field_definition(self):
        schema_text1 = """
            directive @custom_directive1(a: Int, b: String) on FIELD_DEFINITION
            directive @custom_directive2(c: Int, d: String) on FIELD_DEFINITION
            type Object {
                field: String @custom_directive1(a: 1, b: "a") @custom_directive2(c: 1, d: "a")
            }
        """
        schema_text2 = """
            directive @custom_directive1(a: Int, b: String) on FIELD_DEFINITION
            directive @custom_directive2(c: Int, d: String) on FIELD_DEFINITION
            type Object {
                field: String @custom_directive2(d: "a", c: 1) @custom_directive1(b: "a", a: 1)
            }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    def test_reordered_repeatable_directives_at_field_definition(self):
        schema_text1 = """
            directive @custom_directive1(a: Int, b: String) repeatable on FIELD_DEFINITION
            type Object {
                field: String @custom_directive1(a: 1, b: "a") @custom_directive1(b: 0, a: "b")
            }
        """
        schema_text2 = """
            directive @custom_directive1(a: Int, b: String) repeatable on FIELD_DEFINITION
            type Object {
                field: String @custom_directive1(a: "b", b: 0) @custom_directive1(b: "a", a: 1)
            }
        """
        _assert_equal_fingerprints(self, schema_text1, schema_text2)

    @pytest.mark.xfail(
        strict=True,
        reason=(
            "Directives at field definitions are lost when a schema is printed and parsed."
            "See https://github.com/graphql/graphql-js/issues/2389."
        ),
    )
    def test_different_directives_at_field_definitions(self):
        schema_text1 = """
            directive @custom_directive(a: Int) on FIELD_DEFINITION
            type Object { field: String @custom_directive(a: 1) }
        """
        schema_text2 = """
            directive @custom_directive(a: Int) on FIELD_DEFINITION
            type Object { field: String @custom_directive(a: 2) }
        """
        _assert_not_equal_fingerprints(self, schema_text1, schema_text2)
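A quick sketch of the helper under test, outside the test suite (graphql-core assumed installed; the import path follows this repository's package layout):

from graphql import build_ast_schema, parse
from graphql_compiler.schema import compute_schema_fingerprint

schema = build_ast_schema(parse("type Query { hello: String }"))
print(compute_schema_fingerprint(schema))   # the schema's fingerprint string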
apache-2.0
-2442469511526436000
27.371567
99
0.489067
false
seanandrews/casa-kit
ImportMS.py
1
1972
import numpy as np
import os

# NOTE: this script is meant to be run inside casapy, where the CASA table
# tool 'tb' is available as a global.

# original .ms file name
oms_path = '../DR/ALMA_B6/2013.1.00226.S/science_goal.uid___A001_X122_X1f3/group.uid___A001_X122_X1f4/member.uid___A001_X122_X1f5/final'
oms_name = 'continuum'
mdl_name = oms_name+'.model'
mkres = True
res_name = oms_name+'.resid'

# NAMING CONVENTIONS: If your data file is named 'name.ms', then you will
# export it as 'name.vis.npz' and your corresponding model and residual files
# will be 'name.model.ms' / 'name.model.vis.npz' and 'name.resid.ms' /
# 'name.resid.vis.npz'.  You can change this with 'mdl_name' and 'res_name'.

# copy the data file into a model
os.system('rm -rf '+mdl_name+'.ms')
os.system('cp -r '+oms_path+'/'+oms_name+'.ms '+mdl_name+'.ms')

# load the data
tb.open(mdl_name+'.ms')
data = tb.getcol("DATA")
flag = tb.getcol("FLAG")
tb.close()

# Note the flagged columns
flagged = np.all(flag, axis=(0, 1))
unflagged = np.squeeze(np.where(flagged == False))

# load the model file (presume this is just an array of complex numbers, with
# the appropriate sorting/ordering in original .ms file; also assume that the
# polarizations have been averaged, and that the model is unpolarized)
mdl = (np.load(mdl_name+'.vis.npz'))['vis']

# replace the original data with the model
data[:,:,unflagged] = mdl

# now re-pack those back into the .ms
tb.open(mdl_name+'.ms', nomodify=False)
tb.putcol("DATA", data)
tb.flush()
tb.close()

# now repeat this for the residual visibilities if you want
if (mkres == True):
    os.system('rm -rf '+res_name+'.ms')
    os.system('cp -r '+oms_path+'/'+oms_name+'.ms '+res_name+'.ms')
    tb.open(res_name+'.ms')
    data = tb.getcol("DATA")
    flag = tb.getcol("FLAG")
    tb.close()
    flagged = np.all(flag, axis=(0, 1))
    unflagged = np.squeeze(np.where(flagged == False))
    mdl = (np.load(mdl_name+'.vis.npz'))['vis']
    data[:,:,unflagged] -= mdl
    tb.open(res_name+'.ms', nomodify=False)
    tb.putcol("DATA", data)
    tb.flush()
    tb.close()
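For orientation, a hedged sketch of the .vis.npz side of this round-trip; the array key 'vis' matches the loads above, while the shape and dtype here are only illustrative:

import numpy as np

# fabricate a model visibility array shaped like the unflagged DATA column
# (npol, nchan, nvis); the real one comes from your modeling code
vis = np.zeros((2, 1, 1000), dtype=np.complex64)
np.savez('continuum.model.vis.npz', vis=vis)

mdl = np.load('continuum.model.vis.npz')['vis']
print(mdl.shape, mdl.dtype)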
mit
7792717028530996000
30.301587
136
0.660243
false
flightcom/freqtrade
freqtrade/tests/strategy/test_strategy.py
1
4671
import json
import logging
import pytest

from freqtrade.strategy.strategy import Strategy
from freqtrade.analyze import parse_ticker_dataframe


@pytest.fixture
def result():
    with open('freqtrade/tests/testdata/BTC_ETH-1.json') as data_file:
        return parse_ticker_dataframe(json.load(data_file))


def test_sanitize_module_name():
    assert Strategy._sanitize_module_name('default_strategy') == 'default_strategy'
    assert Strategy._sanitize_module_name('default_strategy.py') == 'default_strategy'
    assert Strategy._sanitize_module_name('../default_strategy.py') == 'default_strategy'
    assert Strategy._sanitize_module_name('../default_strategy') == 'default_strategy'
    assert Strategy._sanitize_module_name('.default_strategy') == '.default_strategy'
    assert Strategy._sanitize_module_name('foo-bar') == 'foo-bar'
    assert Strategy._sanitize_module_name('foo/bar') == 'bar'


def test_search_strategy():
    assert Strategy._search_strategy('default_strategy') == '.'
    assert Strategy._search_strategy('super_duper') is None


def test_strategy_structure():
    assert hasattr(Strategy, 'init')
    assert hasattr(Strategy, 'minimal_roi')
    assert hasattr(Strategy, 'stoploss')
    assert hasattr(Strategy, 'populate_indicators')
    assert hasattr(Strategy, 'populate_buy_trend')
    assert hasattr(Strategy, 'populate_sell_trend')


def test_load_strategy(result):
    strategy = Strategy()
    strategy.logger = logging.getLogger(__name__)

    assert not hasattr(Strategy, 'custom_strategy')
    strategy._load_strategy('default_strategy')
    assert not hasattr(Strategy, 'custom_strategy')

    assert hasattr(strategy.custom_strategy, 'populate_indicators')
    assert 'adx' in strategy.populate_indicators(result)


def test_strategy(result):
    strategy = Strategy()
    strategy.init({'strategy': 'default_strategy'})

    assert hasattr(strategy.custom_strategy, 'minimal_roi')
    assert strategy.minimal_roi['0'] == 0.04

    assert hasattr(strategy.custom_strategy, 'stoploss')
    assert strategy.stoploss == -0.10

    assert hasattr(strategy.custom_strategy, 'populate_indicators')
    assert 'adx' in strategy.populate_indicators(result)

    assert hasattr(strategy.custom_strategy, 'populate_buy_trend')
    dataframe = strategy.populate_buy_trend(strategy.populate_indicators(result))
    assert 'buy' in dataframe.columns

    assert hasattr(strategy.custom_strategy, 'populate_sell_trend')
    dataframe = strategy.populate_sell_trend(strategy.populate_indicators(result))
    assert 'sell' in dataframe.columns


def test_strategy_override_minimal_roi(caplog):
    config = {
        'strategy': 'default_strategy',
        'minimal_roi': {
            "0": 0.5
        }
    }
    strategy = Strategy()
    strategy.init(config)

    assert hasattr(strategy.custom_strategy, 'minimal_roi')
    assert strategy.minimal_roi['0'] == 0.5
    assert ('freqtrade.strategy.strategy',
            logging.INFO,
            'Override strategy \'minimal_roi\' with value in config file.'
            ) in caplog.record_tuples


def test_strategy_override_stoploss(caplog):
    config = {
        'strategy': 'default_strategy',
        'stoploss': -0.5
    }
    strategy = Strategy()
    strategy.init(config)

    assert hasattr(strategy.custom_strategy, 'stoploss')
    assert strategy.stoploss == -0.5
    assert ('freqtrade.strategy.strategy',
            logging.INFO,
            'Override strategy \'stoploss\' with value in config file: -0.5.'
            ) in caplog.record_tuples


def test_strategy_override_ticker_interval(caplog):
    config = {
        'strategy': 'default_strategy',
        'ticker_interval': 60
    }
    strategy = Strategy()
    strategy.init(config)

    assert hasattr(strategy.custom_strategy, 'ticker_interval')
    assert strategy.ticker_interval == 60
    assert ('freqtrade.strategy.strategy',
            logging.INFO,
            'Override strategy \'ticker_interval\' with value in config file: 60.'
            ) in caplog.record_tuples


def test_strategy_fallback_default_strategy():
    strategy = Strategy()
    strategy.logger = logging.getLogger(__name__)

    assert not hasattr(Strategy, 'custom_strategy')
    strategy._load_strategy('../../super_duper')
    assert not hasattr(Strategy, 'custom_strategy')


def test_strategy_singleton():
    strategy1 = Strategy()
    strategy1.init({'strategy': 'default_strategy'})

    assert hasattr(strategy1.custom_strategy, 'minimal_roi')
    assert strategy1.minimal_roi['0'] == 0.04

    strategy2 = Strategy()
    assert hasattr(strategy2.custom_strategy, 'minimal_roi')
    assert strategy2.minimal_roi['0'] == 0.04
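For orientation, here is a minimal sketch of the kind of strategy module these tests load. Everything in it is inferred only from the attributes the assertions above exercise; the class name and the DataFrame-in/DataFrame-out signatures are assumptions, not freqtrade's actual default_strategy.

# hypothetical strategy module, inferred from the test assertions above
from pandas import DataFrame


class MyStrategy(object):
    minimal_roi = {"0": 0.04}   # ROI threshold keyed by minutes elapsed
    stoploss = -0.10
    ticker_interval = 5

    def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
        # e.g. compute an 'adx' column here
        return dataframe

    def populate_buy_trend(self, dataframe: DataFrame) -> DataFrame:
        dataframe['buy'] = 0
        return dataframe

    def populate_sell_trend(self, dataframe: DataFrame) -> DataFrame:
        dataframe['sell'] = 0
        return dataframe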
gpl-3.0
5,067,658,529,072,646,000
32.12766
89
0.687219
false
maurov/xraysloth
sloth/fit/peakfit_lmfit.py
1
7232
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple peak fitting utility with Lmfit
======================================

Current fitting backend: Lmfit_

.. _Lmfit: https://lmfit.github.io/lmfit-py/
"""

#: BASE
import numpy as np
from matplotlib.pyplot import cm

#: LMFIT IMPORTS
from lmfit.models import ConstantModel, VoigtModel

#: SLOTH
from sloth.utils.matplotlib import get_colors
from sloth.utils.logging import getLogger

_logger = getLogger("sloth.fit.peakfit_lmfit", level="INFO")


def fit_peak(
    x,
    y,
    num=1,
    positions=[None],
    amplitudes=[None],
    widths=[None],
    expressions=None,
    bkgModel=None,
    peakModel=None,
):
    """peak fit with lmfit

    Description
    -----------

    This peak fitting model is built to fit one to three peaks (with
    prefixes: 'p1_', 'p2_', 'p3_'). The main control parameter is the
    initial guess of the peaks positions.

    Notes
    -----

    For the Gaussian function, amplitude means weighting factor multiplying a
    unit-normalized Gaussian, so that the maximum height at the centroid is
    Amplitude/(sqrt(2pi)*sigma), and that the full-width at half maximum is
    ~2.355 sigma. In the fit, amplitude, center, and sigma can be varied,
    while height and fwhm are reported values, derived from these quantities.

    To guess the Gaussian amplitude (A) from the peak maximum (H) and a guess
    width (W), one could use the simple relation::

        A ~ 5.90 * H * W

    Parameters
    ----------
    num : int
        number of peaks to fit: currently between 1 and 3 [1]
    positions : list of floats
        initial peaks positions
    amplitudes : list of floats
        initial peaks amplitudes
    widths : list of floats
        initial peaks widths
    expressions : None or dict
        parameters expressions
    bkgModel : None or lmfit.Model (optional)
        if None: ConstantModel
    peakModel : None or lmfit.Model (optional)
        if None: VoigtModel

    Returns
    -------
    lmfit.fit object
    """
    if num > 3:
        _logger.error("current model is limited to 3 peaks only!")
        return None
    if (len(positions) < num) or (len(amplitudes) < num) or (len(widths) < num):
        _logger.error("'positions'/'amplitudes'/'widths' < 'num'!")
        return None
    if bkgModel is None:
        bkgModel = ConstantModel
    if peakModel is None:
        peakModel = VoigtModel
    bkg = bkgModel(prefix="bkg_")
    pars = bkg.guess(y, x=x)
    pars["bkg_c"].set(y.min())
    mod = bkg
    for ipk in range(num):
        pkPos = positions[ipk]
        pkAmp = amplitudes[ipk]
        pkW = widths[ipk]
        pfx = f"p{ipk+1}_"
        xmax = x[np.argmax(y)]
        ymax = y.max()
        if pkPos is None:
            _logger.info(f"{pfx} center guess at x={xmax}")
            pkPos = xmax
            positions[ipk] = pkPos
        if pkAmp is None:
            _logger.info(f"{pfx} amplitude guess at y={ymax}")
            pkAmp = ymax
            amplitudes[ipk] = pkAmp
        if pkW is None:
            pkW = 1
            _logger.info(f"{pfx} width guess {pkW}")
            widths[ipk] = pkW
        pk = peakModel(prefix=pfx)
        pars.update(pk.make_params())
        pars[f"{pfx}center"].set(pkPos)
        pars[f"{pfx}amplitude"].set(pkAmp)
        pars[f"{pfx}sigma"].set(pkW)
        #: force side peaks to stay same side of the main peak
        if not (ipk == 0):
            if pkPos < positions[0]:
                pars[f"{pfx}center"].set(pkPos, max=positions[0])
            else:
                pars[f"{pfx}center"].set(pkPos, min=positions[0])
        mod += pk
    #: set mathematical constraints if given
    if expressions is not None:
        assert type(expressions) is dict, "Expressions should be a dictionary"
        for key, value in expressions.items():
            try:
                pars[key].set(expr=value)
            except KeyError:
                _logger.warning(f"[fit_peak] cannot set expression '{key}': '{value}'")
    _logger.info("Running fit...")
    fitobj = mod.fit(y, pars, x=x)
    return fitobj


def get_curves_fit(x, fitobj, components="p", with_initial_guess=False):
    """get a list of curves from the fit object

    Parameters
    ----------
    x : array
    fitobj : lmfit.model.fit object
    components : False or str (optional)
        if given, include components starting with 'components' string
        default is 'p' (=peaks only)

    Returns
    -------
    curves = [[x, y_best, {'legend': 'best fit', 'color': 'red'}],
              [x, y_initial, {'legend': 'initial guess', 'color': 'gray'}],
              [x, y_componentN, {'legend': 'component prefix N', 'color': 'pink'}],
             ]
    """
    curves = []
    curve_dict = {
        "legend": "best fit",
        "label": "best fit",
        "color": "red",
        "linewidth": 1,
        "linestyle": "-",
    }
    curve = [x, fitobj.best_fit, curve_dict]
    curves.append(curve)
    if with_initial_guess:
        guess_dict = {
            "legend": "initial guess",
            "label": "initial guess",
            "color": "gray",
            "linewidth": 0.5,
            "linestyle": "-",
        }
        curve = [x, fitobj.init_fit, guess_dict]
        curves.append(curve)
    if components:
        comps = fitobj.eval_components()
        _logger.debug(f"Available fit components are: {comps.keys()}")
        colors = get_colors(len(comps.keys()), colormap=cm.viridis)
        for icomp, kcomp in enumerate(comps.keys()):
            if kcomp.startswith(components):
                comp_dict = {
                    "legend": f"{kcomp}",
                    "label": f"{kcomp}",
                    "color": colors[icomp],
                    "linewidth": 1,
                    "linestyle": "-",
                }
                curve = [x, comps[kcomp], comp_dict]
                curves.append(curve)
    return curves


def main_test():
    """Test and show example usage"""
    import matplotlib.pyplot as plt
    from lmfit.lineshapes import gaussian
    from lmfit.models import GaussianModel

    def _get_gauss(x, amp, cen, sigma, noise):
        signal = gaussian(x, amplitude=amp, center=cen, sigma=sigma)
        signal += noise * np.random.random(size=signal.shape)
        return signal

    x = np.linspace(-100, 100, 200)
    y1 = _get_gauss(x, 100, 0, 5, 0.2)
    y2 = _get_gauss(x, 60, -18, 10, 0.1)
    y3 = _get_gauss(x, 90, 10, 10, 0.2)
    y = 0.0015 * x + y1 + y2 + y3
    figname = "test_peakfit_lmfit"
    ymax = y.max()
    xmax = x[np.argmax(y)]
    fitobj = fit_peak(
        x,
        y,
        num=3,
        positions=[xmax, xmax - 20, xmax + 17],
        amplitudes=[ymax, ymax / 2.0, ymax / 3.0],
        widths=[1, 1, 1],
        peakModel=GaussianModel,
    )
    fit_curves = get_curves_fit(x, fitobj, with_initial_guess=True)
    #: plot
    plt.ion()
    plt.close(figname)
    fig, ax = plt.subplots(num=figname)
    ax.plot(x, y, label="data", color="black")
    for fc in fit_curves:
        ax.plot(fc[0], fc[1], label=fc[2]["legend"], color=fc[2]["color"])
    ax.legend(loc="best")
    plt.show()
    return fig, ax


if __name__ == "__main__":
    fig, ax = main_test()
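fit_peak returns a standard lmfit ModelResult, so the refined parameters come straight off the fit object. A minimal usage sketch (x, y are assumed to be 1-D numpy arrays of a single-peak spectrum):

# pull the refined parameters out of the returned ModelResult
fitobj = fit_peak(x, y, num=1, positions=[None], amplitudes=[None], widths=[None])
print(fitobj.fit_report())                 # full lmfit report
center = fitobj.params["p1_center"].value  # refined peak position
fwhm = fitobj.params["p1_fwhm"].value      # derived width of the Voigt peak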
bsd-3-clause
-7,739,106,189,221,876,000
28.279352
83
0.560149
false
kcwu/2048-python
ai_modules/kcwu2.py
1
16043
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import random
import math
import array
import multiprocessing

search_depth = 4

KEY_LEFT = 'left'
KEY_UP = 'up'
KEY_RIGHT = 'right'
KEY_DOWN = 'down'

INF = 100000000
moves = [KEY_DOWN, KEY_LEFT, KEY_UP, KEY_RIGHT]
range4 = range(4)
range3 = range(3)
to_idx = dict((2**i, i) for i in range(16))
to_idx[None] = 0

def move_row(row):
    out = [None, None, None, None]
    oc = 0
    ic = 0
    while ic < 4:
        if row[ic] is None:
            ic += 1
            continue
        out[oc] = row[ic]
        oc += 1
        ic += 1
    ic = 0
    oc = 0
    while ic < 4:
        if out[ic] is None:
            break
        if ic == 3:
            out[oc] = out[ic]
            oc += 1
            break
        if out[ic] == out[ic+1]:
            out[oc] = 2*out[ic]
            ic += 1
        else:
            out[oc] = out[ic]
        ic += 1
        oc += 1
    while oc < 4:
        out[oc] = None
        oc += 1
    return out

def worker(job_q, reply_q):
    instance = AI(False)
    reply_q.put('ready')
    while True:
        try:
            cmd, job = job_q.get()
            if cmd == 'quit':
                break
            grid, depth, nodep = job
        except EOFError:
            break
        key = instance.encode(grid), depth
        value = instance.search_min(grid, depth, nodep)
        reply_q.put((key, value))
        instance.table = {}

class AI(object):
    def __init__(self, root=True):
        self.parallel = 30
        self.processes = []
        self.total_node = 0
        self.total_eval = 0
        self.total_time = 0
        self.eval_count = 0
        self.node_count = 0
        self.table = {}
        self.move_table = {}
        self.move_table_r = {}
        self.idx_to_row = []
        self.row_to_idx = {}
        self.build_move_table()
        self.build_eval_monotone_table()
        if root:
            self.parallel_start()

    def build_eval_monotone_table(self):
        self.eval_monotine_table = {}
        for idx, row in enumerate(self.idx_to_row):
            L = R = 0
            m = 0
            for y in range3:
                if row[y] and row[y] >= row[y+1]:
                    m += 1
                    L += m ** 2 * 4
                else:
                    L -= abs((row[y] or 0) - (row[y+1] or 0)) * 1.5
                    m = 0
            m = 0
            for y in range3:
                if row[y] <= row[y+1] and row[y+1]:
                    m += 1
                    R += m ** 2 * 4
                else:
                    R -= abs((row[y] or 0) - (row[y+1] or 0)) * 1.5
                    m = 0
            self.eval_monotine_table[row] = L, R

    def build_move_table(self):
        # assume max cell is 32768
        max_cell = 2**15
        values = [None] + [2**x for x in range(1, 16)]
        assert len(values) == 16
        idx = 0
        for a in values:
            for b in values:
                for c in values:
                    for d in values:
                        row = a, b, c, d
                        self.idx_to_row.append(row)
                        self.row_to_idx[row] = idx
                        idx += 1
        for idx, row in enumerate(self.idx_to_row):
            row_moved = tuple(move_row(row))
            if max(row_moved) > max_cell:
                self.move_table[idx] = -1
            else:
                self.move_table[idx] = self.row_to_idx[row_moved]
            self.move_table[row] = row_moved
            self.move_table_r[row] = tuple(move_row(row[::-1])[::-1])

    def rotateLeft(self, grid):
        g = grid
        return [
            (g[3][0], g[2][0], g[1][0], g[0][0]),
            (g[3][1], g[2][1], g[1][1], g[0][1]),
            (g[3][2], g[2][2], g[1][2], g[0][2]),
            (g[3][3], g[2][3], g[1][3], g[0][3]),
        ]

    def rotateRight(self, grid):
        g = grid
        return [
            (g[0][3], g[1][3], g[2][3], g[3][3]),
            (g[0][2], g[1][2], g[2][2], g[3][2]),
            (g[0][1], g[1][1], g[2][1], g[3][1]),
            (g[0][0], g[1][0], g[2][0], g[3][0]),
        ]

    def flip(self, grid):
        return [grid[3][::-1], grid[2][::-1], grid[1][::-1], grid[0][::-1]]

    def encode(self, grid):
        return grid[0]+grid[1]+grid[2]+grid[3]

    def move(self, grid, direction):
        if direction == KEY_UP:
            rot = 1
        elif direction == KEY_RIGHT:
            rot = 2
        elif direction == KEY_DOWN:
            rot = 3
        else:
            rot = 0
        if rot == 3:
            tmp = [
                self.move_table_r[grid[0]],
                self.move_table_r[grid[1]],
                self.move_table_r[grid[2]],
                self.move_table_r[grid[3]],
            ]
            return tmp, (tmp != grid)
        if rot == 0:
            grid = self.rotateRight(grid)
        elif rot == 2:
            grid = self.rotateLeft(grid)
        out = [
            self.move_table[grid[0]],
            self.move_table[grid[1]],
            self.move_table[grid[2]],
            self.move_table[grid[3]],
        ]
        return out, (out != grid)

    def show(self, grid):
        for y in range(4):
            for x in range(4):
                if grid[x][y]:
                    print '%4d' % grid[x][y],
                else:
                    print '   .',
            print

    def eval_monotone(self, grid):
        L = R = U = D = 0
        LR = UD = 0
        for x in range4:
            m = 0
            for y in range3:
                if grid[x][y] and grid[x][y] >= grid[x][y+1]:
                    m += 1
                    # 26090
                    #L += m
                    # 29281
                    L += m ** 2 * 4
                else:
                    # 20585
                    #L -= abs(to_idx[grid[x][y]] - to_idx[grid[x][y+1]]) ** 2
                    # 26090
                    L -= abs((grid[x][y] or 0) - (grid[x][y+1] or 0)) * 1.5
                    m = 0
            m = 0
            for y in range3:
                if grid[x][y] <= grid[x][y+1] and grid[x][y+1]:
                    m += 1
                    R += m ** 2 * 4
                else:
                    #R -= abs(to_idx[grid[x][y]] - to_idx[grid[x][y+1]]) ** 2
                    R -= abs((grid[x][y] or 0) - (grid[x][y+1] or 0)) * 1.5
                    m = 0
            LR += max(L, R)
            L = R = 0
        for y in range4:
            m = 0
            for x in range3:
                if grid[x][y] and grid[x][y] >= grid[x+1][y]:
                    m += 1
                    U += m ** 2 * 4
                else:
                    #U -= abs(to_idx[grid[x][y]] - to_idx[grid[x+1][y]]) ** 2
                    U -= abs((grid[x][y] or 0) - (grid[x+1][y] or 0)) * 1.5
                    m = 0
            m = 0
            for x in range3:
                if grid[x][y] <= grid[x+1][y] and grid[x+1][y]:
                    m += 1
                    D += m ** 2 * 4
                else:
                    #D -= abs(to_idx[grid[x][y]] - to_idx[grid[x+1][y]]) ** 2
                    D -= abs((grid[x][y] or 0) - (grid[x+1][y] or 0)) * 1.5
                    m = 0
        UD += max(U, D)
        return LR + UD

    # NOTE: redefinition; this table-based version overrides the reference
    # implementation above (the original file keeps both).
    def eval_monotone(self, grid):
        L = R = U = D = 0
        LR = UD = 0
        for x in range4:
            Lt, Rt = self.eval_monotine_table[grid[x]]
            L += Lt
            R += Rt
        LR = max(L, R)
        grid = self.rotateRight(grid)
        for x in range4:
            Ut, Dt = self.eval_monotine_table[grid[x]]
            U += Ut
            D += Dt
        UD = max(U, D)
        return LR + UD

    def eval_smoothness(self, grid):
        score_smooth = 0
        for x in range4:
            for y in range4:
                s = INF
                if x > 0:
                    s = min(s, abs((grid[x][y] or 2) - (grid[x-1][y] or 2)))
                if y > 0:
                    s = min(s, abs((grid[x][y] or 2) - (grid[x][y-1] or 2)))
                if x < 3:
                    s = min(s, abs((grid[x][y] or 2) - (grid[x+1][y] or 2)))
                if y < 3:
                    s = min(s, abs((grid[x][y] or 2) - (grid[x][y+1] or 2)))
                score_smooth -= s
        return score_smooth

    # NOTE: redefinition; this hand-unrolled version overrides the loop
    # version above (the original file keeps both).
    def eval_smoothness(self, grid):
        a00, a01, a02, a03 = grid[0]
        a10, a11, a12, a13 = grid[1]
        a20, a21, a22, a23 = grid[2]
        a30, a31, a32, a33 = grid[3]
        a00 = a00 or 2
        a01 = a01 or 2
        a02 = a02 or 2
        a03 = a03 or 2
        a10 = a10 or 2
        a11 = a11 or 2
        a12 = a12 or 2
        a13 = a13 or 2
        a20 = a20 or 2
        a21 = a21 or 2
        a22 = a22 or 2
        a23 = a23 or 2
        a30 = a30 or 2
        a31 = a31 or 2
        a32 = a32 or 2
        a33 = a33 or 2
        score_smooth = 0
        score_smooth -= min(abs(a00-a01), abs(a00-a10))
        score_smooth -= min([abs(a01-a00), abs(a01-a11), abs(a01-a02)])
        score_smooth -= min([abs(a02-a01), abs(a02-a12), abs(a02-a03)])
        score_smooth -= min(abs(a03-a02), abs(a03-a13))
        score_smooth -= min([abs(a10-a00), abs(a10-a11), abs(a10-a20)])
        score_smooth -= min([abs(a11-a01), abs(a11-a10), abs(a11-a12), abs(a11-a21)])
        score_smooth -= min([abs(a12-a02), abs(a12-a11), abs(a12-a13), abs(a12-a22)])
        score_smooth -= min([abs(a13-a03), abs(a13-a12), abs(a13-a23)])
        score_smooth -= min([abs(a20-a10), abs(a20-a21), abs(a20-a30)])
        score_smooth -= min([abs(a21-a11), abs(a21-a20), abs(a21-a22), abs(a21-a31)])
        score_smooth -= min([abs(a22-a12), abs(a22-a21), abs(a22-a23), abs(a22-a32)])
        score_smooth -= min([abs(a23-a13), abs(a23-a22), abs(a23-a33)])
        score_smooth -= min(abs(a30-a31), abs(a30-a20))
        score_smooth -= min([abs(a31-a30), abs(a31-a21), abs(a31-a32)])
        score_smooth -= min([abs(a32-a31), abs(a32-a22), abs(a32-a33)])
        score_smooth -= min(abs(a33-a32), abs(a33-a23))
        return score_smooth

    def eval_free(self, grid):
        free = grid[0].count(None) + grid[1].count(None) + grid[2].count(None) + grid[3].count(None)
        return -(16-free)**2

    def eval(self, grid):
        key = self.encode(grid)
        if key in self.table:
            return self.table[key]
        self.eval_count += 1
        score_monotone = self.eval_monotone(grid)
        score_smooth = self.eval_smoothness(grid)
        score_free = self.eval_free(grid)
        score = 0
        score += score_smooth
        score += score_monotone
        score += score_free
        self.table[key] = score
        return score

    def search_max(self, grid, depth, nodep):
        key = self.encode(grid), depth, 1
        if key in self.table:
            return self.table[key]
        best_score = -INF
        self.node_count += 1
        for m in moves:
            g2, moved = self.move(grid, m)
            if not moved:
                continue
            score = self.search_min(g2, depth - 1, nodep)
            #print 'search_max', m, score
            if score > best_score:
                best_score = score
        self.table[key] = best_score
        return best_score

    def search_min(self, grid, depth, nodep):
        if depth == 0:
            return self.eval(grid)
        self.node_count += 1
        key = self.encode(grid), depth
        if key in self.table:
            return self.table[key]
        blank_count = grid[0].count(None) + grid[1].count(None) + grid[2].count(None) + grid[3].count(None)
        scores = []
        for i in range4:
            for j in range4:
                if grid[i][j] is not None:
                    continue
                tmp = list(grid[i])
                score = 0
                all_p = 0
                for v, p in ((2, 0.9), (4, 0.1)):
                    if blank_count > 4 and p * nodep*0.9 < 0.01:  # XXX hardcode for search_depth
                        continue
                    tmp[j] = v
                    grid[i] = tuple(tmp)
                    score += p * self.search_max(grid, depth, p*nodep)
                    all_p += p
                tmp[j] = None
                grid[i] = tuple(tmp)
                if all_p == 0:
                    score = self.eval(grid)
                else:
                    score /= all_p
                scores.append(score)
        b = sum(scores) / len(scores)
        self.table[key] = b
        return b

    def reset(self):
        self.eval_count = 0
        self.node_count = 0
        self.table = {}

    def gen_job3(self, grid, depth, nodep):
        for m in moves:
            g2, moved = self.move(grid, m)
            if not moved:
                continue
            key = self.encode(g2), depth - 1
            job = g2, depth - 1, nodep
            if key in self.job_table:
                continue
            self.job_table.add(key)
            yield job

    def gen_job2(self, grid, depth, nodep):
        blank_count = grid[0].count(None) + grid[1].count(None) + grid[2].count(None) + grid[3].count(None)
        scores = []
        for i in range4:
            for j in range4:
                if grid[i][j] is not None:
                    continue
                tmp = list(grid[i])
                for v, p in ((2, 0.9), (4, 0.1)):
                    if blank_count > 4 and p * nodep*0.9 < 0.01:  # XXX hardcode for search_depth
                        continue
                    tmp[j] = v
                    grid[i] = tuple(tmp)
                    for job in self.gen_job3(grid, depth, p*nodep):
                        yield job
                tmp[j] = None
                grid[i] = tuple(tmp)

    def gen_job(self, grid):
        for m in moves:
            g2, moved = self.move(grid, m)
            if not moved:
                continue
            for job in self.gen_job2(g2, search_depth-1, 1.0):
                yield job

    def parallel_start(self):
        if self.processes:
            return
        self.manager = multiprocessing.Manager()
        self.job_q = self.manager.Queue()
        self.reply_q = self.manager.Queue()
        self.processes = []
        for i in range(self.parallel):
            p = multiprocessing.Process(target=worker, args=(self.job_q, self.reply_q))
            self.processes.append(p)
            p.start()
        # wait all ready
        for i in range(self.parallel):
            self.reply_q.get()
            print 'ready', i

    def parallel_stop(self):
        if not self.processes:
            return
        for i in range(self.parallel):
            self.job_q.put(('quit', 0))
        for p in self.processes:
            p.join()
        self.job_q = None
        self.reply_q = None
        self.manager.shutdown()
        self.manager = None
        self.processes = []

    def parallel_run(self, grid):
        self.parallel_start()
        job_count = 0
        self.job_table = set()
        #t0 = time.time()
        for job in self.gen_job(grid):
            job_count += 1
            self.job_q.put(('job', job))
        #t1 = time.time()
        #print 'gen jobs', t1-t0
        #t0 = time.time()
        for i in range(job_count):
            key, value = self.reply_q.get()
            self.table[key] = value
        #t1 = time.time()
        #print 'get results', t1-t0

    def getNextMove(self, grid):
        best_score = -INF-1
        best_move = 'left'
        self.reset()
        t0 = time.time()
        grid = map(tuple, grid)
        self.parallel_run(grid)
        #s0 = time.time()
        for m in moves:
            #print 'move', m
            g2, moved = self.move(grid, m)
            if not moved:
                continue
            #print grid
            #print g2
            score = self.search_min(g2, search_depth-1, 1.0)
            # round to avoid the instability of floating point numbers
            score = round(score, 6)
            #print score, m
            if score > best_score:
                best_score = score
                best_move = m
            #print '-'
        t1 = time.time()
        #print 'main loop', t1-s0
        self.total_time += t1 - t0
        self.total_eval += self.eval_count
        self.total_node += self.node_count
        print 't=%.2fs, eval=%d, node=%d, total_eval=%d, total_node=%d, %fnps' % (
            t1-t0, self.eval_count, self.node_count,
            self.total_eval, self.total_node,
            (self.total_node+self.total_eval)/self.total_time)
        return best_move

    def __del__(self):
        self.parallel_stop()

# vim:sw=4:expandtab:softtabstop=4
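The merge rules are easiest to see on move_row in isolation. A worked check (Python 2, to match the file; the import assumes the module is importable as kcwu2):

# worked check of the row-compaction rules
from kcwu2 import move_row
print move_row([2, 2, 4, None])    # -> [4, 4, None, None]  (one merge)
print move_row([2, 2, 2, 2])       # -> [4, 4, None, None]  (two independent merges)
print move_row([None, 2, None, 2]) # -> [4, None, None, None]  (compact, then merge)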
bsd-3-clause
8,110,234,823,735,309,000
27.145614
135
0.456336
false
bukun/TorCMS
torcms/core/tool/run_whoosh.py
1
5776
'''
Running whoosh script.
'''

import os

import html2text
import tornado.escape
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import ID, TEXT, Schema
from whoosh.index import create_in, open_dir

from config import SITE_CFG, kind_arr, post_type, router_post
from torcms.model.post_model import MPost
from torcms.model.wiki_model import MWiki

try:
    from jieba.analyse import ChineseAnalyzer
except Exception as err:
    print(repr(err))
    ChineseAnalyzer = None

SITE_CFG['LANG'] = SITE_CFG.get('LANG', 'zh')

# Using jieba lib for Chinese.
if SITE_CFG['LANG'] == 'zh' and ChineseAnalyzer:
    TOR_SCHEMA = Schema(title=TEXT(stored=True, analyzer=ChineseAnalyzer()),
                        catid=TEXT(stored=True),
                        type=TEXT(stored=True),
                        link=ID(unique=True, stored=True),
                        content=TEXT(stored=True, analyzer=ChineseAnalyzer()))
else:
    TOR_SCHEMA = Schema(title=TEXT(stored=True, analyzer=StemmingAnalyzer()),
                        catid=TEXT(stored=True),
                        type=TEXT(stored=True),
                        link=ID(unique=True, stored=True),
                        content=TEXT(stored=True, analyzer=StemmingAnalyzer()))

WHOOSH_BASE = 'database/whoosh'
if os.path.exists(WHOOSH_BASE):
    TOR_IDX = open_dir(WHOOSH_BASE)
else:
    os.makedirs(WHOOSH_BASE)
    TOR_IDX = create_in(WHOOSH_BASE, TOR_SCHEMA)


def do_for_app(rand=True, kind='', doc_type=None):
    '''
    Generate the whoosh index for the kinds defined in the configuration file.
    '''
    if doc_type is None:
        doc_type = {}
    if rand:
        recs = MPost.query_random(num=10, kind=kind)
    else:
        recs = MPost.query_recent(num=2, kind=kind)
    for rec in recs:
        text2 = rec.title + ',' + html2text.html2text(
            tornado.escape.xhtml_unescape(rec.cnt_html))
        writer = TOR_IDX.writer()
        writer.update_document(catid='sid' + kind,
                               title=rec.title,
                               type=doc_type[rec.kind],
                               link='/{0}/{1}'.format(router_post[rec.kind], rec.uid),
                               content=text2)
        writer.commit()


# def do_for_app2(writer, rand=True):
#     '''
#     Generate the whoosh index for the kinds defined in the database.
#     :param writer:
#     :param rand:
#     :return:
#     '''
#     if rand:
#         recs = MPost.query_random(num = 10, kind = '2')
#     else:
#         recs = MPost.query_recent(2)
#
#     for rec in recs:
#         text2 = rec.title + ',' + html2text.html2text(tornado.escape.xhtml_unescape(rec.cnt_html))
#
#         info = MPost2Catalog.get_entry_catalog(rec.uid)
#         if info:
#             pass
#         else:
#             continue
#
#         catid = info.tag.uid[:2] + '00'
#
#         cat_name = ''
#         if 'def_cat_uid' in rec.extinfo and rec.extinfo['def_cat_uid']:
#             taginfo = MCategory.get_by_uid(rec.extinfo['def_cat_uid'][:2] + '00')
#             if taginfo:
#                 cat_name = taginfo.name
#         writer.update_document(
#             title=rec.title,
#             catid=catid,
#             type='<span style="color:red;">[{0}]</span>'.format(cat_name),
#             link='/{0}/{1}'.format(router_post[rec.kind], rec.uid),
#             content=text2
#         )


def do_for_post(rand=True, doc_type=''):
    if rand:
        recs = MPost.query_random(num=10, kind='1')
    else:
        recs = MPost.query_recent(num=2, kind='1')
    for rec in recs:
        text2 = rec.title + ',' + html2text.html2text(
            tornado.escape.xhtml_unescape(rec.cnt_html))
        writer = TOR_IDX.writer()
        writer.update_document(
            title=rec.title,
            catid='sid1',
            type=doc_type,
            link='/post/{0}'.format(rec.uid),
            content=text2,
        )
        writer.commit()


def do_for_wiki(rand=True, doc_type=''):
    if rand:
        recs = MWiki.query_random(num=10, kind='1')
    else:
        recs = MWiki.query_recent(num=2, kind='1')
    for rec in recs:
        text2 = rec.title + ',' + html2text.html2text(
            tornado.escape.xhtml_unescape(rec.cnt_html))
        writer = TOR_IDX.writer()
        writer.update_document(title=rec.title,
                               catid='sid1',
                               type=doc_type,
                               link='/wiki/{0}'.format(rec.title),
                               content=text2)
        writer.commit()


def do_for_page(rand=True, doc_type=''):
    if rand:
        recs = MWiki.query_random(num=4, kind='2')
    else:
        recs = MWiki.query_recent(num=2, kind='2')
    for rec in recs:
        text2 = rec.title + ',' + html2text.html2text(
            tornado.escape.xhtml_unescape(rec.cnt_html))
        writer = TOR_IDX.writer()
        writer.update_document(title=rec.title,
                               catid='sid1',
                               type=doc_type,
                               link='/page/{0}'.format(rec.uid),
                               content=text2)
        writer.commit()


def gen_whoosh_database(kind_arr, post_type):
    '''
    kind_arr, define the `type` except Post, Page, Wiki
    post_type, define the templates for different kind.
    '''
    for switch in [True, False]:
        do_for_post(rand=switch, doc_type=post_type['1'])
        do_for_wiki(rand=switch, doc_type=post_type['1'])
        do_for_page(rand=switch, doc_type=post_type['1'])
        for kind in kind_arr:
            do_for_app(rand=switch, kind=kind, doc_type=post_type)
    # writer.commit()


def run():
    '''
    Run it.
    '''
    gen_whoosh_database(kind_arr=kind_arr, post_type=post_type)
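Once the index has been built, querying it is the standard whoosh pattern. A minimal sketch, reusing the index directory and field names from the schema above:

# query sketch against the index built above
from whoosh.index import open_dir
from whoosh.qparser import QueryParser

ix = open_dir('database/whoosh')
with ix.searcher() as searcher:
    query = QueryParser('content', ix.schema).parse('tornado')
    for hit in searcher.search(query, limit=10):
        print(hit['title'], hit['link'])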
mit
173,483,088,446,669,820
30.119565
100
0.536675
false
paulross/cpip
src/cpip/plot/TreePlotTransform.py
1
21470
#!/usr/bin/env python
# CPIP is a C/C++ Preprocessor implemented in Python.
# Copyright (C) 2008-2017 Paul Ross
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Paul Ross: [email protected]
"""Provides a means of re-interpreting the coordinate system when plotting
trees so that the tree root can be top/left/bottom/right and the child order
plotted anti-clockwise or clockwise.

This can convert 'logical' positions into 'physical' positions. Where a
'logical' position is one with the root of the tree at the top and the child
nodes below in left-to-right (i.e. anti-clockwise) order. A 'physical'
position is a plot-able position where the root of the tree is
top/left/bottom or right and the child nodes are in anti-clockwise or
clockwise order.

Transforming sizes and positions
--------------------------------

If the first suffix is 'l' this is the "logical" coordinate system.
If the first suffix is 'p' this is the "physical" coordinate system.

Then:

* ``C`` - The canvas dimension, Cpw is "Canvas physical width"
* ``W`` - Width dimension, physical and logical.
* ``D`` - Depth dimension, physical and logical.
* ``B`` - Box datum position ("top-left"), physical and logical, x and y.
* ``P`` - Arbitrary point, physical and logical, x and y.

So this "logical view" of the tree graph ('top' and '-'):
i.e. Root(s) is a top and children are written in an anti-clockwise. ::

    ---> x
    |
    \/ y
        <------------------------ Clw ------------------------>
                                  |            To Parent       |
                                  |                            |
        Blx, Bly -->*************************                  |
                    *             |         *                 Cld
         Dl         *                       *                  |
         |          *<-------- Wl -----|--->*                  |
         |          *                  |    *                  |
         Plx, Ply ->.                  |    *                  |
                    *************************                  |
                       |          |     |                      |
                       |          |     |                      |
                    To C[0]    To C[c] To C[C-1]

Or:

====== === === == == ============== ============== ========== =========
Origin Cpw Cpd Wp Dp Bpx            Bpy            Ppx        Ppy
====== === === == == ============== ============== ========== =========
top    Clw Cld Wl Dl Blx            Bly            Plx        Ply
left   Cld Clw Dl Wl Bly            (Clw-Plx-Wl)   Ply        Clw-Plx
bottom Clw Cld Wl Dl (Clw-Plx-Wl)   (Cld-Ply-Dl)   Clw-Plx    Cld-Ply
right  Cld Clw Dl Wl (Cld-Ply-Dl)   Blx            Cld-Ply    Plx
====== === === == == ============== ============== ========== =========

Note the diagonal top-right to bottom-left transference between each pair of
columns. That is because with each successive line we are doing a 90 degree
rotation (anti-clockwise) plus a +ve y translation by Clw (top->left or
bottom->right) or Cld (left->bottom or right->top).

Incrementing child positions
----------------------------

Moving from one child to another is done in the following combinations:

========= ====== ======
Origin    '-'    '+'
========= ====== ======
top       right  left
left      up     down
bottom    left   right
right     down   up
========= ====== ======

"""

__author__ = 'Paul Ross'
__date__ = '2011-07-10'
__rights__ = 'Copyright (c) 2008-2017 Paul Ross'

from cpip import ExceptionCpip
from . import Coord


class ExceptionTreePlotTransform(ExceptionCpip):
    """Exception class for TreePlotTransform."""
    pass


class ExceptionTreePlotTransformRangeCtor(ExceptionTreePlotTransform):
    """Exception class for out of range input on construction."""


class TreePlotTransform(object):
    """Provides a means of re-interpreting the coordinate system when
    plotting trees.

    rootPosition = frozenset(['top', 'bottom', 'left', 'right']) default: 'top'

    sweepDirection = frozenset(['+', '-']) default: '-'

    Has functionality for interpreting width/depth to actual positions
    given rootPosition.
    """
    # position of the root node in the plot
    RANGE_ROOTPOS = ['top', 'left', 'bottom', 'right']
    RANGE_ROOTPOS_INT = range(len(RANGE_ROOTPOS))
    # Sweep direction of the children in the plot
    RANGE_SWEEPDIR = ['-', '+']
    RANGE_SWEEPDIR_INT = range(len(RANGE_SWEEPDIR))

    def __init__(self, theLogicalCanvas, rootPos='top', sweepDir='-'):
        """Constructor, takes a 'logical' Canvas as a Coord.Box and the
        orientation.

        :param theLogicalCanvas: The logical canvas to draw on.
        :type theLogicalCanvas: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([int, str]), cpip.plot.Coord.Dim([int, <class 'str'>])])``

        :param rootPos: Root position, one of ``('top', 'right', 'bottom', 'left')``.
        :type rootPos: ``str``

        :param sweepDir: Sweep direction, one of ``('-', '+')``.
        :type sweepDir: ``str``

        :returns: ``NoneType``
        """
        # canvas is stored as we need it for internal manipulations
        # Clw
        self._clw = theLogicalCanvas.width
        # Cld
        self._cld = theLogicalCanvas.depth
        try:
            # As integer for comparison speed
            self._rootPos = self.RANGE_ROOTPOS.index(rootPos)
        except ValueError:
            raise ExceptionTreePlotTransformRangeCtor(
                '"%s" not in: %s' % (rootPos, self.RANGE_ROOTPOS)
            )
        try:
            # As integer for comparison speed
            self._sweepDir = self.RANGE_SWEEPDIR.index(sweepDir)
        except ValueError:
            raise ExceptionTreePlotTransformRangeCtor(
                '"%s" not in: %s' % (sweepDir, self.RANGE_SWEEPDIR)
            )

    @property
    def rootPos(self):
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        return self.RANGE_ROOTPOS[self._rootPos]

    @property
    def sweepDir(self):
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        return self.RANGE_SWEEPDIR[self._sweepDir]

    @property
    def positiveSweepDir(self):
        """True if positive sweep, false otherwise."""
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        return self.RANGE_SWEEPDIR[self._sweepDir] == '+'

    def genRootPos(self):
        """Yield all possible root positions."""
        for aPos in self.RANGE_ROOTPOS:
            yield aPos

    def genSweepDir(self):
        """Yield all possible sweep directions."""
        for aDir in self.RANGE_SWEEPDIR:
            yield aDir

    #assert(self._rootPos in self.RANGE_ROOTPOS_INT)
    #if self._rootPos == 0: #'top':
    #elif self._rootPos == 1: #'left':
    #elif self._rootPos == 2: #'bottom':
    ##'right':

    def canvasP(self):
        """Returns a Coord.Box that describes the physical canvas.

        :returns: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- The canvas.
        """
        #Origin Cpw Cpd
        #------ --- ---
        #top    Clw Cld
        #left   Cld Clw
        #bottom Clw Cld
        #right  Cld Clw
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0: #'top'
            return Coord.Box(self._clw, self._cld)
        elif self._rootPos == 1: #'left'
            return Coord.Box(self._cld, self._clw)
        elif self._rootPos == 2: #'bottom'
            return Coord.Box(self._clw, self._cld)
        #'right'
        return Coord.Box(self._cld, self._clw)

    def boxP(self, theBl):
        """Given a logical box this returns a :py:class:`cpip.plot.Coord.Box`
        that describes the physical box.

        :param theBl: Logical box.
        :type theBl: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Physical box.
        """
        #Origin Wp Dp
        #top    Wl Dl
        #left   Dl Wl
        #bottom Wl Dl
        #right  Dl Wl
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0: #'top'
            return Coord.Box(theBl.width, theBl.depth)
        elif self._rootPos == 1: #'left'
            return Coord.Box(theBl.depth, theBl.width)
        elif self._rootPos == 2: #'bottom'
            return Coord.Box(theBl.width, theBl.depth)
        #'right'
        return Coord.Box(theBl.depth, theBl.width)

    def boxDatumP(self, theBlxy, theBl):
        """Given a logical point and logical box this returns a physical
        point that is the box datum ("upper left").

        :param theBlxy: Logical point.
        :type theBlxy: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBl: The box.
        :type theBl: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Box datum.
        """
        #Origin Bpx            Bpy
        #------ ---            ---
        #top    Blx            Bly
        #left   Bly            (Clw-Plx-Wl)
        #bottom (Clw-Plx-Wl)   (Cld-Ply-Dl)
        #right  (Cld-Ply-Dl)   Blx
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0: #'top'
            return Coord.Pt(theBlxy.x, theBlxy.y)
        elif self._rootPos == 1: #'left'
            return Coord.Pt(theBlxy.y, self._clw - theBlxy.x - theBl.width)
        elif self._rootPos == 2: #'bottom'
            return Coord.Pt(
                self._clw - theBlxy.x - theBl.width,
                self._cld - theBlxy.y - theBl.depth
            )
        #'right'
        return Coord.Pt(
            self._cld - theBlxy.y - theBl.depth,
            theBlxy.x
        )

    def tdcL(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns logical top dead centre of a box.

        :param theBlxy: Datum
        :type theBlxy: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBl: The box.
        :type theBl: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Top dead centre of the box.
        """
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0:
            #'top' so return logical top
            return Coord.Pt(theBlxy.x+theBl.width.scale(0.5), theBlxy.y)
        elif self._rootPos == 1:
            #'left' so return logical right
            return Coord.Pt(theBlxy.x+theBl.width, theBlxy.y+theBl.depth.scale(0.5))
        elif self._rootPos == 2:
            #'bottom' so return logical bottom
            return Coord.Pt(theBlxy.x+theBl.width.scale(0.5), theBlxy.y+theBl.depth)
        #'right' so return logical left
        return Coord.Pt(theBlxy.x, theBlxy.y+theBl.depth.scale(0.5))

    def bdcL(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns logical bottom dead centre of a box."""
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0:
            #'top' so return logical bottom
            return Coord.Pt(theBlxy.x+theBl.width.scale(0.5), theBlxy.y+theBl.depth)
        elif self._rootPos == 1:
            #'left' so return logical left
            return Coord.Pt(theBlxy.x, theBlxy.y+theBl.depth.scale(0.5))
        elif self._rootPos == 2:
            #'bottom' so return logical top
            return Coord.Pt(theBlxy.x+theBl.width.scale(0.5), theBlxy.y)
        #'right' so return logical right
        return Coord.Pt(theBlxy.x+theBl.width, theBlxy.y+theBl.depth.scale(0.5))

    def prevdcL(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns logical 'previous' dead centre of a box.

        :param theBlxy: Datum
        :type theBlxy: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBl: The box.
        :type theBl: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Previous dead centre of the box.
        """
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        if self._sweepDir == 0:
            # '-' so left dead centre
            return Coord.Pt(theBlxy.x, theBlxy.y+theBl.depth.scale(0.5))
        # '+' so right dead centre
        return Coord.Pt(theBlxy.x+theBl.width, theBlxy.y+theBl.depth.scale(0.5))

    def nextdcL(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns logical 'next' dead centre of a box.

        :param theBlxy: Datum
        :type theBlxy: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBl: The box.
        :type theBl: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Next dead centre of the box.
        """
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        if self._sweepDir == 0:
            # '-' so right dead centre
            return Coord.Pt(theBlxy.x+theBl.width, theBlxy.y+theBl.depth.scale(0.5))
        # '+' so left dead centre
        return Coord.Pt(theBlxy.x, theBlxy.y+theBl.depth.scale(0.5))

    def tdcP(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns physical top dead centre of a box."""
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        return self.pt(self.tdcL(theBlxy, theBl))

    def bdcP(self, theBlxy, theBl):
        """Given a logical datum (logical top left) and a logical box this
        returns physical bottom dead centre of a box."""
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        return self.pt(self.bdcL(theBlxy, theBl))

    def pt(self, thePt, units=None):
        """Given an arbitrary logical point as a Coord.Pt(), this returns the
        physical point as a Coord.Pt(). If units is supplied then the return
        value will be in those units.

        :param thePt: Logical point.
        :type thePt: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param units: Optional unit conversion.
        :type units: ``NoneType``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- Physical Point.
        """
        #Origin Ppx        Ppy
        #------ ---        ---
        #top    Plx        Ply
        #left   Ply        Clw-Plx
        #bottom Clw-Plx    Cld-Ply
        #right  Cld-Ply    Plx
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        if self._rootPos == 0: #'top':
            retVal = Coord.Pt(thePt.x, thePt.y)
        elif self._rootPos == 1: #'left':
            retVal = Coord.Pt(thePt.y, self._clw-thePt.x)
        elif self._rootPos == 2: #'bottom':
            retVal = Coord.Pt(self._clw-thePt.x, self._cld-thePt.y)
        else: #'right':
            retVal = Coord.Pt(self._cld-thePt.y, thePt.x)
        if units is not None:
            return retVal.convert(units)
        return retVal

    def startChildrenLogicalPos(self, thePt, theBox):
        """Returns the starting child logical datum point ('top-left') given
        the children logical datum point and the children.bbSigma. Returns a
        :py:class:`cpip.plot.Coord.Pt`. This takes into account the sweep
        direction.

        :param thePt: The logical point.
        :type thePt: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBox: The canvas.
        :type theBox: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- The 'physical' point.
        """
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        if self._sweepDir == 0:
            # '-' so right
            return thePt
        # '+' so left
        return Coord.Pt(thePt.x+theBox.width, thePt.y)

    def preIncChildLogicalPos(self, thePt, theBox):
        """Pre-increments the child logical datum point ('top-left') given
        the child logical datum point and the child.bbSigma. Returns a
        :py:class:`cpip.plot.Coord.Pt`. This takes into account the sweep
        direction.

        :param thePt: The logical point.
        :type thePt: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBox: The canvas.
        :type theBox: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- The 'physical' point.
        """
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        if self._sweepDir == 0:
            # '-' so right
            return thePt
        # '+' so left
        return Coord.Pt(thePt.x-theBox.width, thePt.y)

    def postIncChildLogicalPos(self, thePt, theBox):
        """Post-increments the child logical datum point ('top-left') given
        the child logical datum point and the child.bbSigma. Returns a
        :py:class:`cpip.plot.Coord.Pt`. This takes into account the sweep
        direction.

        :param thePt: The logical point.
        :type thePt: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :param theBox: The canvas.
        :type theBox: ``cpip.plot.Coord.Box([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])``

        :returns: ``cpip.plot.Coord.Pt([cpip.plot.Coord.Dim([float, str]), cpip.plot.Coord.Dim([float, <class 'str'>])])`` -- The 'physical' point.
        """
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        if self._sweepDir == 0:
            # '-' so right
            return Coord.Pt(thePt.x+theBox.width, thePt.y)
        # '+' so left
        return thePt

    def incPhysicalChildPos(self, thePt, theDim):
        """Given a child physical datum point and a distance to next child
        this returns the next child's physical datum point.
        TODO: Remove this as redundant?"""
        #Origin '-'    '+'
        #------ ---    ---
        #top    right  left
        #left   up     down
        #bottom left   right
        #right  down   up
        assert(self._rootPos in self.RANGE_ROOTPOS_INT)
        assert(self._sweepDir in self.RANGE_SWEEPDIR_INT)
        #assert(0)
        # We need to think about this as it is incomplete. We need a
        # 'where do I start' and a 'how do I increment'. These can be both
        # entirely logical coordinates.
        if self._rootPos == 0:
            # 'top'
            if self._sweepDir == 0:
                # '-' so right
                return Coord.Pt(thePt.x+theDim, thePt.y)
            # '+' so left
            return Coord.Pt(thePt.x-theDim, thePt.y)
        elif self._rootPos == 1:
            # 'left'
            if self._sweepDir == 0:
                # '-' so up
                return Coord.Pt(thePt.x, thePt.y-theDim)
            # '+' so down
            return Coord.Pt(thePt.x, thePt.y+theDim)
        elif self._rootPos == 2:
            # 'bottom'
            if self._sweepDir == 0:
                # '-' so left
                return Coord.Pt(thePt.x-theDim, thePt.y)
            # '+' so right
            return Coord.Pt(thePt.x+theDim, thePt.y)
        # 'right'
        if self._sweepDir == 0:
            # '-' so down
            return Coord.Pt(thePt.x, thePt.y+theDim)
        # '+' so up
        return Coord.Pt(thePt.x, thePt.y-theDim)
gpl-2.0
-5,947,760,120,347,143,000
42.995902
158
0.563857
false
guildai/guild
guild/commands/runs_delete.py
1
2568
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division

import click

from guild import click_util

from . import remote_support
from . import runs_support


@click.command("delete, rm")
@runs_support.runs_arg
@runs_support.all_filters
@remote_support.remote_option("Delete remote runs.")
@click.option("-y", "--yes", help="Do not prompt before deleting.", is_flag=True)
@click.option(
    "-p",
    "--permanent",
    help="Permanently delete runs so they cannot be recovered.",
    is_flag=True,
)
@click.pass_context
@click_util.use_args
@click_util.render_doc
def delete_runs(ctx, args):
    """Delete one or more runs.

    Runs are deleted by specifying `RUN` arguments. If a `RUN` argument
    is not specified, all runs matching the filter criteria are
    deleted. See SPECIFYING RUNS and FILTERING topics for more
    information on how runs are selected.

    By default, Guild will display the list of runs to be deleted and
    ask you to confirm the operation. If you want to delete the runs
    without being prompted, use the ``--yes`` option.

    **WARNING**: Take care when deleting runs using indexes as the
    runs selected with indexes can change. Review the list of runs
    carefully before confirming a delete operation.

    If a run is still running, Guild will stop it first before
    deleting it.

    If you delete a run by mistake, provided you didn't use the
    ``--permanent`` option, you can restore it using ``guild runs
    restore``.

    If you want to permanently delete runs, use the ``--permanent``
    option.

    **WARNING**: Permanently deleted runs cannot be restored.

    {{ runs_support.runs_arg }}

    If a `RUN` argument is not specified, ``:`` is assumed (all runs
    are selected).

    {{ runs_support.all_filters }}

    ### Delete Remote Runs

    To delete runs on a remote, use `--remote`.

    {{ remote_support.remote_option }}

    """
    from . import runs_impl

    runs_impl.delete_runs(args, ctx)
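For reference, a few invocations this command accepts, inferred only from the decorators above; the `rm` alias comes from the command name string, and none of these are verified against a specific Guild release:

# illustrative invocations (as comments; behavior inferred from the options above)
# guild runs rm 1 2          -> prompt, then delete runs with indexes 1 and 2
# guild runs delete -y       -> delete all selected runs without prompting
# guild runs rm -p 3         -> permanently delete run 3 (cannot be restored)
# guild runs rm --remote r1  -> delete runs on the remote named 'r1'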
apache-2.0
7,796,304,615,347,681,000
29.571429
81
0.709112
false
tulians/tm
tm/git.py
1
3113
#!/usr/bin/env python3
# Git abstraction module.
# ===================================

# Built-in modules.
from subprocess import run, check_output, Popen, PIPE


def add_files(files):
    """Performs 'git add [-files]' operation."""
    if not files:
        raise ValueError("No files received for adding.")
    files_to_add = b" ".join(files)
    git_cmd_string = "git add {}".format(files_to_add.decode("utf-8"))
    run(git_cmd_string.strip().split(" "))
    print("Files successfully added.")


def status(flags=""):
    """Performs 'git status [-flags]' operation."""
    git_cmd_string = "git status {}".format(" ".join(flags))
    run(git_cmd_string.strip().split(" "))


def commit(message):
    """Performs 'git commit -m [-message]' operation."""
    if message == "" or not message:
        raise ValueError("No valid message/flag values received.")
    git_cmd_list = "git commit -m".split(" ")
    git_cmd_list.append(message)
    run(git_cmd_list)
    print("Files successfully committed.")


def push(server, branch):
    """Performs 'git push [-server] [-branch]' operation."""
    if not (server and branch):
        raise ValueError("Not valid server and/or branch values given.")
    git_cmd_string = "git push {0} {1}".format(server, branch)
    run(git_cmd_string.strip().split(" "))
    print("Files successfully pushed.")


def branch(name):
    """Performs 'git checkout -b [-name]' operation."""
    if (not name) or (name == ""):
        raise ValueError("No valid branch name received.")
    git_cmd_string = "git checkout -b {}".format(name)
    run(git_cmd_string.strip().split(" "))
    print("Branch successfully created.")


def merge(from_branch, to_branch):
    """Performs the merging of two branches."""
    if not (from_branch and to_branch):
        raise ValueError("No valid branch names received.")
    git_checkout_string = "git checkout {}".format(to_branch)
    run(git_checkout_string.strip().split(" "))
    git_merge_string = "git merge {}".format(from_branch)
    run(git_merge_string.strip().split(" "))
    git_delete_branch_local = "git branch -d {}".format(from_branch)
    run(git_delete_branch_local.strip().split(" "))
    git_delete_branch_remote = ("git push origin --delete {}".
                                format(from_branch))
    run(git_delete_branch_remote.strip().split(" "))
    push("origin", to_branch)
    print("Merging successfully completed.")


# --> Utilities.
def _changed_files():
    """Returns a list with all the changed files after the last commit."""
    status = Popen(("git", "status", "-s"), stdout=PIPE)
    files = check_output(('cut', '-c4-'), stdin=status.stdout)
    return list(filter(None, files.split(b"\n")))


def make(commit_message, server, branch, partials=False,
         files=None, flags=""):
    """Pushes commits to the remote repository."""
    # NOTE: the original signature used 'files=_changed_files()', which runs
    # git once at import time and freezes the result; computing the default
    # lazily here avoids that.
    if files is None:
        files = _changed_files()
    print("Labels state: {}".format(partials.status["exist"]))
    if not partials.status["exist"]:
        print("No preexisting partial commits.")
        add_files(files)
        status(flags)
        commit(commit_message)
        push(server, branch)
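A typical flow with this module, as a sketch. It assumes the module is importable as tm.git, and `Partials` below is a hypothetical stand-in for whatever object tm normally passes as `partials` (make() only requires a `status` dict with an "exist" key):

# usage sketch; Partials is a hypothetical stand-in
from tm import git

class Partials:
    status = {"exist": False}

git.branch("feature-x")                      # git checkout -b feature-x
git.make("Add feature x", "origin", "feature-x", partials=Partials())
git.merge("feature-x", "master")             # merge, delete branch, push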
mit
-3,249,015,099,976,167,000
34.781609
74
0.623514
false
basraven/Thesis
Implementation/Code/test10/classes/sparql.py
1
2810
class sparqler():
    PREFIXES = """
        PREFIX owl: <http://www.w3.org/2002/07/owl#>
        PREFIX geo: <http://www.w3.org/2003/01/geo/wgs84_pos#>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        PREFIX foaf: <http://xmlns.com/foaf/0.1/>
        PREFIX dc: <http://purl.org/dc/elements/1.1/>
        PREFIX : <http://dbpedia.org/resource/>
        PREFIX dbpedia2: <http://dbpedia.org/property/>
        PREFIX dbpedia: <http://dbpedia.org/>
        PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
        """

    @classmethod
    def sparql(self, query):
        from SPARQLWrapper import SPARQLWrapper, JSON
        sparql = SPARQLWrapper("http://dbpedia.org/sparql")
        sparql.setQuery(self.PREFIXES + query)
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        return results["results"]["bindings"]

    @classmethod
    def localSparql(self, path, query):
        import rdflib
        graph = rdflib.Graph()
        graph.parse(path)  # SHOULD IMPLY RDFS:LABEL
        results = graph.query(self.PREFIXES + query)
        return results

    @classmethod
    def getAllProperties(self, path):
        query = """
            select DISTINCT ?prop ?range ?label
            where {
                ?prop rdfs:range ?range .
                ?prop rdfs:label ?label .
            }
            """
        return self.localSparql(path, query)

    @classmethod
    def getOnLabel(self, entityname):
        result = self.sparql(self.getQueryOnLabel(entityname))
        if len(result) > 0:
            return [x['entity']['value'] for x in result][0]

    @classmethod
    def getQueryOnLabel(self, entityname):
        query = 'SELECT ?entity WHERE { '
        query += '?entity foaf:name "'+entityname+'"@en . '
        query += ' } LIMIT 1'
        return query

    @classmethod
    def getDictOnLabels(self, entitynamearray):
        # TODO: SHOULD BE USED, BUT CAN'T BECAUSE OF EMPTY VALUES,
        # CAN ALSO BE USED OFFLINE?!
        query = 'SELECT ?entity WHERE { '
        for item in entitynamearray:
            query += ' { ' + self.getOnLabel(item) + ' } UNION '
        query = query.rsplit("UNION", 1)[0]  # Remove the last UNION
        query += ' } '
        result = self.sparql(query)
        #return [x['entity']['value'] for x in result]

    @classmethod
    def mapResults(self, result, keys):
        returnarray = []
        for row in result:
            for (counter, item) in enumerate(row):
                print keys[counter]
                print row[item]['value']
        return returnarray
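A minimal round trip through the public endpoint (Python 2, matching the file; assumes network access and that the DBpedia endpoint is up):

# usage sketch: resolve a label to a DBpedia resource URI
uri = sparqler.getOnLabel("Tim Berners-Lee")
print uri  # e.g. a http://dbpedia.org/resource/... URI, if a match exists

# or run an arbitrary query; rows come back as JSON bindings
rows = sparqler.sparql('SELECT ?entity WHERE { ?entity foaf:name "Amsterdam"@en . } LIMIT 1')
print [r['entity']['value'] for r in rows]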
gpl-3.0
-104,760,214,582,648,260
33.280488
92
0.554804
false
jmd-dk/concept
concept/tests/friedmann/analyze.py
1
4940
# Imports from the CO𝘕CEPT code
from commons import *
from integration import cosmic_time, init_time

plt = get_matplotlib().pyplot

# Absolute path and name of the directory of this file
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(this_dir)

# As this non-compiled code should work regardless of whether
# the main CO𝘕CEPT code is compiled or not, we need to flood
# this name space with names from commons explicitly, as
# 'from commons import *' does not import C level variables.
commons_flood()

# Initiate the cosmic time and the scale factor,
# and do the call to CLASS if enable_class_background is True.
init_time()

# Array of scale factor values at which to compute the cosmic time
N_points = 50
scale_factors = logspace(log10(a_begin), log10(1), N_points)

# Compute the cosmic time for each value of the scale factor
cosmic_times = [cosmic_time(a) for a in scale_factors]

# Dependent on the mode, save the computed cosmic times
compiled = not user_params['_pure_python']
mode = f'class={enable_class_background}_compiled={compiled}'
np.savetxt(f'{this_dir}/t_{mode}.dat', cosmic_times)

# If all four data files exist, plot and analyse these
data_filenames = glob(f'{this_dir}/*.dat')
if sum([bool(re.search(rf'^{this_dir}/t_class=(True|False)_compiled=(True|False)\.dat$', fname))
        for fname in data_filenames]) == 4:
    masterprint('Analysing {} data ...'.format(this_test))
    # Load in the data
    all_times = {}
    for filename in data_filenames:
        if re.search('class=True', filename):
            key = 'CLASS'
        else:
            key = 'no CLASS'
        if re.search('compiled=True', filename):
            key += ', compiled'
        else:
            key += ', not compiled'
        all_times[key] = np.loadtxt(filename)
    # Plot the data
    fig_file = this_dir + '/result.png'
    plt.figure(figsize=(16, 12))
    markersize = 50
    for key, times in all_times.items():
        plt.loglog(scale_factors, times, '.',
                   markersize=markersize, alpha=1.0, label=key)
        markersize -= 10
    plt.xlim(a_begin, 1)
    plt.xlabel('$a$')
    plt.ylabel(rf'$t\,\mathrm{{[{unit_time}]}}$')
    # Using CLASS or not makes a difference at early times
    # due to the inclusion of e.g. radiation and neutrinos.
    # Find the latest time at which this difference is still important.
    rel_tol = 1e-2
    i = N_points
    for t1, t2 in zip(reversed(all_times['CLASS, compiled']),
                      reversed(all_times['no CLASS, compiled'])):
        i -= 1
        if not isclose(t1, t2, rel_tol=rel_tol):
            # Time found. Update plot.
            a = scale_factors[i]
            ylim = plt.gca().get_ylim()
            plt.loglog([a, a], ylim, 'k:', zorder=-1)
            plt.text(1.1*a, 0.4*ylim[1],
                     r'$\leftarrow$ $1\%$ disagreement between' + '\n'
                     r'$\leftarrow$ CLASS and no CLASS',
                     fontsize=16,
                     )
            plt.ylim(ylim)
            # If this time is too late, something is wrong
            a_max_allowed = 0.1
            if a > a_max_allowed:
                abort(f'A discrepancy in t(a) of 1% between CLASS and the built-in '
                      f'Friedmann equation is present as late as a = {a}, '
                      f'which is too extreme to be acceptable.\n'
                      f'See "{fig_file}" for a visualization.'
                      )
            break
    plt.legend(loc='best', fontsize=16).get_frame().set_alpha(0.7)
    plt.tight_layout()
    plt.savefig(fig_file)
    # Whether we are running in compiled mode or not
    # really should not make a big difference.
    # When using CLASS, a real (but still small) difference
    # appears because we are using cubic splines in compiled mode
    # and linear splines in pure Python mode. When not compiled,
    # the only difference is round-off errors.
    # Check that this is actually the case.
    rel_tol = 1e-5
    if not all(isclose(t1, t2, rel_tol=rel_tol)
               for t1, t2 in zip(all_times['CLASS, compiled'],
                                 all_times['CLASS, not compiled'])
               ):
        abort('The cosmic times computed via interpolation of CLASS data '
              'are different between compiled and pure Python mode.\n'
              f'See "{fig_file}" for a visualization.'
              )
    rel_tol = 1e+3*machine_ϵ
    if not all(isclose(t1, t2, rel_tol=rel_tol)
               for t1, t2 in zip(all_times['no CLASS, compiled'],
                                 all_times['no CLASS, not compiled'])
               ):
        abort('The cosmic times computed via the simple Friedmann equation '
              'are different between compiled and pure Python mode.\n'
              f'See "{fig_file}" for a visualization.'
              )
    masterprint('done')
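As an aside, for a flat matter-dominated universe the Friedmann equation integrates in closed form, t(a) = 2/(3*H0*sqrt(Ωm)) * a**1.5, which gives an independent rough cross-check of cosmic_time at late times. A sketch; it assumes the Hubble constant and matter density parameter are exposed by the flooded commons namespace as H0 and Ωm, which may not be their actual names:

# rough analytic cross-check (matter domination only; radiation and Λ neglected)
a = 0.5
t_analytic = 2/(3*H0*np.sqrt(Ωm))*a**1.5
print(cosmic_time(a)/t_analytic)  # expect a ratio of order unity, not exactly 1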
gpl-3.0
9,141,373,418,933,460,000
41.162393
97
0.591324
false
steven1695-cmis/steven1695-cmis-cs2
cs2quiz3.py
1
2383
#Section 1: Terminology
# 1) What is a recursive function?
#A function that calls itself.
#
#
# 2) What happens if there is no base case defined in a recursive function?
#It becomes an infinite recursion function with no way to end, although the
#computer has set limitations so that a function does not run forever even
#if it doesn't have a base case.
#
#
# 3) What is the first thing to consider when designing a recursive function?
#How the function will end, a base case.
#
#
# 4) How do we put data into a function call?
#With the parameters of a function. You plug in your values for it to use.
#
#
# 5) How do we get data out of a function call?
#Return.
#
#

#Section 2: Reading
# Read the following function definitions and function calls.
# Then determine the values of the variables a1-d3.
#a1 = 8
#a2 = 8
#a3 = -1
#b1 = 2
#b2 = 2
#b3 = 4
#c1 = -2
#c2 = 4
#c3 = 45
#d1 = 6
#d2 = 8
#d3 = 4

#Section 3: Programming
#Write a script that asks the user to enter a series of numbers.
#When the user types in nothing, it should return the average of all the odd numbers
#that were typed in.
#In your code for the script, add a comment labeling the base case on the line BEFORE the base case.
#Also add a comment label BEFORE the recursive case.
#It is NOT NECESSARY to print out a running total with each user input.

def question(count, total):
    num = raw_input("Next: ")
    # Base case: empty input ends the recursion and reports the average.
    if num == '':
        ans = float(total)/float(count)
        print "The average of your odd numbers was " + str(ans) + "."
    # Recursive case: even numbers are skipped ...
    elif float(num)%2 == 0:
        return question(count, total)
    # Recursive case: ... and odd numbers are added to the running total.
    elif float(num)%2 == 1:
        return question(count+1, total+int(num))
    else:
        exit()

def main():
    question(0, 0)

#main()

def function1(x, n):
    if n <= 0:
        return x
    else:
        return 1 + function1(x, n-1)

def function2(a, b):
    if a <= 0:
        return 1 + function2(a+1, b**a)
    else:
        return b

def function3(a, b):
    if b <= 0:
        return a
    else:
        return a + b + function3(a, b - 1)

def function4(a, b, c):
    if a > b and a > c:
        return 1 + function4(b + 1, c, a)
    elif b > a or b > c:
        return 1 - function4(b - 1, c, a)
    else:
        return a + b + c

print function1(2, 6)
print function1(6, 2)
print function1(-1, -1)
print function2(2, 2)
print function2(0, 0)
print function2(-2, -2)
print function3(-2, 3)
print function3(4, -2)
print function3(5, 5)
print function4(1,2,3)
print function4(3,2,1)
print function4(1,3,2)
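One of the Section 2 answers, traced step by step to show where the value comes from (function3 keeps adding a + b terms until b reaches the base case):

# worked trace for c1 = function3(-2, 3):
# function3(-2, 3) = -2 + 3 + function3(-2, 2)
#                  =  1 + (-2 + 2 + function3(-2, 1))
#                  =  1 + 0 + (-2 + 1 + function3(-2, 0))
#                  =  1 + 0 + (-1) + (-2)   # base case returns a = -2
#                  = -2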
cc0-1.0
9,110,095,377,412,258,000
20.468468
180
0.67394
false
ezralanglois/arachnid
arachnid/core/metadata/formats/frealign.py
1
15369
''' Read/Write a table in the Frealign PAR format

This module reads from and writes to the Frealign PAR format. An example of the file:

.. container:: bottomnav, topic

    | C Align-Fspace parameter file
    | C
    | C PSI THETA PHI SHX SHY MAG FILM DF1 DF2
    | 1 8.256 120.864 223.096 -2.512 2.705 50000. 8743 31776.1 31280.3 0.82 0.22
    | 2 190.212 77.496 85.677 -1.536 6.947 50000. 8743 31776.1 31280.3 0.82 0.24
    | 3 284.119 88.178 224.544 6.250 -1.101 50000. 8743 31776.1 31280.3 0.82 0.28
    | 4 190.580 92.432 42.037 1.534 0.438 50000. 8743 31776.1 31280.3 0.82 0.31
    | 5 275.692 269.766 288.658 1.054 2.737 50000. 8743 31776.1 31280.3 0.82 0.32
    | 6 120.020 129.086 295.909 8.228 -2.400 50000. 8743 31776.1 31280.3 0.82 0.31
    | 7 282.421 80.952 176.528 3.590 2.651 50000. 8743 31776.1 31280.3 0.82 0.31
    | 8 63.624 295.406 120.192 6.332 -5.979 50000. 8743 31776.1 31280.3 0.82 0.29

It supports the following attributes:

    - Extension: par
    - Filter: Frealign (\*.par)

.. Created on Sep 28, 2010
.. codeauthor:: Robert Langlois <[email protected]>
'''
from .. import format_utility
from ..factories import namedtuple_factory
import logging

_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)


class read_iterator(object):
    '''Start format parsing iterator

    .. sourcecode:: py

        >>> header = []
        >>> fin = open("data.par", 'r')
        >>> factory, header, lastline = read_header(fin, header)
        >>> header
        ['id', 'psi', 'theta', 'phi', 'shx', 'shy', 'mag', 'film', 'defocusu', 'defocusv', 'unk1', 'unk2']
        >>> reader = read_iterator(fin, len(header), lastline)
        >>> map(factory, reader)

    :Parameters:

    fin : file stream
          Input file stream
    hlen : integer
           Length of the header
    lastline : string
               Last line read during header parsing; parsed here as the first data line
    numeric : boolean
              If true then convert string values to numeric
    columns : list
              List of columns to read otherwise None (all columns)
    extra : dict
            Unused keyword arguments
    '''

    __slots__ = ("fin", "hlen", "lastline", "numeric", "columns")

    def __init__(self, fin, hlen, lastline="", numeric=False, columns=None, **extra):
        "Create a read iterator"
        self.fin = fin
        self.hlen = hlen
        self.lastline = lastline
        self.numeric = numeric
        self.columns = columns

    def __iter__(self):
        '''Get iterator for class

        This class defines its own iterator.

        :Returns:

        val : iterator
              Self
        '''
        return self

    def next(self):
        '''Go to the next non-comment line

        This method skips to the next non-comment line, parses the line into a
        list of values and returns those values. It raises StopIteration when
        it is finished.

        :Returns:

        val : list
              List of values parsed from current line of the file
        '''
        if self.lastline == "":
            while True:
                line = self.fin.readline()
                if line == "":
                    self.fin.close()
                    raise StopIteration
                line = line.strip()
                if line == "" or line[0] == 'C' or line[0] == '#':
                    continue
                break
        else:
            line = self.lastline
            self.lastline = ""
        vals = line.split()
        if self.hlen != len(vals):
            # Fixed-width columns can run together around a minus sign,
            # e.g. "6.332-5.979"; split the last token back into columns.
            try:
                val1, val2 = vals[-1].split('-')
            except:
                try:
                    val1, val2, val3 = vals[-1].split('-')
                except:
                    pass
                else:
                    val1 += val2
                    vals[-1] = val1
                    vals.append(val3)
            else:
                vals[-1] = val1
                vals.append(val2)
        if self.hlen != len(vals):
            raise format_utility.ParseFormatError, "Header length does not match values: " + str(self.hlen) + " != " + str(len(vals)) + " --> " + str(vals)
        if self.columns is not None:
            vals = [vals[c] for c in self.columns]  # a list cannot be indexed by another list
        if self.numeric:
            return [format_utility.convert(v) for v in vals]
        return vals


def read_header(filename, header=[], factory=namedtuple_factory, **extra):
    '''Parses the header of a Frealign PAR file

    .. sourcecode:: py

        >>> header = []
        >>> fin = open("data.par", 'r')
        >>> factory, header, lastline = read_header(fin, header)
        >>> header
        ['id', 'psi', 'theta', 'phi', 'shx', 'shy', 'mag', 'film', 'defocusu', 'defocusv', 'unk1', 'unk2']

    :Parameters:

    filename : string or stream
               Input filename or stream
    header : list
             List of strings overriding parsed header
    factory : Factory
              Class or module that creates the container for the values returned by the parser
    extra : dict
            Unused keyword arguments

    :Returns:

    val : container
          Container with the given header values
    header : list
             List of column names
    lastline : string
               First data line, already consumed during header parsing
    '''
    fin = open(filename, 'r') if isinstance(filename, str) else filename
    try:
        last = None
        while True:  # Remove header comments
            line = fin.readline()
            if line == "":
                raise format_utility.ParseFormatError, "Not a Frealign file or empty"
            if line[0] != 'C':
                break
            last = line
        if last is None:
            raise ValueError, "Not frealign format"
        vals = line.split()
        try:
            id = int(vals[0])
        except:
            id = -1
        if len(vals) < 2 or id != 1:
            raise format_utility.ParseFormatError, "Not a valid PAR file"
        tmpheader = ['id', 'psi', 'theta', 'phi', 'shx', 'shy', 'mag', 'film', 'defocusu', 'defocusv', 'unk1', 'unk2']
        # PSI THETA PHI SHX SHY MAG FILM DF1 DF2
        tot = len(vals)
        if last is not None and len(last) > 0 and tot == (len(last.split()[1:]) + 1):
            tmpheader = ['id'] + last.split()[1:]
        if isinstance(header, dict):
            if len(header) == 0:
                raise ValueError, "Dictionary header cannot have zero elements"
            for key, val in header.iteritems():
                tmpheader[val] = key
        elif len(header) == 0:
            header.extend(tmpheader)
        if tot != len(header):
            raise format_utility.ParseFormatError, "Header does not match the file: %s" % header
        if isinstance(filename, str):
            fin.close()
        return factory.create(header, **extra), header, line
    except:
        fin.close()
        raise
    else:
        fin.close()
        raise format_utility.ParseFormatError, "Cannot parse header of Frealign document file - end of document"


def reader(filename, header=[], lastline="", **extra):
    '''Creates a Frealign PAR read iterator

    .. sourcecode:: py

        >>> header = []
        >>> fin = open("data.par", 'r')
        >>> factory, header, lastline = read_header(fin, header)
        >>> r = reader(fin, header, lastline)
        >>> map(factory, r)

    :Parameters:

    filename : string or stream
               Input filename or input stream
    header : list
             List of strings overriding parsed header
    lastline : string
               Last line read by header parser, first line to parse
    extra : dict
            Unused keyword arguments

    :Returns:

    val : iterator
          Frealign PAR read iterator
    '''
    fin = open(filename, 'r') if isinstance(filename, str) else filename
    return read_iterator(fin, len(header), lastline, **extra)


############################################################################################################
# Write format                                                                                             #
############################################################################################################


def write(filename, values, factory=namedtuple_factory, **extra):
    '''Write a metadata (Frealign) file

    .. sourcecode:: py

        >>> BasicTuple = namedtuple("BasicTuple", "psi,theta,phi")
        >>> values = [ BasicTuple(8.256, 120.864, 223.096), BasicTuple(190.212, 77.496, 85.677) ]
        >>> write("data.par", values)

    :Parameters:

    filename : string or stream
               Output filename or stream
    values : container
             Value container such as a list or an ndarray
    factory : Factory
              Class or module that creates the container for the values returned by the parser
    extra : dict
            Unused keyword arguments
    '''
    fout = open(filename, 'w') if isinstance(filename, str) else filename
    write_header(fout, values, factory, **extra)
    write_values(fout, values, factory, **extra)
    if isinstance(filename, str):
        fout.close()


def write_header(fout, values, factory=namedtuple_factory, **extra):
    '''Write a Frealign PAR header: a single comment line naming the columns

    .. sourcecode:: py

        >>> BasicTuple = namedtuple("BasicTuple", "psi,theta,phi")
        >>> values = [ BasicTuple(8.256, 120.864, 223.096), BasicTuple(190.212, 77.496, 85.677) ]
        >>> write_header("data.par", values)

    :Parameters:

    fout : string or stream
           Output filename or stream
    values : container
             Value container such as a list or an ndarray
    factory : Factory
              Class or module that creates the container for the values returned by the parser
    extra : dict
            Unused keyword arguments
    '''
    header = factory.get_header(values, **extra)
    fout.write("C ")
    for h in header:
        fout.write(" " + h.rjust(11))
    fout.write("\n")


def write_values(fout, values, factory=namedtuple_factory, header=None, write_offset=1, **extra):
    '''Write Frealign PAR values, one numbered row per entry

    .. sourcecode:: py

        >>> BasicTuple = namedtuple("BasicTuple", "psi,theta,phi")
        >>> values = [ BasicTuple(8.256, 120.864, 223.096), BasicTuple(190.212, 77.496, 85.677) ]
        >>> write_values("data.par", values)

    :Parameters:

    fout : string or stream
           Output filename or stream
    values : container
             Value container such as a list or an ndarray
    factory : Factory
              Class or module that creates the container for the values returned by the parser
    header : list
             List of strings overriding parsed header
    write_offset : integer
                   Index of the first row
    extra : dict
            Unused keyword arguments
    '''
    if "float_format" in extra:
        del extra["float_format"]
    header = factory.get_header(values, header=header, offset=False, **extra)
    index = write_offset
    header = factory.get_header(values, header=header, offset=True, **extra)
    for v in values:
        vals = factory.get_values(v, header, float_format="%11g", **extra)
        fout.write("%d " % (index, ))
        fout.write(" ".join(vals))
        fout.write("\n")
        index += 1


############################################################################################################
# Extension and Filters                                                                                    #
############################################################################################################


def extension():
    '''Get extension of the Frealign format

    :Returns:

    val : string
          File extension - par
    '''
    return "par"


def filter():
    '''Get filter of the Frealign format

    :Returns:

    val : string
          File filter - Frealign (\*.par)
    '''
    return "Frealign (*.par)"
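The trickiest part of the parser above is re-splitting fixed-width columns that run together around a negative value. A standalone sketch of the same idea, independent of the arachnid package (the helper name and regex are illustrative):

import re

def split_par_row(line, ncols):
    """Split a Frealign PAR data row; if adjacent columns ran together
    around a minus sign (e.g. '6.332-5.979'), re-tokenize by number."""
    vals = line.split()
    if len(vals) != ncols:
        vals = re.findall(r'-?\d+\.?\d*', line)
    return vals

row = "8 63.624 295.406 120.192 6.332-5.979 50000. 8743"
print(split_par_row(row, 8))
# ['8', '63.624', '295.406', '120.192', '6.332', '-5.979', '50000.', '8743']

Unlike the split('-') approach in the module, the regex keeps the sign attached to the second value.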
gpl-2.0
-7,479,898,496,465,285,000
37.136476
212
0.514607
false
RetailMeNotSandbox/dartclient
tests/test_model_factory.py
1
1515
def test_create_datastore(model_factory, model_defaults):
    datastore = model_factory.create_datastore()
    assert_common_properties(datastore, model_defaults)


def test_create_workflow(model_factory, model_defaults):
    workflow = model_factory.create_workflow()
    assert_common_properties(workflow, model_defaults)
    assert workflow.data.on_failure_email == model_defaults.get('on_failure_email')
    assert workflow.data.on_started_email == model_defaults.get('on_started_email')
    assert workflow.data.on_success_email == model_defaults.get('on_success_email')


def test_create_action(model_factory, model_defaults):
    action = model_factory.create_action()
    assert_common_properties(action, model_defaults)
    assert action.data.on_failure_email == model_defaults.get('on_failure_email')
    assert action.data.on_success_email == model_defaults.get('on_success_email')


def test_create_trigger(model_factory, model_defaults):
    trigger = model_factory.create_trigger()
    assert_common_properties(trigger, model_defaults)


def test_create_dataset(model_factory, model_defaults):
    dataset = model_factory.create_dataset()
    assert_common_properties(dataset, model_defaults)
    assert dataset.data.data_format is not None


def assert_common_properties(obj, model_defaults):
    assert obj.created is None
    assert obj.data is not None
    assert obj.id is None
    assert obj.updated is None
    assert obj.version_id is None
    assert obj.data.tags == model_defaults['tags']
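These tests lean on model_factory and model_defaults fixtures defined in a conftest that is not part of this record. A plausible sketch of that wiring (the import path and default values are assumptions, not dartclient's actual conftest):

# conftest.py -- hypothetical fixture wiring for the tests above
import pytest
from dartclient.core import ModelFactory  # assumed import path

@pytest.fixture
def model_defaults():
    # illustrative defaults covering the keys the tests read
    return {
        'on_failure_email': 'failure@example.com',
        'on_started_email': 'started@example.com',
        'on_success_email': 'success@example.com',
        'tags': ['team:data'],
    }

@pytest.fixture
def model_factory(model_defaults):
    return ModelFactory(**model_defaults)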
mit
8,079,510,072,219,188,000
36.875
83
0.743894
false
joaduo/mepinta
core/python_core/mepinta/pipeline/hi/value_manager/ValueManager.py
1
4051
# -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo, [email protected]

This file is part of Mepinta.

Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from mepinta.pipeline.hi.base import unwrap_decorator, \
    unwrapLo
from mepinta.plugins_manifest.proxy.data_model import GenericEnumProxy
from mepinta.pipeline.hi.FactoryLo import FactoryLo
from common.abstract.FrameworkBase import FrameworkBase


# TODO: should be able to set ctypes structs. It will be implemented on C
#       + Ctypes. But not in this module
class ValueManager(FrameworkBase):  # TODO: rename to TypedValueManager

    def __post_init__(self):
        self._value_manager_lo = FactoryLo(
            self.context).getInstance('ValueManager', self.context)
        self._get_dispatch_dict = self.__getDispatchDict('get')
        self._set_dispatch_dict = self.__getDispatchDict('set')

    def getValue(self, pline, prop):
        if prop.data_type_name in self._get_dispatch_dict:
            return self._get_dispatch_dict[prop.data_type_name](pline, prop)
        else:
            self.log.warning('Unsupported getter for type %s' %
                             prop.getDataTypeShortName())

    def setValue(self, pline, prop, value):
        if isinstance(prop, GenericEnumProxy):
            self.__setGenericEnum(pline, prop, value)
        elif prop.data_type_name in self._set_dispatch_dict:
            self._set_dispatch_dict[prop.data_type_name](pline, prop, value)
        else:
            self.log.warning('Unsupported setter for type %s' %
                             prop.getDataTypeShortName())

    @unwrap_decorator
    def markChanged(self, pline, prop):
        self._value_manager_lo.markChangedProps(pline, [prop])

    # TODO: distinguish multiple from mono values
    def __getDispatchDict(self, prefix):
        dispatch_template = {'c.builtin.int': ('Int', int),
                             'c.builtin.double': ('Double', float),
                             'c.builtin.charp': ('Charp', str),
                             'cpp.std.string': ('StdString', str),
                             }
        dispatch_dict = {}
        for data_type_name, (data_type_nick, type_cast) in dispatch_template.items():
            method = getattr(self._value_manager_lo,
                             '%s%ss' % (prefix, data_type_nick))
            unwrap_and_cast = self._getUnwrapAndCast(prefix, method, type_cast)
            dispatch_dict[data_type_name] = unwrap_and_cast
        return dispatch_dict

    def _getUnwrapAndCast(self, prefix, method, type_cast):
        if prefix == 'set':
            def unwrapAndCast(pline, prop, value):
                method(unwrapLo(pline), [unwrapLo(prop)], [type_cast(value)])
        elif prefix == 'get':
            def unwrapAndCast(pline, prop):
                values = method(unwrapLo(pline), [unwrapLo(prop)])
                if len(values) > 0:
                    return values[0]
                else:
                    return None
        return unwrapAndCast

    def __setGenericEnum(self, pline, prop, value):
        if value in prop.enum_dict:
            value = prop.enum_dict[value]
        elif value not in prop.enum_dict.values():
            self.log.w('Setting default value for generic enum.')
            value = prop.default_value
        self.__setInt(pline, prop, value)

    @unwrap_decorator
    def __setInt(self, pline, prop, int_):
        self._value_manager_lo.setInts(pline, [prop], [int(int_)])
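ValueManager builds its getter/setter tables once using the dispatch-dict pattern: each type name maps to a closure that already knows which low-level method and cast to use. A self-contained sketch of the pattern (the store and type names here are illustrative):

def build_setters(store):
    template = {
        'c.builtin.int': int,
        'c.builtin.double': float,
        'cpp.std.string': str,
    }
    dispatch = {}
    for type_name, cast in template.items():
        # bind `cast` per iteration via a default argument; a plain closure
        # over the loop variable would late-bind to the last value
        def setter(key, value, _cast=cast):
            store[key] = _cast(value)
        dispatch[type_name] = setter
    return dispatch

store = {}
setters = build_setters(store)
setters['c.builtin.double']('threshold', '0.5')
print(store)  # {'threshold': 0.5}

The module sidesteps the late-binding pitfall differently: each closure is built inside a separate _getUnwrapAndCast call, so the loop variables are already bound.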
gpl-3.0
5,921,391,486,611,870,000
41.197917
85
0.625278
false
mcs07/ChemDataExtractor
chemdataextractor/biblio/person.py
1
11216
# -*- coding: utf-8 -*- """ chemdataextractor.biblio.person ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tools for parsing people's names from strings into various name components. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re import string from ..text import QUOTES from ..text.latex import latex_to_unicode ORCID_RE = re.compile(r'^\d{4}-\d{4}-\d{4}-\d{4}$') TITLES = { 'ms', 'miss', 'mrs', 'mr', 'master', 'dr', 'doctor', 'prof', 'professor', 'sir', 'dame', 'madam', 'madame', 'mademoiselle', 'monsieur', 'lord', 'lady', 'rev', 'reverend', 'fr', 'father', 'brother', 'sister', 'pastor', 'cardinal', 'abbot', 'abbess', 'friar', 'mother', 'bishop', 'archbishop', 'priest', 'priestess', 'pope', 'vicar', 'chaplain', 'saint', 'deacon', 'archdeacon', 'rabbi', 'ayatollah', 'imam', 'pres', 'president', 'gov', 'governor', 'rep', 'representative', 'sen', 'senator', 'minister', 'chancellor', 'cllr', 'councillor', 'secretary', 'speaker', 'alderman', 'delegate', 'mayor', 'ambassador', 'prefect', 'premier', 'envoy', 'provost', 'coach', 'principal', 'king', 'queen', 'prince', 'princess', 'royal', 'majesty', 'highness', 'rt', 'duke', 'duchess', 'archduke', 'archduchess', 'marquis', 'marquess', 'marchioness', 'earl', 'count', 'countess', 'viscount', 'viscountess', 'baron', 'baroness', 'sheikh', 'emperor', 'empress', 'tsar', 'tsarina', 'uncle', 'auntie', 'aunt', 'atty', 'attorney', 'advocate', 'judge', 'solicitor', 'barrister', 'comptroller', 'sheriff', 'registrar', 'treasurer', 'associate', 'assistant', 'honorable', 'honourable', 'deputy', 'vice', 'executive', 'his', 'her', 'private', 'corporal', 'sargent', 'seargent', 'officer', 'major', 'captain', 'commander', 'lieutenant', 'colonel', 'general', 'chief', 'admiral', 'pilot', 'resident', 'surgeon', 'nurse', 'col', 'capt', 'cpt', 'maj', 'cpl', 'ltc', 'sgt', 'pfc', 'sfc', 'mg', 'bg', 'ssgt', 'ltcol', 'majgen', 'gen', 'ltgen', 'sgtmaj', 'bgen', 'lcpl', '2ndlt', '1stlt', 'briggen', '1stsgt', 'pvt', '2lt', '1lt', 'ens', 'lt', 'adm', 'vadm', 'cpo', 'mcpo', 'mcpoc', 'scpo', 'radm(lh)', 'radm(uh)', 'ltg' } PREFIXES = { 'abu', 'bon', 'bin', 'da', 'dal', 'de', 'del', 'der', 'de', 'di', 'dí', 'ibn', 'la', 'le', 'san', 'st', 'ste', 'van', 'vel', 'von' } SUFFIXES = { 'Esq', 'Esquire', 'Bt', 'Btss', 'Jr', 'Sr', '2', 'I', 'II', 'III', 'IV', 'V', 'CLU', 'ChFC', 'CFP', 'MP', 'MSP', 'MEP', 'AM', 'MLA', 'QC', 'KC', 'PC', 'SCJ', 'MHA', 'MNA', 'MPP', 'VC', 'GC', 'KBE', 'CBE', 'MBE', 'DBE', 'GBE', 'OBE', 'MD', 'PhD', 'DBEnv', 'DConstMgt', 'DREst', 'EdD', 'DPhil', 'DLitt', 'DSocSci', 'EngD', 'DD', 'LLD', 'DProf', 'BA', 'BSc', 'LLB', 'BEng', 'MBChB', 'MA', 'MSc', 'MSci', 'MPhil', 'MArch', 'MMORSE', 'MMath', 'MMathStat', 'MPharm', 'MSt', 'MRes', 'MEng', 'MChem', 'MSocSc', 'MMus', 'LLM', 'BCL', 'MPhys', 'MComp', 'MAcc', 'MFin', 'MBA', 'MPA', 'MEd', 'MEnt', 'MCGI', 'MGeol', 'MLitt', 'MEarthSc', 'MClinRes', 'MJur', 'FdA', 'FdSc', 'FdEng', 'PgD', 'PgDip', 'PgC', 'PgCert', 'DipHE', 'OND', 'CertHE', 'RA', 'FRCP', 'FRSC', 'FRSA', 'FRCS', 'FMedSci', 'AMSB', 'MSB', 'FSB', 'FBA', 'FBCS', 'FCPS', 'FGS', 'FREng', 'FRS', 'FRAeS', 'FRAI', 'FRAS', 'MRCP', 'MRCS', 'MRCA', 'FRCA', 'MRCGP', 'FRCGP', 'MRSC', 'MRPharmS', 'FRPharmS', 'FZS', 'FRES', 'CBiol', 'CChem', 'CEng', 'CMath', 'CPhys', 'CSci' } SUFFIXES_LOWER = {suf.lower() for suf in SUFFIXES} NOT_SUFFIX = {'I.', 'V.'} # Make attributes instead of dict style. # Parse from string as a class method. 
# Mutable attributes that can be set via constructor or modified at any time. # to_dict, to_json method? class PersonName(dict): """Class for parsing a person's name into its constituent parts. Parses a name string into title, firstname, middlename, nickname, prefix, lastname, suffix. Example usage: p = PersonName('von Beethoven, Ludwig') PersonName acts like a dict: print p print p['firstname'] print json.dumps(p) Name components can also be access as attributes: print p.lastname Instances can be reused by setting the name property: p.name = 'Henry Ford Jr. III' print p Two PersonName objects are equal if every name component matches exactly. For fuzzy matching, use the `could_be` method. This returns True for names that are not explicitly inconsistent. This class was written with the intention of parsing BibTeX author names, so name components enclosed within curly brackets will not be split. """ # Useful info at http://nwalsh.com/tex/texhelp/bibtx-23.html # Issues: # - Prefix 'ben' is recognised as middlename. Could distinguish 'ben' and 'Ben'? # - Multiple word first names like "Emma May" or "Billy Joe" aren't supported def __init__(self, fullname=None, from_bibtex=False): """Initialize with a name string. :param fullname: A person name as a string. """ super(PersonName, self).__init__() self._from_bibtex = from_bibtex self.fullname = fullname def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.fullname) def __str__(self): return dict.__repr__(self) def could_be(self, other): """Return True if the other PersonName is not explicitly inconsistent.""" # TODO: Some suffix and title differences should be allowed if type(other) is not type(self): return NotImplemented if self == other: return True for attr in ['title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix']: if attr not in self or attr not in other: continue puncmap = dict((ord(char), None) for char in string.punctuation) s = self[attr].lower().translate(puncmap) o = other[attr].lower().translate(puncmap) if s == o: continue if attr in {'firstname', 'middlename', 'lastname'}: if (({len(comp) for comp in s.split()} == {1} and [el[0] for el in o.split()] == s.split()) or ({len(comp) for comp in o.split()} == {1} and [el[0] for el in s.split()] == o.split())): continue return False return True @property def fullname(self): return self.get('fullname', '') @fullname.setter def fullname(self, fullname): self.clear() self._parse(fullname) def __getattr__(self, name): if name in {'title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix'}: return self.get(name) else: raise AttributeError def _is_title(self, t): """Return true if t is a title.""" return t.lower().replace('.', '') in TITLES def _is_prefix(self, t): """Return true if t is a prefix.""" return t.lower().replace('.', '') in PREFIXES def _is_suffix(self, t): """Return true if t is a suffix.""" return t not in NOT_SUFFIX and (t.replace('.', '') in SUFFIXES or t.replace('.', '') in SUFFIXES_LOWER) def _tokenize(self, comps): """Split name on spaces, unless inside curly brackets or quotes.""" ps = [] for comp in comps: ps.extend([c.strip(' ,') for c in re.split(r'\s+(?=[^{}]*(?:\{|$))', comp)]) return [p for p in ps if p] def _clean(self, t, capitalize=None): """Convert to normalized unicode and strip trailing full stops.""" if self._from_bibtex: t = latex_to_unicode(t, capitalize=capitalize) t = ' '.join([el.rstrip('.') if el.count('.') == 1 else el for el in t.split()]) return t def _strip(self, tokens, criteria, prop, 
rev=False): """Strip off contiguous tokens from the start or end of the list that meet the criteria.""" num = len(tokens) res = [] for i, token in enumerate(reversed(tokens) if rev else tokens): if criteria(token) and num > i + 1: res.insert(0, tokens.pop()) if rev else res.append(tokens.pop(0)) else: break if res: self[prop] = self._clean(' '.join(res)) return tokens def _parse(self, fullname): """Perform the parsing.""" n = ' '.join(fullname.split()).strip(',') if not n: return comps = [p.strip() for p in n.split(',')] if len(comps) > 1 and not all([self._is_suffix(comp) for comp in comps[1:]]): vlj = [] while True: vlj.append(comps.pop(0)) if not self._is_suffix(comps[0]): break ltokens = self._tokenize(vlj) ltokens = self._strip(ltokens, self._is_prefix, 'prefix') ltokens = self._strip(ltokens, self._is_suffix, 'suffix', True) self['lastname'] = self._clean(' '.join(ltokens), capitalize='name') tokens = self._tokenize(comps) tokens = self._strip(tokens, self._is_title, 'title') if not 'lastname' in self: tokens = self._strip(tokens, self._is_suffix, 'suffix', True) voni = [] end = len(tokens) - 1 if not 'prefix' in self: for i, token in enumerate(reversed(tokens)): if self._is_prefix(token): if (i == 0 and end > 0) or (not 'lastname' in self and not i == end): voni.append(end - i) else: if (i == 0 and 'lastname' in self) or voni: break if voni: if not 'lastname' in self: self['lastname'] = self._clean(' '.join(tokens[voni[0]+1:]), capitalize='name') self['prefix'] = self._clean(' '.join(tokens[voni[-1]:voni[0]+1])) tokens = tokens[:voni[-1]] else: if not 'lastname' in self: self['lastname'] = self._clean(tokens.pop(), capitalize='name') if tokens: self['firstname'] = self._clean(tokens.pop(0), capitalize='name') if tokens: nicki = [] for i, token in enumerate(tokens): if token[0] in QUOTES: for j, token2 in enumerate(tokens[i:]): if token2[-1] in QUOTES: nicki = range(i, i+j+1) break if nicki: self['nickname'] = self._clean(' '.join(tokens[nicki[0]:nicki[-1]+1]).strip(''.join(QUOTES)), capitalize='name') tokens[nicki[0]:nicki[-1]+1] = [] if tokens: self['middlename'] = self._clean(' '.join(tokens), capitalize='name') namelist = [] for attr in ['title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix']: if attr in self: namelist.append('"%s"' % self[attr] if attr == 'nickname' else self[attr]) self['fullname'] = ' '.join(namelist)
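A usage sketch grounded in the class docstring above; it assumes the chemdataextractor package is installed (the module path matches this record's file path):

from chemdataextractor.biblio.person import PersonName

p = PersonName('von Beethoven, Ludwig')
print(p.firstname, p.prefix, p.lastname)  # Ludwig von Beethoven

# could_be allows initials to stand in for full names
q = PersonName('L. von Beethoven')
print(p.could_be(q))  # True

# Instances are reused via the `fullname` property; the docstring's
# `p.name = ...` appears to be a typo for `p.fullname = ...`.
p.fullname = 'Henry Ford Jr. III'
print(p.suffix)  # Jr III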
mit
7,306,425,480,414,132,000
42.301158
120
0.544539
false
Azure/azure-sdk-for-python
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2021_01_01/aio/operations/_tags_operations.py
1
27043
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core.exceptions import ARMErrorFormat from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class TagsOperations: """TagsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.resource.resources.v2021_01_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def delete_value( self, tag_name: str, tag_value: str, **kwargs: Any ) -> None: """Deletes a predefined tag value for a predefined tag name. This operation allows deleting a value from the list of predefined values for an existing predefined tag name. The value being deleted must not be in use as a tag value for the given tag name for any resource. :param tag_name: The name of the tag. :type tag_name: str :param tag_value: The value of the tag to delete. 
:type tag_value: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.delete_value.metadata['url'] # type: ignore path_format_arguments = { 'tagName': self._serialize.url("tag_name", tag_name, 'str'), 'tagValue': self._serialize.url("tag_value", tag_value, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore async def create_or_update_value( self, tag_name: str, tag_value: str, **kwargs: Any ) -> "_models.TagValue": """Creates a predefined value for a predefined tag name. This operation allows adding a value to the list of predefined values for an existing predefined tag name. A tag value can have a maximum of 256 characters. :param tag_name: The name of the tag. :type tag_name: str :param tag_value: The value of the tag to create. 
:type tag_value: str :keyword callable cls: A custom type or function that will be passed the direct response :return: TagValue, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.TagValue :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.create_or_update_value.metadata['url'] # type: ignore path_format_arguments = { 'tagName': self._serialize.url("tag_name", tag_name, 'str'), 'tagValue': self._serialize.url("tag_value", tag_value, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('TagValue', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('TagValue', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore async def create_or_update( self, tag_name: str, **kwargs: Any ) -> "_models.TagDetails": """Creates a predefined tag name. This operation allows adding a name to the list of predefined tag names for the given subscription. A tag name can have a maximum of 512 characters and is case-insensitive. Tag names cannot have the following prefixes which are reserved for Azure use: 'microsoft', 'azure', 'windows'. :param tag_name: The name of the tag to create. 
:type tag_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: TagDetails, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.TagDetails :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagDetails"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'tagName': self._serialize.url("tag_name", tag_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('TagDetails', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('TagDetails', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore async def delete( self, tag_name: str, **kwargs: Any ) -> None: """Deletes a predefined tag name. This operation allows deleting a name from the list of predefined tag names for the given subscription. The name being deleted must not be in use as a tag name for any resource. All predefined values for the given name must have already been deleted. :param tag_name: The name of the tag. 
:type tag_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'tagName': self._serialize.url("tag_name", tag_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'} # type: ignore def list( self, **kwargs: Any ) -> AsyncIterable["_models.TagsListResult"]: """Gets a summary of tag usage under the subscription. This operation performs a union of predefined tags, resource tags, resource group tags and subscription tags, and returns a summary of usage for each tag name and value under the given subscription. In case of a large number of tags, this operation may return a previously cached result. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either TagsListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2021_01_01.models.TagsListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('TagsListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames'} # type: ignore async def create_or_update_at_scope( self, scope: str, parameters: "_models.TagsResource", **kwargs: Any ) -> "_models.TagsResource": """Creates or updates the entire set of tags on a resource or subscription. This operation allows adding or replacing the entire set of tags on the specified resource or subscription. The specified entity can have a maximum of 50 tags. :param scope: The resource scope. 
:type scope: str :param parameters: :type parameters: ~azure.mgmt.resource.resources.v2021_01_01.models.TagsResource :keyword callable cls: A custom type or function that will be passed the direct response :return: TagsResource, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.TagsResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update_at_scope.metadata['url'] # type: ignore path_format_arguments = { 'scope': self._serialize.url("scope", scope, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsResource') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('TagsResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update_at_scope.metadata = {'url': '/{scope}/providers/Microsoft.Resources/tags/default'} # type: ignore async def update_at_scope( self, scope: str, parameters: "_models.TagsPatchResource", **kwargs: Any ) -> "_models.TagsResource": """Selectively updates the set of tags on a resource or subscription. This operation allows replacing, merging or selectively deleting tags on the specified resource or subscription. The specified entity can have a maximum of 50 tags at the end of the operation. The 'replace' option replaces the entire set of existing tags with a new set. The 'merge' option allows adding tags with new names and updating the values of tags with existing names. The 'delete' option allows selectively deleting tags based on given names or name/value pairs. :param scope: The resource scope. 
:type scope: str :param parameters: :type parameters: ~azure.mgmt.resource.resources.v2021_01_01.models.TagsPatchResource :keyword callable cls: A custom type or function that will be passed the direct response :return: TagsResource, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.TagsResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_at_scope.metadata['url'] # type: ignore path_format_arguments = { 'scope': self._serialize.url("scope", scope, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsPatchResource') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('TagsResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_at_scope.metadata = {'url': '/{scope}/providers/Microsoft.Resources/tags/default'} # type: ignore async def get_at_scope( self, scope: str, **kwargs: Any ) -> "_models.TagsResource": """Gets the entire set of tags on a resource or subscription. Gets the entire set of tags on a resource or subscription. :param scope: The resource scope. 
:type scope: str :keyword callable cls: A custom type or function that will be passed the direct response :return: TagsResource, or the result of cls(response) :rtype: ~azure.mgmt.resource.resources.v2021_01_01.models.TagsResource :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsResource"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.get_at_scope.metadata['url'] # type: ignore path_format_arguments = { 'scope': self._serialize.url("scope", scope, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('TagsResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_at_scope.metadata = {'url': '/{scope}/providers/Microsoft.Resources/tags/default'} # type: ignore async def delete_at_scope( self, scope: str, **kwargs: Any ) -> None: """Deletes the entire set of tags on a resource or subscription. Deletes the entire set of tags on a resource or subscription. :param scope: The resource scope. :type scope: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-01-01" accept = "application/json" # Construct URL url = self.delete_at_scope.metadata['url'] # type: ignore path_format_arguments = { 'scope': self._serialize.url("scope", scope, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete_at_scope.metadata = {'url': '/{scope}/providers/Microsoft.Resources/tags/default'} # type: ignore
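These operations are reached through the versioned async client that sits alongside this module. A hypothetical usage sketch (the subscription ID is a placeholder and azure-identity is an assumed extra dependency):

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.resource.resources.v2021_01_01.aio import ResourceManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with ResourceManagementClient(credential, "<subscription-id>") as client:
            # create a predefined tag name, then a value under it
            await client.tags.create_or_update("environment")
            await client.tags.create_or_update_value("environment", "staging")
            # page through the tag usage summary for the subscription
            async for tag in client.tags.list():
                print(tag.tag_name)

asyncio.run(main())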
mit
2,788,179,830,713,370,600
44.526936
136
0.638908
false
fedspendingtransparency/data-act-broker
dataactbroker/scripts/setupEmails.py
1
4631
from dataactcore.models.userModel import EmailTemplateType
from dataactbroker.handlers.userHandler import UserHandler
from dataactbroker.handlers.interfaceHolder import InterfaceHolder


def setupEmails():
    """Create email templates from model metadata."""
    userDb = UserHandler()

    # insert email template types
    typeList = [
        ('validate_email', ''),
        ('account_approved', ''),
        ('account_rejected', ''),
        ('reset_password', ''),
        ('account_creation', ''),
        ('account_creation_user', '')
    ]
    for t in typeList:
        emailId = userDb.session.query(
            EmailTemplateType.email_template_type_id).filter(
            EmailTemplateType.name == t[0]).one_or_none()
        if not emailId:
            templateType = EmailTemplateType(name=t[0], description=t[1])
            userDb.session.add(templateType)

    # insert email templates

    # Confirm
    template = "This email address was just used to create a user account with the DATA Act Broker. To continue the registration process, please click <a href='[URL]'>here</a>. The link will expire in 24 hours. <br /> <br /> If you did not initiate this process, you may disregard this email.<br /><br />The DATA Act Broker Helpdesk<br />[email protected]"
    userDb.loadEmailTemplate("DATA Act Broker - Registration", template, "validate_email")

    # Approve
    template = "Thank you for registering for a user account with the DATA Act Broker. Your request has been approved by the DATA Act Broker Help Desk. You may now log into the Data Broker portal, using the password you created at registration, by clicking <a href='[URL]'>here</a>.<br /><br /> If you have any questions, please contact the DATA Act Broker Help Desk at [EMAIL].<br /><br />DATA Act Broker Helpdesk<br />[email protected]"
    userDb.loadEmailTemplate("DATA Act Broker - Access Approved", template, "account_approved")

    # Reject
    template = "Thank you for requesting log-in credentials for the DATA Act Broker. Your attempt to register has been denied. If you believe this determination was made in error, please contact the DATA Act Broker Helpdesk at [email protected].<br /><br />DATA Act Broker Helpdesk<br />[email protected]"
    userDb.loadEmailTemplate("DATA Act Broker - Access Denied", template, "account_rejected")

    # Password Reset
    template = "You have requested your password to be reset for your account. Please click <a href='[URL]'>here</a> to start the process. The link will expire in 24 hours. <br/> <br/> If you did not request this password reset, please notify the DATA Act Broker Helpdesk ([email protected]) <br /><br />DATA Act Broker Helpdesk<br /><br />[email protected]"
    userDb.loadEmailTemplate("DATA Act Broker - Password Reset", template, "reset_password")

    # Admin Email
    template = "This email is to notify you that the following person has requested an account for the DATA Act Broker:<br /><br />Name: [REG_NAME]<br /><br />Title: [REG_TITLE]<br /><br />Agency: [REG_AGENCY]<br /><br />Email: [REG_EMAIL]<br /><br /><br /><br />To approve or deny this user for access to the Data Broker, please click <a href='[URL]'>here</a>.<br /><br />This action must be taken within 24 hours. <br /><br />Thank you for your prompt attention.<br /><br />DATA Act Broker Helpdesk<br />[email protected]"
    userDb.loadEmailTemplate("New Data Broker registration - Action Required", template, "account_creation")

    # User Email When finished submitting
    template = ("Thank you for registering a DATA Act Broker user account. "
                "The final registration step is for the Help Desk to review your "
                "request. You should receive an e-mail update from them within one "
                "business day, saying whether they've approved or denied your access."
                "<br /><br />"
                "Until the Help Desk approves your request, you won't be able to log "
                "into the Broker. Thanks for being patient with the security process--"
                "we appreciate your interest and look forward to working with you."
                "<br /><br/>"
                "If you have any questions or haven't received a follow-up e-mail "
                "within one business day, please get in touch with the Help Desk at "
                "[EMAIL]."
                "<br /><br />"
                "The DATA Act Implementation Team <br />"
                "[EMAIL]")
    userDb.loadEmailTemplate("DATA Act Broker - Registration", template, "account_creation_user")

    InterfaceHolder.closeOne(userDb)


if __name__ == '__main__':
    setupEmails()
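The template-type loop above is an idempotent insert: query by name with one_or_none and add only on a miss. A standalone sketch of the same pattern against a throwaway in-memory schema (the model and table names here are illustrative, not the broker's):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class TemplateType(Base):
    __tablename__ = 'email_template_type'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)
    description = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    for name, desc in [('validate_email', ''), ('reset_password', '')]:
        # insert only if the row is missing, so reruns are harmless
        exists = session.query(TemplateType.id).filter(
            TemplateType.name == name).one_or_none()
        if not exists:
            session.add(TemplateType(name=name, description=desc))
    session.commit()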
cc0-1.0
-6,575,477,788,770,621,000
66.115942
539
0.691211
false
odrolliv13/Hex-Photos
shop/views/new_account.py
1
3873
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib.auth import authenticate, login
from manager import models as pmod
from . import templater
from django.core.mail import send_mail, EmailMultiAlternatives
import decimal, datetime, string, random


def process_request(request):
    '''View that handles the functionality of creating a new account'''
    if request.user.is_authenticated():
        return HttpResponseRedirect('/shop/')

    form = UserForm(initial={
        'username': "",
        'email': "",
        'password': "",
        'retypepassword': "",
    })

    # when the user submits the form, a new user is created with the information
    # provided; a random confirmation link is generated and stored on the user's record
    if request.method == 'POST':
        form = UserForm(request.POST, request=request)
        if form.is_valid():
            user = pmod.User()
            user.email = form.cleaned_data['email']
            user.username = form.cleaned_data['username']
            user.set_password(form.cleaned_data['password'])
            user.confirmed = False
            link = ''.join(random.choice(string.ascii_uppercase) for i in range(15))
            date = datetime.datetime.now() + datetime.timedelta(hours=2)
            user.confirmedlink = link
            user.confirmeddate = date
            user.save()

            # this adds credit card information to the new user for testing purposes
            validcard = pmod.UserBilling()
            validcard.user = user
            validcard.name = "Cosmo Limesandal"
            visa = pmod.CardType.objects.get(name="Visa")
            validcard.cardtype = visa
            validcard.number = "4732817300654"
            validcard.security = "411"
            validcard.expmonth = 10
            validcard.expyear = 14
            validcard.save()

            # the following lines generate an email that is sent to the customer to confirm the account
            html = "<html><body>Please click <a href=\"http://www.djuvo.com/shop/confirmed/" + str(user.id) + "/" + str(user.confirmedlink) + "\">here</a> to verify your account.<br>Thank you!<br>HexPhotos</body></html>"
            message = "/shop/confirmed/" + str(user.id) + "/" + str(user.confirmedlink)
            message = html  # the plain-text body is overwritten with the HTML version
            msg = EmailMultiAlternatives('HexPhotos Confirmation Email', message, '[email protected]', [user.email])
            msg.attach_alternative(html, "text/html")
            msg.send()

            # once the account is created, the user is logged in
            loginuser = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
            login(request, loginuser)
            redirect = request.META.get("HTTP_REFERER")
            return HttpResponse('<script> window.location.href="' + redirect + '" </script>')

    tvars = {
        'form': form,
    }
    return templater.render_to_response(request, 'new_account.html', tvars)


class UserForm(forms.Form):
    username = forms.CharField(required=False, label='Username', widget=forms.TextInput(attrs={'class': 'form-control'}))
    email = forms.EmailField(required=False, label='Email', widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(required=False, label='Password', widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    retypepassword = forms.CharField(required=False, label='Confirm Password', widget=forms.PasswordInput(attrs={'class': 'form-control'}))

    def __init__(self, *args, **kwargs):
        self.request = kwargs.pop('request', None)
        super(UserForm, self).__init__(*args, **kwargs)

    def clean(self):
        allUsers = pmod.User.objects.all()
        for u in allUsers:
            if self.cleaned_data['email'] == u.email:
                raise forms.ValidationError("That email is already in use.")
        if self.cleaned_data['password'] == "":
            raise forms.ValidationError("You must enter a password.")
        if self.cleaned_data['password'] != self.cleaned_data['retypepassword']:
            raise forms.ValidationError("The passwords do not match.")
        return self.cleaned_data
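The view above builds its confirmation link from random.choice, which is predictable and therefore weak for a security token. A short sketch of the stdlib alternative, a drop-in for the link/date pair under the same two-hour expiry:

import secrets
from datetime import datetime, timedelta

link = secrets.token_urlsafe(16)              # cryptographically strong token
date = datetime.now() + timedelta(hours=2)    # same expiry window as the view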
apache-2.0
344,836,642,478,137,600
41.033333
210
0.700749
false
shadowmint/nwidget
tests/nwidget/textbox_tests.py
1
1879
#!/usr/bin/env python
from datetime import timedelta

try:
    import bootstrap
except:
    pass

import unittest
import nwidget
from nwidget.helpers import *


class Tests(PygletTestBase):

    def setup(self):
        return nwidget.Textbox(), nwidget.Assets()

    def shutdown(self):
        self.stop_pyglet()

    def test_can_create_instance(self):

        def update():
            if self._tested:
                if self._elpased > timedelta(seconds=8):
                    self.shutdown()

        def draw():
            if self._tested:
                self._window.clear()
                for i in self.__widgets:
                    i.draw()

        def runner():
            self.__widgets = []
            self.enable_blending()

            # Multiline textbox with edge wrapping
            i, a = self.setup()
            i.bounds(50, 110, 250, 210)
            i.text = "Hello World Thd sf s dfas df sdf dsf adf dsf dsf dsaf dsa fdsaf adsf adsf asdf asdf"
            i.font = a.resolve("data", "roboto.ttf")
            i.color = (255, 255, 0, 255)
            i.size = 10
            i.panel = a.resolve("data", "textbox_panel1.png")
            i.panel_focus = a.resolve("data", "textbox_panel2.png")
            i.register(self._window)
            i0 = i
            self.__widgets.append(i)

            # Single line textbox
            i, a = self.setup()
            i.bounds(10, 250, 390, 330)
            i.text = "Hello World"
            i.font = a.resolve("data", "roboto.ttf")
            i.color = (30, 30, 30, 255)
            i.multiline = False
            i.size = 12
            i.limit = 30
            i.register(self._window)
            i.panel = a.resolve("data", "textbox_panel1.png")
            i.panel_focus = a.resolve("data", "textbox_panel2.png")
            i.on_change = "TEXT_CHANGE"
            i.padding = 20
            i1 = i
            self.__widgets.append(i)

        def cb(code, widget):
            print("New text: %s" % widget.text)

        nwidget.listen("TEXT_CHANGE", cb)
        self.run_pyglet(runner, draw, update)


if __name__ == "__main__":
    unittest.main()
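nwidget.listen above binds a string event code to a callback. A standalone sketch of that publish/subscribe pattern (a hypothetical mini event bus, not nwidget's implementation):

_listeners = {}

def listen(code, callback):
    # register a callback for an event code
    _listeners.setdefault(code, []).append(callback)

def trigger(code, widget):
    # fire every callback registered for the code
    for callback in _listeners.get(code, []):
        callback(code, widget)

class FakeWidget(object):
    text = "Hello"

listen("TEXT_CHANGE", lambda code, w: print("New text: %s" % w.text))
trigger("TEXT_CHANGE", FakeWidget())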
apache-2.0
1,046,455,372,296,335,500
24.053333
100
0.575306
false