{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \" % nombrepeli\n return HttpResponse(resp)\n else:\n return render_to_response('pelicula.html', context_instance=RequestContext(request))\n\ndef peliculasView(request):\n return render_to_response('peliculas.html', context_instance=RequestContext(request))\n\ndef user(request, NombrePelicula):\n Nombreusuario = str(NombrePelicula)\n if NombrePelicula:\n resp = \"

Pelicula: %s

\" % (Nombreusuario)\n return HttpResponse(resp)\n else:\n resp = \"

Peliculas index%s

\" % (Nombreusuario)\n return HttpResponse(resp)\n\t\t\n#TODO get the movie or return none\n\"\"\"\ndef get_pelicula(arg)\n\"\"\""},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":186,"cells":{"__id__":{"kind":"number","value":3015067071912,"string":"3,015,067,071,912"},"blob_id":{"kind":"string","value":"8c3b7571b6aee8b98af4b981f89271e4d80c4c54"},"directory_id":{"kind":"string","value":"6f7fbf2c5d785c3db9c7f8949d8f5d6755de7462"},"path":{"kind":"string","value":"/sitepaths/sitemap.py"},"content_id":{"kind":"string","value":"91c89edb2307d7640a63c247dd72da0f45448ee4"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"StuartMacKay/django-sitepaths"},"repo_url":{"kind":"string","value":"https://github.com/StuartMacKay/django-sitepaths"},"snapshot_id":{"kind":"string","value":"4a9add281876da23e85b20ef8d5fc605e34a0ae7"},"revision_id":{"kind":"string","value":"f07fa810f0bbcc5e9f086d32e5930451e6acb64a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T23:52:59.074440","string":"2016-09-05T23:52:59.074440"},"revision_date":{"kind":"timestamp","value":"2012-10-14T09:01:01","string":"2012-10-14T09:01:01"},"committer_date":{"kind":"timestamp","value":"2012-10-14T09:01:01","string":"2012-10-14T09:01:01"},"github_id":{"kind":"number","value":6162196,"string":"6,162,196"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"
null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf import settings\nfrom django.contrib.sitemaps import Sitemap\n\nfrom .models import Sitepath\n\n\nclass SitepathSitemap(Sitemap):\n\n def items(self):\n items = {}\n\n objects = Sitepath.objects.filter(type='page', sitemap=True)\n\n if hasattr(settings, 'SITE_ID'):\n objects = objects.filter(site_id=settings.SITE_ID)\n\n for obj in objects:\n if not obj.referrer:\n if not obj.location in items:\n obj.items = []\n items[obj.location] = obj\n else:\n # TODO report duplicate location\n pass\n\n for obj in objects:\n if obj.referrer:\n if obj.referrer in items:\n items[obj.referrer].items.append(obj)\n else:\n # TODO report missing landing page\n pass\n\n return items.values()\n\n def changefreq(self, obj):\n return obj.frequency\n\n def priority(self, obj):\n return obj.priority\n\n def lastmod(self, obj):\n return obj.modified\n\n def location(self, obj):\n return obj.location\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":187,"cells":{"__id__":{"kind":"number","value":3375844330809,"string":"3,375,844,330,809"},"blob_id":{"kind":"string","value":"5917e3c0783bff374a59196c81c6d499a8aebdbb"},"directory_id":{"kind":"string","value":"364249d5c7e9af7a7fd5d6122d4489fa5250919c"},"path":{"kind":"string","value":"/salest/core/payments.py"},"content_id":{"kind":"string","value":"1180de5b629278dd0fc7407b56bc3e088c68bdcf"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-or-later"],"string":"[\n 
\"GPL-3.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"anvil8/salest"},"repo_url":{"kind":"string","value":"https://github.com/anvil8/salest"},"snapshot_id":{"kind":"string","value":"b67cbaee6edf4cdfae77bd31191f5bd05ace213b"},"revision_id":{"kind":"string","value":"a25b9ab5ff2fab309b5d8b85b4c46d0e60f71410"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-30T14:23:47.923820","string":"2020-05-30T14:23:47.923820"},"revision_date":{"kind":"timestamp","value":"2012-08-07T11:29:27","string":"2012-08-07T11:29:27"},"committer_date":{"kind":"timestamp","value":"2012-08-07T11:29:27","string":"2012-08-07T11:29:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\" This module should consists of Basic Payment Management functionality \"\"\"\n\n\nclass BasePaymentProcessorManager(object):\n \"\"\" This class should convert any order information to PaymentInfo instance\n that is a standart class for input data to any Payment processor.\n Then he should get the necessary processor and make payments using the\n selected payment processor.\"\"\"\n\n def __init__(self, order_info):\n \"\"\" init \"\"\"\n self.order_info = order_info\n prepared_data = self.get_data_dict(order_info)\n payment_info = PaymentInfo(prepared_data)\n self.payment_info = payment_info\n self.processor = 
self.get_processor(self.order_info)\n\n def run(self):\n \"\"\" run Payment process \"\"\"\n processor = self.processor\n processor.prepare_data(self.payment_info)\n return processor.process()\n\n def get_order_info(self, order_info):\n \"\"\" \"\"\"\n return order_info or self.order_info\n\n def get_processor(self, order_info):\n \"\"\" This method shound return necessary payment processor\"\"\"\n raise NotImplementedError('get_processor must be overrided according \\\n to Your data schema')\n\n def get_data_dict(self, order_info=None):\n \"\"\" This method should return dict that would be understandable by\n PaymentInfo and converted to it.\n Here is example of dict -\n {\n 'firts_name': 'Chris',\n 'last_name': 'Smith',\n 'phone': ''801-555-9242'',\n 'address': '123 Main Street',\n 'city': 'New York',\n 'state': 'NY',\n 'country': 'US',\n 'post_code': '12345',\n 'credit_type': 'VISA',\n 'credit_number': '4111111111111111',\n 'credit_ccv': '123',\n 'credit_expire_year': '2012',\n 'id': 12,\n 'order_cost': '1.00',\n 'order_description': 'Some stuff',\n }\n\n \"\"\"\n raise NotImplementedError('get_data_dict must be overrided according \\\n to Your data schema')\n\n\nclass PaymentInfo(object):\n \"\"\" This class should store information that should be input to payment\n processor and processed there. 
\"\"\"\n\n def __init__(self, data_dict):\n \"\"\" This method setup basic info of this PaymentInfo and should get\n such dict to setup necessary attributes -\n\n {\n# USER INFO\n 'firts_name': 'Chris',\n 'last_name': 'Smith',\n 'phone': ''801-555-9242'',\n# LOCATION INFO\n 'address': '123 Main Street',\n 'city': 'New York',\n 'state': 'NY',\n 'country': 'US',\n 'post_code': '12345',\n# CREDIT CARD INFO\n 'credit_type': 'VISA',\n 'credit_number': '4111111111111111',\n 'credit_ccv': '123',\n 'credit_expire_month': '10',\n 'credit_expire_year': '2012',\n# ORDER INFO\n 'id': 12,\n 'order_cost': '1.00',\n 'order_description': 'Some stuff',\n }\n Also all values of basic dict accessible as PaymentInfo instance\n attribute\n\n info = PaymentInfo({'x':1})\n info.data_dict.get('x') >> 1\n info.x >> 1\n\n \"\"\"\n self.data_dict = data_dict\n\n def __getattr__(self, name):\n \"\"\" This method redefined to get access to basic dict keys as\n PaymentInfo attributes. \"\"\"\n return self.data_dict[name]\n\n def get_full_name(self):\n \"\"\" This method returns card holder full name. \"\"\"\n return \"%s %s\" % (self.firts_name, self.last_name)\n\n\nclass ProcessorResult(object):\n \"\"\" Instance of this class should be returned from payment processor. 
\"\"\"\n\n def __init__(self, processor, success, message, payment=None):\n \"\"\"Initialize with:\n\n processor - the key of the processor setting the result\n success - boolean\n message - a lazy string label, such as _('OK)\n payment - an OrderPayment or OrderAuthorization\n \"\"\"\n self.success = success\n self.processor = processor\n self.message = message\n self.payment = payment\n\n def __unicode__(self):\n \"\"\" Unicode \"\"\"\n status = 'Success' if self.success else 'Failure'\n return u\"ProcessorResult: %s [%s] %s\" % (self.processor, status,\n self.message)\n\n def print_result(self):\n \"\"\" print results \"\"\"\n print {\n 'key': self.processor,\n \"status\": self.success,\n 'msg': self.message,\n 'payment': self.payment,\n }\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":188,"cells":{"__id__":{"kind":"number","value":11046655892996,"string":"11,046,655,892,996"},"blob_id":{"kind":"string","value":"88b9b843f1dc06e89315e5f23d2a035b7e72c132"},"directory_id":{"kind":"string","value":"2f0cac2d094ecb700c7c92a638eba417eaffe4de"},"path":{"kind":"string","value":"/mishapp_api/database.py"},"content_id":{"kind":"string","value":"9b70b8a3faa92925e31f5cdce747d3e3e1c7a852"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"gudeg-united/mishapp-api-hackathon"},"repo_url":{"kind":"string","value":"https://github.com/gudeg-united/mishapp-api-hackathon"},"snapshot_id":{"kind":"string","value":"decb3b69aa525335c705944f102bf9c7e1ac87c1"},"revision_id":{"kind":"string","value":"282c59173b41fdc24959f38a6b1699756ecf2780"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-16T01:01:38.534588","string":"2021-01-16T01:01:38.534588"},"revision_date":{"kind":"timestamp","value":"2014-12-07T20:27:50","string":"2014-12-07T20:27:50"},"committer_date":{"kind":"timestamp","value":"2014-12-07T20:27:50","string":"2014-12-07T20:27:50"},"github_id":{"kind":"number","value":33819419,"string":"33,819,419"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nfrom datetime import datetime\n\nfrom Pubnub import Pubnub\nfrom flask.ext.mongoengine import MongoEngine\nfrom flask import current_app\n\ndb = MongoEngine()\n\n\nclass Disaster(db.Document):\n source = db.StringField(required=True)\n source_id = db.StringField(required=True)\n type = db.StringField()\n properties = db.DictField()\n geometry = db.DictField()\n modified_at = 
db.DateTimeField(default=datetime.utcnow)\n\n meta = {\n \"indexes\": [[(\"geometry\", \"2dsphere\")]],\n }\n\n def asdict(self):\n return {\n \"id\": \"{}\".format(self[\"id\"]),\n \"source\": self[\"source\"],\n \"source_id\": self[\"source_id\"],\n \"type\": self[\"type\"],\n \"properties\": self[\"properties\"],\n \"geometry\": self[\"geometry\"],\n \"modified_at\": self[\"modified_at\"].isoformat(),\n }\n\n @classmethod\n def create_unique(cls, **fields):\n # TODO: use upsert!\n if not cls.objects(source=fields[\"source\"], source_id=fields[\"id\"]).count(): # noqa\n disaster = Disaster()\n disaster.source = fields[\"source\"]\n disaster.source_id = fields[\"id\"]\n disaster.type = \"Feature\"\n disaster.properties = fields[\"properties\"]\n disaster.geometry = fields[\"geometry\"]\n disaster.save()\n return disaster\n\n @classmethod\n def post_save(cls, sender, document, **kwargs):\n cfg = current_app.config\n pubnub = Pubnub(\n publish_key=cfg[\"PUBNUB_PUB_KEY\"],\n subscribe_key=cfg[\"PUBNUB_SUB_KEY\"],\n ssl_on=False,\n )\n pubnub.publish(channel=\"globaldisaster\",\n message=json.dumps(document.asdict()))\n\n\nfrom mongoengine import signals\n\nsignals.post_save.connect(Disaster.post_save, sender=Disaster)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":189,"cells":{"__id__":{"kind":"number","value":7679401531684,"string":"7,679,401,531,684"},"blob_id":{"kind":"string","value":"ca5a6006497000055ba56de949250ab44b8c39d8"},"directory_id":{"kind":"string","value":"362b2669b19c4f9f8817a2e13f186f66cd47c96a"},"path":{"kind":"string","value":"/src/py/models/ideal.py"},"content_id":{"kind":"string","value":"62d8dcc3ec7f3a4ba4d07c3a89e38005534d4a29"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"andreaspetrovic/mrifit"},"repo_url":{"kind":"string","value":"https://github.com/andreaspetrovic/mrifit"},"snapshot_id":{"kind":"string","value":"812c029ae41119089cfeb6d2d51cd8300fc7ce6d"},"revision_id":{"kind":"string","value":"91f8f6579c1bf6663991cb20a7782e5fb9c39a37"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-31T07:35:13.077737","string":"2020-05-31T07:35:13.077737"},"revision_date":{"kind":"timestamp","value":"2014-07-24T14:51:56","string":"2014-07-24T14:51:56"},"committer_date":{"kind":"timestamp","value":"2014-07-24T14:51:56","string":"2014-07-24T14:51:56"},"github_id":{"kind":"number","value":25912320,"string":"25,912,320"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n##@package ideal\n# Functionality to perform IDEAL fat water separation\n#\n# Implemented according to S. B. 
Reeder et al., Multicoil Dixon Chemical Species Separation With an\n# Iterative Least-Squares Estimation Method, Magn Reson Med, 51:35-45, (2004)\n\nimport numpy as np\nimport numpy.ma as ma\nimport sys;\nimport numpy.random as ra\n\n\n# ----------------------------------------------------------------------------\n## Model for IDEAL fat-water separation\n# Has functions to compute the system matrices A and B\n# as well as intermediate least squares result u\n# FIXXXME: remove echo time length hardcoded (matrix reshaping)\nclass IDEAL:\n ## Constructor\n #@param data numpy array holding sample points of complex fat water signal\n #@param te numpy array of sampling times (echo times)\n def __init__(self, data, te):\n self.te = te;\n self.omega = 0;\n self.data = data;\n self.temp = 0;\n \n # fat offset frequency\n freq = 460;\n self.freq = 460;\n \n # fat coefficients\n self.c1n = np.cos(2*np.pi*freq*te);\n self.d1n = np.sin(2*np.pi*freq*te);\n \n # water coefficients\n self.c2n = np.ones(len(time));\n self.d2n = np.zeros(len(time));\n \n self.fresh = 0;\n self.deltaomega = 0;\n \n # vector to hold results of first least squares solution\n self.u = np.zeros(4);\n \n # data prepared as vector for least squares optimization\n self.signalvec = np.concatenate((signal.real, signal.imag),axis=0);\n \n ## Returns Least Squares Matrix A\n #@return 4x6 numpy matrix for first least squares step\n def getA(self):\n A = np.concatenate((self.c1n, self.d1n, -self.d1n, self.c1n, self.c2n, self.d2n, -self.d2n, self.c2n),axis=0)\n A.shape = (4,6);\n A = A.T;\n return A;\n \n ## Returns Least Squares Matrix B\n #@return 5x6 numpy matrix for second least squares step\n def getB(self):\n gr = 2*np.pi*self.te*(-self.u[0]*self.d1n - self.u[1]*self.c1n - self.u[2]*self.d2n - self.u[3]*self.c2n)\n gi = 2*np.pi*self.te*( self.u[0]*self.c1n - self.u[1]*self.d1n + self.u[2]*self.c2n - self.u[3]*self.d2n) \n \n C = np.concatenate((gr, gi, self.c1n, self.d1n, -self.d1n, self.c1n, self.c2n, 
self.d2n, -self.d2n, self.c2n),axis=0)\n C.shape = (5,6);\n C = C.T;\n return C;\n \n ## Returns delta omega of last iteration\n #@return delta omega\n def getDeltaOmega(self):\n return self.deltaomega;\n \n ## Sets the intermediate result u, real and imaginary parts of fat and water signal\n #@param u numpy array (1x4)\n def setU(self, u):\n self.u = u;\n self.fresh = 1;\n \n ## Update field map at end of iteration\n #@param v numpy array (1x5)\n def updateOmega(self, v):\n self.omega += v[0];\n self.deltaomega = v[0];\n omegafact = np.exp(2*np.pi*1j*self.omega*self.te);\n dummyr = v[1]*self.c1n - v[2]*self.d1n + v[3]*self.c2n - v[4]*self.d2n;\n dummyi = v[1]*self.d1n + v[2]*self.c1n + v[3]*self.d2n + v[4]*self.c2n;\n #self.data += dummyr + 1j*dummyi;\n self.data = self.data / omegafact;\n self.signalvec = np.concatenate((self.data.real, self.data.imag),axis=0);\n \n def getSignalVector(self):\n return self.signalvec;\n \n def getSignalVector2(self):\n dummyr =np.array(self.u[0]*self.c1n-self.u[1]*self.d1n+self.u[2]*self.c2n - self.u[3]*self.d2n);\n dummyi =np.array(self.u[0]*self.d1n+self.u[1]*self.c1n+self.u[2]*self.d2n + self.u[3]*self.c2n);\n return self.signalvec - np.concatenate((dummyr, dummyi));\n \n\n# ----------------------------------------------------------------------------\n## Class for least squares optimization of IDEAL data\nclass IDEALOptimizer:\n ## Constructor\n #@param idealmodel IDEAL model object\n def __init__(self, idealmodel):\n self.ideal = idealmodel;\n self.iterationcount = 0;\n \n ## Function to start least squares optimization \n def optimize(self):\n \n while (ideal.fresh == 0 or ideal.getDeltaOmega() > 0.6):\n \n A = self.ideal.getA();\n data = self.ideal.getSignalVector();\n u = np.linalg.lstsq(A, data)[0];\n \n self.ideal.setU(u);\n \n B = self.ideal.getB()\n data = self.ideal.getSignalVector2();\n v = np.linalg.lstsq(B,data)[0];\n \n v[1:len(v)] += u;\n \n self.ideal.updateOmega(v);\n self.iterationcount += 1;\n \n self.result = 
v[1:len(v)];\n self.omega = self.ideal.omega;\n\n \n \n\n# ----------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n \n # create test signal ==========================================\n time = np.array([1, 3, 5])*0.001\n print time\n freq = 460;\n omega = 2*np.pi*460;\n fat = 23.4*np.exp(1j*time*omega)\n water = 10.05*np.ones(len(time));\n signal = (fat + water)*np.exp(1j*2*np.pi*10*time);\n print(signal)\n signal += (ra.random(3) + ra.random(3)*1j) *0.0;\n \n # reconstruction ============================================= \n ideal = IDEAL(signal, time);\n\n while (ideal.fresh == 0 or ideal.getDeltaOmega() > 0.6):\n A = ideal.getA();\n data = ideal.getSignalVector();\n u = np.linalg.lstsq(A, data)[0];\n \n ideal.setU(u);\n \n B = ideal.getB()\n data = ideal.getSignalVector2();\n v = np.linalg.lstsq(B,data)[0];\n \n v[1:len(v)] += u;\n \n ideal.updateOmega(v);\n \n # Print results ================================================\n np.set_printoptions(suppress=True,precision=2)\n print \"RESULT:\"\n print \"coefficients: %s\" % np.array_str(v)\n print \"omega: %s\" % np.array_str(ideal.omega)\n \n ideal = IDEAL(signal, time);\n optim = IDEALOptimizer(ideal);\n optim.optimize();\n \n print optim.result\n print optim.omega\n \n \n \n \n \n \n \n \n ##############################################################\n ##############################################################\n sys.exit();\n \n b= [];\n a = np.array([1.0, 1.0, 100.0, 1.0, 200.0, 3.0, 400.0, 30.0])\n a.shape = (2,4)\n b = ma.array(a, mask = [a[0] <= 3, a[0] < 3])\n print a[0]\n print a\n print b\n print a > 3.0\n print b.shape\n \n res = ma.array(np.zeros(b.shape),mask = b.mask)\n it = np.nditer([b[~b.mask], None])\n print \"Bmask:\"\n print b[~b.mask]\n \n count = 0\n for i,o in it:\n print i\n o[...] 
= i * 2;\n count +=1\n\n res[~res.mask] = it.operands[1];\n print res\n print b\n \n "},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":190,"cells":{"__id__":{"kind":"number","value":19378892473393,"string":"19,378,892,473,393"},"blob_id":{"kind":"string","value":"52faa4eaaf391d0d86547167ada0c06831819905"},"directory_id":{"kind":"string","value":"1f61f009b4b70cfd8dc05ea50a4238d8e236c259"},"path":{"kind":"string","value":"/EasyMerge/merger/type3_dealer.py"},"content_id":{"kind":"string","value":"0bd870f3fa05a602ed9f67c72f54f38057b85bef"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"h2oloopan/easymerge"},"repo_url":{"kind":"string","value":"https://github.com/h2oloopan/easymerge"},"snapshot_id":{"kind":"string","value":"3e3fe651bdd4bc13ba81cf2bd339108c2e79cc67"},"revision_id":{"kind":"string","value":"980307bfc7da40dd1bb752371b000f362edb2dda"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T20:17:15.301839","string":"2021-01-01T20:17:15.301839"},"revision_date":{"kind":"timestamp","value":"2014-03-26T15:37:21","string":"2014-03-26T15:37:21"},"committer_date":{"kind":"timestamp","value":"2014-03-26T15:37:21","string":"2014-03-26T15:37:21"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"g
ha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''\r\nCreated on Mar 15, 2014\r\n\r\n@author: h7qin\r\n'''\r\n\"Usage: unparse.py \"\r\nimport sys\r\nimport ast\r\nimport cStringIO\r\nimport os\r\n\r\n# Large float and imaginary literals get turned into infinities in the AST.\r\n# We unparse those infinities to INFSTR.\r\nINFSTR = \"1e\" + repr(sys.float_info.max_10_exp + 1)\r\nbuild_in_name = [\"abs\", \"divmod\", \"input\", \"open\", \"staticmethod\", \"all\", \"enumerate\", \"int\", \"ord\", \"str\", \"any\", \"eval\", \"isinstance\", \"pow\", \"sum\", \"basestring\", \"execfile\", \"issubclass\", \"print\", \"super\", \"bin\", \"file\", \"iter\", \"property\", \"tuple\", \"bool\", \"filter\", \"len\", \"range\", \"type\", \"bytearray\", \"float\", \"list\", \"raw_input\", \"unichr\", \"callable\", \"format\", \"locals\", \"reduce\", \"unicode\", \"chr\", \"frozenset\", \"long\", \"reload\", \"vars\", \"classmethod\", \"getattr\", \"map\", \"repr\", \"xrange\", \"cmp\", \"globals\", \"max\", \"reversed\", \"zip\", \"compile\", \"hasattr\", \"memoryview\", \"round\", \"__import__\", \"complex\", \"hash\", \"min\", \"set\", \"apply\", \"delattr\", \"help\", \"next\", \"setattr\", \"buffer\", \"dict\", \"hex\", \"object\", \"slice\", \"coerce\", \"dir\", \"id\", \"oct\", \"sorted\", \"intern\", \"None\", \"False\", \"True\"]\r\n\r\ndef interleave(inter, f, seq):\r\n \"\"\"Call f on each item in seq, calling inter() in between.\r\n \"\"\"\r\n seq = iter(seq)\r\n try:\r\n f(next(seq))\r\n except StopIteration:\r\n pass\r\n else:\r\n for x in seq:\r\n inter()\r\n f(x)\r\n \r\nclass Code:\r\n s = \"\"\r\n def __init__(self, id):\r\n self.id = id\r\n def write(self, s):\r\n self.s+=s\r\n def flush(self):\r\n pass\r\n def split(self):\r\n self.code_lines = self.s.splitlines()\r\n #self.code_lines = filter(lambda a: a != 
\"\", self.code_lines)\r\n while self.code_lines[0]==\"\":\r\n self.code_lines = self.code_lines[1:]\r\n \r\n def generate_var_set(self, vars):\r\n self.orig_vars = vars\r\n self.var_set = []\r\n for i in range(len(vars)):\r\n self.var_set.append(i)\r\n var = vars[i]\r\n for j in range(0,i):\r\n cur_var = vars[j]\r\n if cur_var==var:\r\n self.var_set[-1] = j\r\n break\r\n #print self.var_set\r\n \r\n def add_parameter(self, vars, bins):\r\n \r\n self.generate_var_set(vars)\r\n self.orig_bins = bins\r\n \r\n param = [] \r\n \r\n def merge_vars(vars):\r\n var_dict = {}\r\n for i in vars:\r\n var_dict[i]=1\r\n return var_dict \r\n\r\n self.vars = merge_vars(vars)\r\n if \"self\" in self.vars:\r\n param.append(\"self\") \r\n for i in self.vars:\r\n if i==\"self\":\r\n continue\r\n if not i.startswith(\"new@\"):\r\n param.append(i)\r\n \r\n self.param = param\r\n\r\n def write_head_line(self,vars, bins):\r\n self.add_parameter(vars, bins)\r\n func_name = \"helper\"+str(self.id)\r\n if len(self.param)>0:\r\n param = \"(\"\r\n for i in self.param:\r\n param += i+\", \"\r\n param = param[:-2]+\")\"\r\n else:\r\n param = \"()\"\r\n headline = \"def \"+func_name+param+\":\"\r\n for i in range(len(self.code_lines)):\r\n self.code_lines[i] = \" \"+self.code_lines[i]\r\n self.code_lines = [headline]+self.code_lines\r\n \r\n def rewrite_head_line(self, new_param):\r\n func_name = \"helper\"+str(self.id)\r\n if len(new_param)>0:\r\n param = \"(\"\r\n for i in new_param:\r\n param += (i+\", \")\r\n param = param[:-2]+\")\"\r\n else:\r\n param = \"()\"\r\n headline = \"def \"+func_name+param+\":\"\r\n self.code_lines[0] = headline\r\n \r\n def add_rets(self, return_vars):\r\n def merge_vars(vars):\r\n var_dict = {}\r\n for i in vars:\r\n var_dict[i]=1\r\n return var_dict\r\n ret_vars = []\r\n for i in merge_vars(return_vars):\r\n ret_vars.append(i)\r\n self.return_vars = ret_vars\r\n \r\n \r\n def write_return_line(self,return_vars):\r\n \r\n self.add_rets(return_vars)\r\n \r\n 
ret_line = \" return (\"\r\n if len(self.return_vars)>1:\r\n for i in self.return_vars:\r\n ret_line+=(i+\", \")\r\n ret_line = ret_line[:-2]+\")\"\r\n elif len(self.return_vars)==1:\r\n ret_line = ret_line[:-1]+self.return_vars[0]\r\n else:\r\n ret_line = ret_line[:-2]\r\n self.code_lines.append(ret_line)\r\n \r\n self.receiver = ret_line.strip()[6:].strip()\r\n \r\n def rewrite_return_line(self, new_ret):\r\n\r\n ret_line = \" return (\"\r\n if len(new_ret)>1:\r\n for i in new_ret:\r\n ret_line+=(i+\", \")\r\n ret_line = ret_line[:-2]+\")\"\r\n elif len(new_ret)==1:\r\n ret_line = ret_line[:-1]+new_ret[0]\r\n else:\r\n ret_line = ret_line[:-2]\r\n self.code_lines[-1] = ret_line\r\n \r\n\r\n def get_caller(self):\r\n \r\n func_name = \"helper\"+str(self.id)\r\n \r\n s2 = \"\"\r\n if self.receiver:\r\n s2 += self.receiver+\" = \"\r\n s2 += self.code_lines[0][4:-1]\r\n\r\n self.caller = s2\r\n \r\n def reget_caller(self, new_caller, new_receiver):\r\n \r\n func_name = \"helper\"+str(self.id)\r\n \r\n if len(new_caller)>0:\r\n paramline = \"(\"\r\n for i in new_caller:\r\n paramline += i+\", \"\r\n paramline = paramline[:-2]+\")\"\r\n else:\r\n paramline = \"()\"\r\n \r\n ret_line = \"(\"\r\n if len(new_receiver)>1:\r\n for i in new_receiver:\r\n ret_line+=(i+\", \")\r\n ret_line = ret_line[:-2]+\")\"\r\n elif len(new_receiver)==1:\r\n ret_line = ret_line[:-1]+new_receiver[0]\r\n else:\r\n ret_line = \"\"\r\n \r\n s2 = \"\"\r\n if ret_line!=\"\":\r\n s2 += ret_line+\" = \"\r\n s2 += func_name+paramline\r\n\r\n self.caller = s2\r\n \r\n def rewrite_code(self, new_vars, diff_bins):\r\n for j in range(len(new_vars)):\r\n for i in range(1,len(self.code_lines)-1):\r\n line = self.code_lines[i]\r\n tag = self.orig_vars[j]\r\n if tag.startswith(\"new@\"):\r\n tag = tag[4:]\r\n tag+=(\"@\"+str(j))\r\n if line.find(tag)>=0:\r\n line = line.replace(tag,new_vars[j])\r\n break\r\n self.code_lines[i] = line\r\n for j in range(len(self.orig_bins)):\r\n for i in 
range(1,len(self.code_lines)-1):\r\n line = self.code_lines[i]\r\n tag = self.orig_bins[j]\r\n tag+=(\"@bin\"+str(j))\r\n new_tag = self.orig_bins[j]\r\n if j in diff_bins:\r\n id = diff_bins.index(j)\r\n new_tag = \"diff_params[\"+str(id)+\"]\"\r\n if line.find(tag)>=0:\r\n line = line.replace(tag,new_tag)\r\n break\r\n self.code_lines[i] = line\r\n \r\n \r\n def get_code(self):\r\n s = \"\"\r\n for i in self.code_lines:\r\n s+=i+\"\\n\"\r\n return s\r\n \r\n def output(self,file = sys.stdout):\r\n self.f = file\r\n self.f.write(\"======================\\n\")\r\n self.f.write(self.caller+\"\\n\")\r\n for i in self.code_lines:\r\n self.f.write(i+\"\\n\")\r\n self.f.flush()\r\n\r\nclass Unparser:\r\n \"\"\"Methods in this class recursively traverse an AST and\r\n output source code for the abstract syntax; original formatting\r\n is disregarded. \"\"\"\r\n\r\n def __init__(self, tree, lines, src_ast, file = sys.stdout):\r\n \"\"\"Unparser(tree, file=sys.stdout) -> None.\r\n Print the source for tree to file.\"\"\"\r\n self.calls = self.crop_calls(lines, src_ast)\r\n self.variable, self.return_vars = self.crop_vars(lines, src_ast)\r\n self.mod_calls = []\r\n self.functions = src_ast.functions\r\n self.classes = src_ast.classes\r\n self.lines = lines\r\n self.cur_call = -1 \r\n self.incall = False\r\n self.cur_str = \"\"\r\n self.ret_str = False\r\n self.top_level = True\r\n self.args = []\r\n \r\n self.all_name = []\r\n self.bins = []\r\n self.vars = []\r\n self.is_func_name = False \r\n \r\n self.f = file\r\n \r\n #for i in src_ast.raw_scope:\r\n # print i\r\n \r\n '''for i in self.calls:\r\n self.f.write(\"=======================\\n\")\r\n self.f.write(\"Name:\"+str(i.name)+\"\\n\")\r\n self.f.write(\"line:\"+str(i.line)+\"\\n\")\r\n self.f.write(\"scope:\"+str(i.scope)+\"\\n\")\r\n self.f.write(\"source:\"+str(i.source)+\"\\n\")\r\n self.f.write(\"tree:\"+str(i.tree)+\"\\n\") '''\r\n \r\n self.f = file\r\n self.future_imports = []\r\n self._indent = 0\r\n 
self.dispatch(tree)\r\n self.f.write(\"\\n\")\r\n self.f.flush()\r\n \r\n rm_ret_var = []\r\n for i in self.return_vars:\r\n if i in self.mod_calls:\r\n rm_ret_var.append(i)\r\n for i in rm_ret_var:\r\n self.return_vars.remove(i)\r\n \r\n \r\n #print \"mod_calls:\",self.mod_calls\r\n #print \"new_vars:\",self.vars\r\n '''for i in range(len(self.variable)):\r\n if self.variable[i].startswith(\"new@\"):\r\n self.variable[i]=\"\"'''\r\n #print \"old_vars:\",self.variable\r\n if self.vars!=self.variable:\r\n print \"ERROR\"\r\n #print \"return_vars:\",self.return_vars\r\n #print \"\"\r\n \r\n def crop_calls(self, lines, src_ast):\r\n calls = []\r\n for i in src_ast.calls:\r\n if i.linelines[1]:\r\n continue\r\n else:\r\n calls.append(i)\r\n return calls\r\n \r\n def crop_vars(self, lines, src_ast):\r\n \r\n #for i in src_ast.functions:\r\n # print i.name, i.env\r\n\r\n new_imports = []\r\n for i in src_ast.raw_imports:\r\n new_imports.append(i[0])\r\n if i[1]:\r\n new_imports.append(i[1])\r\n \r\n vars = []\r\n return_vars = []\r\n for i in src_ast.var:\r\n if i[0]lines[1]:\r\n continue\r\n else:\r\n \r\n if i[1] in build_in_name:\r\n continue\r\n \r\n if i[1]==\"self\":\r\n vars.append(i[1])\r\n return_vars.append(i[1])\r\n continue\r\n \r\n if i[1] in new_imports:\r\n vars.append(i[1])\r\n continue\r\n \r\n is_class = False\r\n for j in src_ast.classes:\r\n if i[1]==j.name:\r\n vars.append(i[1])\r\n is_class = True\r\n break\r\n if is_class:\r\n continue\r\n \r\n env = [\"Global\", \"Function\"]\r\n is_func = False\r\n for j in src_ast.functions:\r\n if i[1]==j.name: \r\n if j.env[-1][0] in env:\r\n vars.append(i[1])\r\n is_func = True\r\n break\r\n if is_func:\r\n continue\r\n \r\n vars.append(\"new@\"+i[1])\r\n for j in src_ast.var:\r\n if j[1]==i[1] and j[0]lines[1]:\r\n #-1?\r\n if i[2]==-1:\r\n #print \"return global\",i\r\n return_vars.append(i[1])\r\n break\r\n scope_i = src_ast.raw_scope[i[2]]\r\n scope_j = src_ast.raw_scope[j[2]]\r\n if 
len(scope_j)>=len(scope_i) and scope_i==scope_j[:len(scope_i)]:\r\n #print \"return:\", i,j\r\n return_vars.append(i[1])\r\n break\r\n #print src_ast.raw_scope[i[2]]\r\n #print i\r\n #print vars\r\n return vars,return_vars\r\n \r\n def call_dealer(self,tree):\r\n #self.write(\"CALL_HERE\"+str(tree.lineno)+\",\"+str(tree.col_offset))\r\n \r\n def process_mod_call(call):\r\n self.is_func_name = True\r\n self.ret_str = True \r\n self.dispatch(tree.func)\r\n self.f.write(self.cur_str) \r\n self.ret_str = False\r\n self.is_func_name = False\r\n \r\n #print \"@\"+self.cur_str, \r\n #self.write(\"unreachable_method[\"+str(len(self.mod_calls))+\"]\")\r\n #self.write(\"$CALL:\"+str(call.source)+\"$\")\r\n if not self.cur_str in build_in_name:\r\n if self.cur_str.find(\".\")>0:\r\n self.mod_calls.append(self.cur_str[:self.cur_str.find(\".\")])\r\n else:\r\n self.mod_calls.append(self.cur_str)\r\n self.cur_str = \"\"\r\n \r\n \r\n self.cur_call+=1\r\n call = self.calls[self.cur_call]\r\n if isinstance(call.source, tuple):\r\n source = call.source\r\n else:\r\n source = (\"Unknown\", call.source)\r\n \r\n if source==(\"Unknown\",-1) or source==(\"member\",-1):\r\n return False\r\n else: #call import\r\n process_mod_call(call)\r\n return True\r\n\r\n def fill(self, text = \"\"):\r\n \"Indent a piece of text, according to the current indentation level\"\r\n self.f.write(\"\\n\"+\" \"*self._indent + text)\r\n\r\n\r\n def write(self, text):\r\n \"Append a piece of text to the current line.\"\r\n if not self.ret_str:\r\n self.f.write(text)\r\n else:\r\n self.cur_str+=(text)\r\n\r\n def enter(self):\r\n \"Print ':', and increase the indentation.\"\r\n self.write(\":\")\r\n self._indent += 1\r\n\r\n def leave(self):\r\n \"Decrease the indentation level.\"\r\n self._indent -= 1\r\n\r\n def dispatch(self, tree):\r\n \"Dispatcher function, dispatching tree type T to method _T.\"\r\n if isinstance(tree, list):\r\n for t in tree:\r\n self.dispatch(t)\r\n return\r\n meth = getattr(self, 
\"_\"+tree.__class__.__name__)\r\n meth(tree)\r\n\r\n\r\n ############### Unparsing methods ######################\r\n # There should be one method per concrete grammar type #\r\n # Constructors should be grouped by sum type. Ideally, #\r\n # this would follow the order in the grammar, but #\r\n # currently doesn't. #\r\n ########################################################\r\n\r\n def _Module(self, tree):\r\n for stmt in tree.body:\r\n self.dispatch(stmt)\r\n\r\n # stmt\r\n def _Expr(self, tree):\r\n self.fill()\r\n self.dispatch(tree.value)\r\n\r\n def _Import(self, t):\r\n self.fill(\"import \")\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.names)\r\n\r\n def _ImportFrom(self, t):\r\n # A from __future__ import may affect unparsing, so record it.\r\n if t.module and t.module == '__future__':\r\n self.future_imports.extend(n.name for n in t.names)\r\n\r\n self.fill(\"from \")\r\n self.write(\".\" * t.level)\r\n if t.module:\r\n self.write(t.module)\r\n self.write(\" import \")\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.names)\r\n\r\n def _Assign(self, t):\r\n self.fill()\r\n for target in t.targets:\r\n self.dispatch(target)\r\n self.write(\" = \")\r\n self.dispatch(t.value)\r\n\r\n def _AugAssign(self, t):\r\n self.fill()\r\n self.dispatch(t.target)\r\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\r\n self.dispatch(t.value)\r\n\r\n def _Return(self, t):\r\n self.fill(\"return\")\r\n if t.value:\r\n self.write(\" \")\r\n self.dispatch(t.value)\r\n\r\n def _Pass(self, t):\r\n self.fill(\"pass\")\r\n\r\n def _Break(self, t):\r\n self.fill(\"break\")\r\n\r\n def _Continue(self, t):\r\n self.fill(\"continue\")\r\n\r\n def _Delete(self, t):\r\n self.fill(\"del \")\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.targets)\r\n\r\n def _Assert(self, t):\r\n self.fill(\"assert \")\r\n self.dispatch(t.test)\r\n if t.msg:\r\n self.write(\", \")\r\n self.dispatch(t.msg)\r\n\r\n def _Exec(self, t):\r\n 
self.fill(\"exec \")\r\n self.dispatch(t.body)\r\n if t.globals:\r\n self.write(\" in \")\r\n self.dispatch(t.globals)\r\n if t.locals:\r\n self.write(\", \")\r\n self.dispatch(t.locals)\r\n\r\n def _Print(self, t):\r\n self.fill(\"print \")\r\n do_comma = False\r\n if t.dest:\r\n self.write(\">>\")\r\n self.dispatch(t.dest)\r\n do_comma = True\r\n for e in t.values:\r\n if do_comma:self.write(\", \")\r\n else:do_comma=True\r\n self.dispatch(e)\r\n if not t.nl:\r\n self.write(\",\")\r\n\r\n def _Global(self, t):\r\n self.fill(\"global \")\r\n interleave(lambda: self.write(\", \"), self.write, t.names)\r\n\r\n def _Yield(self, t):\r\n self.write(\"(\")\r\n self.write(\"yield\")\r\n if t.value:\r\n self.write(\" \")\r\n self.dispatch(t.value)\r\n self.write(\")\")\r\n\r\n def _Raise(self, t):\r\n self.fill('raise ')\r\n if t.type:\r\n self.dispatch(t.type)\r\n if t.inst:\r\n self.write(\", \")\r\n self.dispatch(t.inst)\r\n if t.tback:\r\n self.write(\", \")\r\n self.dispatch(t.tback)\r\n\r\n def _TryExcept(self, t):\r\n self.fill(\"try\")\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n for ex in t.handlers:\r\n self.dispatch(ex)\r\n if t.orelse:\r\n self.fill(\"else\")\r\n self.enter()\r\n self.dispatch(t.orelse)\r\n self.leave()\r\n\r\n def _TryFinally(self, t):\r\n if len(t.body) == 1 and isinstance(t.body[0], ast.TryExcept):\r\n # try-except-finally\r\n self.dispatch(t.body)\r\n else:\r\n self.fill(\"try\")\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n self.fill(\"finally\")\r\n self.enter()\r\n self.dispatch(t.finalbody)\r\n self.leave()\r\n\r\n def _ExceptHandler(self, t):\r\n self.fill(\"except\")\r\n if t.type:\r\n self.write(\" \")\r\n self.dispatch(t.type)\r\n if t.name:\r\n self.write(\" as \")\r\n self.dispatch(t.name)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n def _ClassDef(self, t):\r\n self.write(\"\\n\")\r\n for deco in t.decorator_list:\r\n self.fill(\"@\")\r\n self.dispatch(deco)\r\n 
self.fill(\"class \"+t.name)\r\n if t.bases:\r\n self.write(\"(\")\r\n for a in t.bases:\r\n self.dispatch(a)\r\n self.write(\", \")\r\n self.write(\")\")\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n def _FunctionDef(self, t):\r\n self.write(\"\\n\")\r\n for deco in t.decorator_list:\r\n self.fill(\"@\")\r\n self.dispatch(deco)\r\n self.fill(\"def \"+t.name + \"(\")\r\n self.dispatch(t.args)\r\n self.write(\")\")\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n def _For(self, t):\r\n self.fill(\"for \")\r\n self.dispatch(t.target)\r\n self.write(\" in \")\r\n self.dispatch(t.iter)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n if t.orelse:\r\n self.fill(\"else\")\r\n self.enter()\r\n self.dispatch(t.orelse)\r\n self.leave()\r\n\r\n def _If(self, t):\r\n self.fill(\"if \")\r\n self.dispatch(t.test)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n # collapse nested ifs into equivalent elifs.\r\n while (t.orelse and len(t.orelse) == 1 and\r\n isinstance(t.orelse[0], ast.If)):\r\n t = t.orelse[0]\r\n self.fill(\"elif \")\r\n self.dispatch(t.test)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n # final else\r\n if t.orelse:\r\n self.fill(\"else\")\r\n self.enter()\r\n self.dispatch(t.orelse)\r\n self.leave()\r\n\r\n def _While(self, t):\r\n self.fill(\"while \")\r\n self.dispatch(t.test)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n if t.orelse:\r\n self.fill(\"else\")\r\n self.enter()\r\n self.dispatch(t.orelse)\r\n self.leave()\r\n\r\n def _With(self, t):\r\n self.fill(\"with \")\r\n self.dispatch(t.context_expr)\r\n if t.optional_vars:\r\n self.write(\" as \")\r\n self.dispatch(t.optional_vars)\r\n self.enter()\r\n self.dispatch(t.body)\r\n self.leave()\r\n\r\n # expr\r\n def _Str(self, tree):\r\n # if from __future__ import unicode_literals is in effect,\r\n # then we want to output string literals using a 'b' prefix\r\n # and unicode literals with no 
prefix.\r\n if \"unicode_literals\" not in self.future_imports:\r\n self.write(repr(tree.s)+\"@bin\"+str(len(self.bins)))\r\n self.bins.append(repr(tree.s))\r\n elif isinstance(tree.s, str):\r\n self.write(\"b\" + repr(tree.s)+\"@bin\"+str(len(self.bins)))\r\n self.bins.append(\"b\" + repr(tree.s))\r\n elif isinstance(tree.s, unicode):\r\n self.write(repr(tree.s).lstrip(\"u\")+\"@bin\"+str(len(self.bins)))\r\n self.bins.append(repr(tree.s).lstrip(\"u\"))\r\n else:\r\n assert False, \"shouldn't get here\"\r\n\r\n def _Name(self, t):\r\n \r\n self.all_name.append(t.id)\r\n #if t.id==\"ui\":\r\n #print \"ui=======\",self.rec_var\r\n if t.id in build_in_name:\r\n self.write(t.id+\"@bin\"+str(len(self.bins)))\r\n self.bins.append(t.id) \r\n else:\r\n idx = len(self.vars)\r\n should_var = self.variable[idx]\r\n if should_var.startswith(\"new@\"):\r\n should_id = should_var[4:]\r\n if should_id!=t.id:\r\n self.write(t.id)\r\n else:\r\n self.vars.append(\"new@\"+t.id)\r\n self.write(t.id+\"@\"+str(idx))\r\n else:\r\n if t.id!=should_var:\r\n self.write(t.id)\r\n else:\r\n self.vars.append(t.id)\r\n self.write(t.id+\"@\"+str(idx))\r\n \r\n \r\n '''if not self.is_func_name: \r\n #print self.variable \r\n #print self.vars\r\n #print self.incall\r\n #print self.is_func_name\r\n #print t.id\r\n if t.id in build_in_name:\r\n self.write(t.id)\r\n elif self.variable[len(self.vars)]!=\"new\" :\r\n if t.id==self.variable[len(self.vars)]:\r\n self.vars.append(t.id)\r\n self.write(t.id)#+\"@VAR\")\r\n else:\r\n self.vars.append(\"\")\r\n self.write(t.id)\r\n else:\r\n #del self.variable[len(self.vars)]\r\n self.write(t.id)'''\r\n\r\n \r\n\r\n def _Repr(self, t):\r\n self.write(\"`\")\r\n self.dispatch(t.value)\r\n self.write(\"`\")\r\n\r\n def _Num(self, t):\r\n tmp_s = \"\"\r\n repr_n = repr(t.n)\r\n # Parenthesize negative numbers, to avoid turning (-1)**2 into -1**2.\r\n if repr_n.startswith(\"-\"):\r\n self.write(\"(\")\r\n tmp_s += \"(\"\r\n # Substitute overflowing decimal 
literal for AST infinities.\r\n self.write(repr_n.replace(\"inf\", INFSTR))\r\n tmp_s += (repr_n.replace(\"inf\", INFSTR))\r\n if repr_n.startswith(\"-\"):\r\n self.write(\")\")\r\n tmp_s += \")\"\r\n self.write(\"@bin\"+str(len(self.bins)))\r\n self.bins.append(tmp_s)\r\n\r\n def _List(self, t):\r\n self.write(\"[\")\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.elts)\r\n self.write(\"]\")\r\n\r\n def _ListComp(self, t):\r\n self.write(\"[\")\r\n self.dispatch(t.elt)\r\n for gen in t.generators:\r\n self.dispatch(gen)\r\n self.write(\"]\")\r\n\r\n def _GeneratorExp(self, t):\r\n self.write(\"(\")\r\n self.dispatch(t.elt)\r\n for gen in t.generators:\r\n self.dispatch(gen)\r\n self.write(\")\")\r\n\r\n def _SetComp(self, t):\r\n self.write(\"{\")\r\n self.dispatch(t.elt)\r\n for gen in t.generators:\r\n self.dispatch(gen)\r\n self.write(\"}\")\r\n\r\n def _DictComp(self, t):\r\n self.write(\"{\")\r\n self.dispatch(t.key)\r\n self.write(\": \")\r\n self.dispatch(t.value)\r\n for gen in t.generators:\r\n self.dispatch(gen)\r\n self.write(\"}\")\r\n\r\n def _comprehension(self, t):\r\n self.write(\" for \")\r\n self.dispatch(t.target)\r\n self.write(\" in \")\r\n self.dispatch(t.iter)\r\n for if_clause in t.ifs:\r\n self.write(\" if \")\r\n self.dispatch(if_clause)\r\n\r\n def _IfExp(self, t):\r\n self.write(\"(\")\r\n self.dispatch(t.body)\r\n self.write(\" if \")\r\n self.dispatch(t.test)\r\n self.write(\" else \")\r\n self.dispatch(t.orelse)\r\n self.write(\")\")\r\n\r\n def _Set(self, t):\r\n assert(t.elts) # should be at least one element\r\n self.write(\"{\")\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.elts)\r\n self.write(\"}\")\r\n\r\n def _Dict(self, t):\r\n self.write(\"{\")\r\n def write_pair(pair):\r\n (k, v) = pair\r\n self.dispatch(k)\r\n self.write(\": \")\r\n self.dispatch(v)\r\n interleave(lambda: self.write(\", \"), write_pair, zip(t.keys, t.values))\r\n self.write(\"}\")\r\n\r\n def _Tuple(self, t):\r\n 
self.write(\"(\")\r\n if len(t.elts) == 1:\r\n (elt,) = t.elts\r\n self.dispatch(elt)\r\n self.write(\",\")\r\n else:\r\n interleave(lambda: self.write(\", \"), self.dispatch, t.elts)\r\n self.write(\")\")\r\n\r\n unop = {\"Invert\":\"~\", \"Not\": \"not\", \"UAdd\":\"+\", \"USub\":\"-\"}\r\n def _UnaryOp(self, t):\r\n self.write(\"(\")\r\n self.write(self.unop[t.op.__class__.__name__])\r\n self.write(\" \")\r\n # If we're applying unary minus to a number, parenthesize the number.\r\n # This is necessary: -2147483648 is different from -(2147483648) on\r\n # a 32-bit machine (the first is an int, the second a long), and\r\n # -7j is different from -(7j). (The first has real part 0.0, the second\r\n # has real part -0.0.)\r\n if isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num):\r\n self.write(\"(\")\r\n self.dispatch(t.operand)\r\n self.write(\")\")\r\n else:\r\n self.dispatch(t.operand)\r\n self.write(\")\")\r\n\r\n binop = { \"Add\":\"+\", \"Sub\":\"-\", \"Mult\":\"*\", \"Div\":\"/\", \"Mod\":\"%\",\r\n \"LShift\":\"<<\", \"RShift\":\">>\", \"BitOr\":\"|\", \"BitXor\":\"^\", \"BitAnd\":\"&\",\r\n \"FloorDiv\":\"//\", \"Pow\": \"**\"}\r\n def _BinOp(self, t):\r\n self.write(\"(\")\r\n self.dispatch(t.left)\r\n self.write(\" \" + self.binop[t.op.__class__.__name__] + \" \")\r\n self.dispatch(t.right)\r\n self.write(\")\")\r\n\r\n cmpops = {\"Eq\":\"==\", \"NotEq\":\"!=\", \"Lt\":\"<\", \"LtE\":\"<=\", \"Gt\":\">\", \"GtE\":\">=\",\r\n \"Is\":\"is\", \"IsNot\":\"is not\", \"In\":\"in\", \"NotIn\":\"not in\"}\r\n def _Compare(self, t):\r\n self.write(\"(\")\r\n self.dispatch(t.left)\r\n for o, e in zip(t.ops, t.comparators):\r\n self.write(\" \" + self.cmpops[o.__class__.__name__] + \" \")\r\n self.dispatch(e)\r\n self.write(\")\")\r\n\r\n boolops = {ast.And: 'and', ast.Or: 'or'}\r\n def _BoolOp(self, t):\r\n self.write(\"(\")\r\n s = \" %s \" % self.boolops[t.op.__class__]\r\n interleave(lambda: self.write(s), self.dispatch, t.values)\r\n 
self.write(\")\")\r\n\r\n def _Attribute(self,t):\r\n self.dispatch(t.value)\r\n # Special case: 3.__abs__() is a syntax error, so if t.value\r\n # is an integer literal then we need to either parenthesize\r\n # it or add an extra space to get 3 .__abs__().\r\n if isinstance(t.value, ast.Num) and isinstance(t.value.n, int):\r\n self.write(\" \")\r\n self.write(\".\")\r\n self.write(t.attr)\r\n \r\n def _Call(self, t):\r\n mod = False\r\n #print \"F:=========\",t.func._fields\r\n \r\n cur_incall = self.incall\r\n\r\n if not self.incall:\r\n self.incall = True\r\n if self.call_dealer(t):\r\n mod = True \r\n \r\n if not mod: \r\n self.dispatch(t.func) \r\n '''else:\r\n self.is_func_name = True\r\n self.ret_str = True\r\n self.dispatch(t.func)\r\n self.ret_str = False\r\n self.mod_calls[-1] = self.cur_str\r\n self.cur_str = \"\"\r\n self.is_func_name = False'''\r\n \r\n self.write(\"(\")\r\n if not cur_incall:\r\n self.incall = False\r\n comma = False\r\n for e in t.args:\r\n if comma: self.write(\", \")\r\n else: comma = True\r\n self.dispatch(e)\r\n for e in t.keywords:\r\n if comma: self.write(\", \")\r\n else: comma = True\r\n self.dispatch(e)\r\n if t.starargs:\r\n if comma: self.write(\", \")\r\n else: comma = True\r\n self.write(\"*\")\r\n self.dispatch(t.starargs)\r\n if t.kwargs:\r\n if comma: self.write(\", \")\r\n else: comma = True\r\n self.write(\"**\")\r\n self.dispatch(t.kwargs)\r\n self.write(\")\")\r\n \r\n\r\n def _Subscript(self, t):\r\n self.dispatch(t.value)\r\n self.write(\"[\")\r\n self.dispatch(t.slice)\r\n self.write(\"]\")\r\n\r\n # slice\r\n def _Ellipsis(self, t):\r\n self.write(\"...\")\r\n\r\n def _Index(self, t):\r\n self.dispatch(t.value)\r\n\r\n def _Slice(self, t):\r\n if t.lower:\r\n self.dispatch(t.lower)\r\n self.write(\":\")\r\n if t.upper:\r\n self.dispatch(t.upper)\r\n if t.step:\r\n self.write(\":\")\r\n self.dispatch(t.step)\r\n\r\n def _ExtSlice(self, t):\r\n interleave(lambda: self.write(', '), self.dispatch, t.dims)\r\n\r\n 
# others\r\n def _arguments(self, t):\r\n first = True\r\n # normal arguments\r\n defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults\r\n for a,d in zip(t.args, defaults):\r\n if first:first = False\r\n else: self.write(\", \")\r\n if self.top_level:\r\n self.args.append(a.id)\r\n \r\n self.dispatch(a),\r\n if d:\r\n self.write(\"=\")\r\n self.dispatch(d)\r\n\r\n # varargs\r\n if t.vararg:\r\n if first:first = False\r\n else: self.write(\", \")\r\n if self.top_level:\r\n self.args.append(\"*\"+str(t.vararg))\r\n self.write(\"*\")\r\n self.write(t.vararg)\r\n\r\n # kwargs\r\n if t.kwarg:\r\n if first:first = False\r\n else: self.write(\", \")\r\n if self.top_level:\r\n self.args.append(\"**\"+str(t.kwarg))\r\n self.write(\"**\"+t.kwarg)\r\n \r\n self.top_level = False\r\n\r\n def _keyword(self, t):\r\n self.write(t.arg)\r\n self.write(\"=\")\r\n self.dispatch(t.value)\r\n\r\n def _Lambda(self, t):\r\n self.write(\"(\")\r\n self.write(\"lambda \")\r\n self.dispatch(t.args)\r\n self.write(\": \")\r\n self.dispatch(t.body)\r\n self.write(\")\")\r\n\r\n def _alias(self, t):\r\n self.write(t.name)\r\n if t.asname:\r\n self.write(\" as \"+t.asname)\r\n\r\ndef generateNewCode(id, source, lines, src_ast, tag, output=sys.stdout):\r\n tree = compile(source, tag[0], \"exec\", ast.PyCF_ONLY_AST)\r\n merged = Code(id)\r\n up = Unparser(tree, lines, src_ast, merged)\r\n merged.split() \r\n merged.write_head_line(up.vars, up.bins)\r\n merged.write_return_line(up.return_vars)\r\n merged.get_caller()\r\n #merged.output()\r\n #print up.vars\r\n return merged\r\n\r\ndef checkMergable(merged_list):\r\n mergable = True\r\n for merge in merged_list[1:]:\r\n if merge.var_set!=merged_list[0].var_set:\r\n mergable = False\r\n if len(merge.orig_bins) != len(merged_list[0].orig_bins):\r\n mergable = False\r\n \r\n if mergable:\r\n #print \"YES MERGABLE\"\r\n return True\r\n else:\r\n #print \"NOT MERGABLE\"\r\n return False\r\n \r\n\r\ndef generateCommonCode(merged_list):\r\n 
common_vars = []\r\n common_param = []\r\n common_ret = []\r\n new_caller = []\r\n new_receiver = []\r\n for i in merged_list:\r\n new_caller.append([])\r\n new_receiver.append([])\r\n \r\n def mergeNames(names):\r\n new_name = []\r\n for i in names:\r\n if i.startswith(\"new@\"):\r\n i = i[4:]\r\n if not i in new_name:\r\n new_name.append(i)\r\n if len(new_name)==1:\r\n return new_name[0]\r\n else:\r\n name = \"\"\r\n for i in new_name:\r\n name+=(i+\"_\")\r\n name=name[:-1]\r\n return name\r\n \r\n def setTrue(li):\r\n for i in li:\r\n if i:\r\n return True\r\n return False \r\n \r\n for i in range(len(merged_list[0].var_set)):\r\n names = []\r\n is_param = []\r\n is_ret = []\r\n for merge in merged_list:\r\n names.append(merge.orig_vars[i])\r\n if merge.orig_vars[i] in merge.param:\r\n is_param.append(True)\r\n else:\r\n is_param.append(False)\r\n if merge.orig_vars[i] in merge.return_vars:\r\n is_ret.append(True)\r\n else:\r\n is_ret.append(False)\r\n name = mergeNames(names)\r\n common_vars.append(name)\r\n if setTrue(is_param) and not name in common_param:\r\n common_param.append(name)\r\n for i2 in range(len(is_param)):\r\n if is_param[i2]:\r\n new_caller[i2].append(names[i2])\r\n else:\r\n new_caller[i2].append(\"None\")\r\n if setTrue(is_ret) and not name in common_ret:\r\n common_ret.append(name)\r\n for i2 in range(len(is_ret)):\r\n if is_ret[i2]:\r\n new_receiver[i2].append(names[i2])\r\n else:\r\n new_receiver[i2].append(\"None\")\r\n \r\n diff_bins = [] \r\n for i in range(len(merged_list[0].orig_bins)):\r\n for merge in merged_list[1:]:\r\n if merge.orig_bins[i]!=merged_list[0].orig_bins[i]:\r\n diff_bins.append(i)\r\n break\r\n #print \"DIFF_BIN :\", diff_bins\r\n \r\n if len(diff_bins)>0:\r\n common_param.append(\"*diff_params\")\r\n param_bins = []\r\n for i in merged_list:\r\n param_bins.append([])\r\n for j in diff_bins:\r\n param_bins[-1].append(i.orig_bins[j])\r\n for j in param_bins[-1]:\r\n new_caller[merged_list.index(i)].append(j)\r\n 
#print \"PARAM :\", param_bins[-1]\r\n \r\n \r\n for i in range(len(merged_list)):\r\n merge = merged_list[i]\r\n #merge.output()\r\n merge.rewrite_code(common_vars, diff_bins)\r\n merge.rewrite_head_line(common_param)\r\n merge.rewrite_return_line(common_ret)\r\n merge.reget_caller(new_caller[i], new_receiver[i])\r\n \r\n merge.param = common_param\r\n merge.return_vars = common_ret\r\n \r\n #merge.output()\r\n merged_list[i] = merge\r\n \r\n return merged_list\r\n \r\n \r\n \r\n \r\n\r\n \r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":191,"cells":{"__id__":{"kind":"number","value":15126874865407,"string":"15,126,874,865,407"},"blob_id":{"kind":"string","value":"985c59bdba268e05a22276e17b95e523614a5349"},"directory_id":{"kind":"string","value":"0d37e3f4b32525ccd440f084b548003d618eccaf"},"path":{"kind":"string","value":"/core/settings-example.py"},"content_id":{"kind":"string","value":"6bdb8b34e570cc4aef2f1f705ebb42d900bbb505"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"bensentropy/happytransport"},"repo_url":{"kind":"string","value":"https://github.com/bensentropy/happytransport"},"snapshot_id":{"kind":"string","value":"bffecea27a6a5c3de287a3d097dd683a5aee25a1"},"revision_id":{"kind":"string","value":"e6ffe6db798f035954820a36848f229b7b1adfa3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-02T20:18:27.646933","string":"2020-06-02T20:18:27.646933"},"revision_date":{"kind":"timestamp","value":"2014-05-25T08:44:33","string":"2014-05-25T08:44:33"},"committer_date":{"kind":"timestamp","value":"2014-05-25T08:44:57","string":"2014-05-25T08:44:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"DEBUG = False\n\n# Set secret keys for CSRF protection\nSECRET_KEY = 'replace_with_secret_key'\nCSRF_SESSION_LKEY = 'replace_with_secret_lkey'\n\nCSRF_ENABLED = True\n\nANALYTICS_ID = 
'UA-your-id'"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":192,"cells":{"__id__":{"kind":"number","value":13743895375667,"string":"13,743,895,375,667"},"blob_id":{"kind":"string","value":"ac5c4ccc52062ae9d6ce66220d4afb9be19afad6"},"directory_id":{"kind":"string","value":"e2ba841119b79cc56f011e5b0e6a8f97ba3493e1"},"path":{"kind":"string","value":"/export/middleware.py"},"content_id":{"kind":"string","value":"3608bb4aa6be9123717b01e4b01c491e508e5f5f"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"tlam/exportrecord"},"repo_url":{"kind":"string","value":"https://github.com/tlam/exportrecord"},"snapshot_id":{"kind":"string","value":"a1b38e784e90d9e94f1c888ee1b238c66c79ab6a"},"revision_id":{"kind":"string","value":"b7121537f8edf442e0c800097f17f573cd63228d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-08T01:23:46.570699","string":"2016-08-08T01:23:46.570699"},"revision_date":{"kind":"timestamp","value":"2013-02-03T14:56:15","string":"2013-02-03T14:56:15"},"committer_date":{"kind":"timestamp","value":"2013-02-03T14:56:15","string":"2013-02-03T14:56:15"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gh
a_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import redirect\n\n\nclass RequireLoginMiddleware(object):\n\n def __init__(self):\n self.path_exceptions = (\n reverse('admin:index'),\n reverse('export:login'),\n )\n\n def process_request(self, request):\n if request.user.is_authenticated():\n return None\n\n if request.path.replace('/export', '').startswith(self.path_exceptions):\n return None\n\n return redirect('export:login')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":193,"cells":{"__id__":{"kind":"number","value":17008070524690,"string":"17,008,070,524,690"},"blob_id":{"kind":"string","value":"df7621c0fb778304b2c526068668a66115cd0613"},"directory_id":{"kind":"string","value":"da7d8d6edd5747ebff5ed0304b1d1bd6c35f1be5"},"path":{"kind":"string","value":"/reviewer.py"},"content_id":{"kind":"string","value":"8031ba34f731a8a70af21a6e14c07ef5b4328833"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"stuycs-softdev-fall-2013/proj2-pd7-08-theRoom"},"repo_url":{"kind":"string","value":"https://github.com/stuycs-softdev-fall-2013/proj2-pd7-08-theRoom"},"snapshot_id":{"kind":"string","value":"accce55daac8cb6a8665f42fc4eb2e33154db4f1"},"revision_id":{"kind":"string","value":"3ece7ea496f63b1865a8a461b01f8cf9aaac949b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T20:27:32.077899","string":"2021-01-19T20:27:32.077899"},"revision_date":{"kind":"timestamp","value":"2013-12-05T18:00:07","string":"2013-12-05T18:00:07"},"committer_date":{"kind":"timestamp","value":"2013-12-05T18:00:07","string":"2013-12-05T18:00:07"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from random import choice\nfrom document import doc\n# our magical, automated reviewer\n\n\ndef weightedListofKeys(corpus):\n out = []\n for key in corpus:\n for i in range(corpus[key]):\n out.append(key)\n return out\n\n# returns a list of keys, with there being weight instances of each key\ndef weightedListofKeys2D(corpus):\n out = []\n for key in corpus:\n for k in corpus[key]:\n for i in range(corpus[key][k]):\n out.append(key)\n return out\n\n\nfrom collections import defaultdict\nfrom re import sub, UNICODE\n\nSTART = doc.SQLss\nEND = doc.SQLes\n# credit to 
Earwig for this (I got his permission to use it)\n# https://github.com/earwig/earwigbot/blob/develop/earwigbot/wiki/copyvios/markov.py\ndef generateCorpus(text, corpus=None):\n \"\"\"Implements a basic ngram Markov chain of words.\"\"\"\n degree = 3 # 2 for bigrams, 3 for trigrams, etc.\n sentences = text.split(\". \")\n if (len(sentences) > 1):\n for sentence in sentences:\n corpus = generateCorpus(sentence, corpus)\n return corpus\n # use the given corpus, else use a blank new one.\n chain = defaultdict(lambda: defaultdict(lambda: 0)) if corpus == None else corpus\n words = sub(\"\", \"\", text.lower(), flags=UNICODE).split()\n padding = degree - 1\n words = ([START] * padding) + words + ([END] * padding)\n for i in range(len(words) - degree + 1):\n last = i + degree - 1\n chain[tuple(words[i:last])][words[last]] += 1\n return chain\n\n\ndef generateSentence(corpus,getNext,maxLength):\n pick = [END, END];\n while pick[0] != START:\n pick = choice(corpus)\n sentence = \" \"\n while not END in pick:\n if pick[1] != START:\n sentence += str(pick[1]) + \" \" \n pick = (pick[1], choice(weightedListofKeys(getNext(pick))))\n\tif len(sentence.split(\" \")) > maxLength:\n\t\treturn None\n return sentence\n\ndef generateReview(movie_id):\n return \"something something ipsum\"\n\ndef generateSentenceWithGrammar(corpus,getNext):\n max_len = 30\n sentence = generateSentence(corpus,getNext,max_len)\n while sentence is None or len(sentence.split(' ')) > max_len:\n sentence = generateSentence(corpus,getNext,max_len)\n return sentence\n\ncorpus = None\nfor sentence in open('quorum4').read().split('. 
'):\n\tcorpus = generateCorpus(sentence + '.', corpus)\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":194,"cells":{"__id__":{"kind":"number","value":15023795612828,"string":"15,023,795,612,828"},"blob_id":{"kind":"string","value":"45f7b94b02bbdba91306c70be6e13e60d0c1b51f"},"directory_id":{"kind":"string","value":"a968e67fa4c2ba1a673d518a5b53fe97b1d370c9"},"path":{"kind":"string","value":"/hsgame/agents/user_agents.py"},"content_id":{"kind":"string","value":"f00885ce6ea8ea5dd16b3c5a4530d6bfff56a722"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"JonathanFlynn/hearthstone-simulator"},"repo_url":{"kind":"string","value":"https://github.com/JonathanFlynn/hearthstone-simulator"},"snapshot_id":{"kind":"string","value":"a9353c8c54a9323faf323323fd6e5b4b3905d2ce"},"revision_id":{"kind":"string","value":"799896be1c52ad1df93b2023403be8da18d168a6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-08-26T09:16:39.147573","string":"2018-08-26T09:16:39.147573"},"revision_date":{"kind":"timestamp","value":"2014-07-11T15:49:49","string":"2014-07-11T15:49:49"},"committer_date":{"kind":"timestamp","value":"2014-07-11T15:49:49","string":"2014-07-11T15:49:49"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"
gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"class Observer:\n def __init__(self, writer):\n self.game = None\n self.writer = writer\n\n def observe(self, game):\n self.game = game\n game.players[0].bind(\"turn_started\", self.turn_started, game.players[0])\n game.players[0].bind(\"turn_ended\", self.turn_ended, game.players[0])\n game.players[0].hero.bind(\"died\", self.died, game.players[0])\n game.players[0].bind(\"card_drawn\", self.card_drawn, game.players[0])\n game.players[0].bind(\"card_put back\", self.card_put_back, game.players[0])\n game.players[0].hero.bind(\"damaged\", self.damaged, game.players[0])\n\n game.players[1].bind(\"turn_started\", self.turn_started, game.players[1])\n game.players[1].bind(\"turn_ended\", self.turn_ended, game.players[1])\n game.players[1].hero.bind(\"died\", self.died, game.players[1])\n game.players[1].bind(\"card_drawn\", self.card_drawn, game.players[1])\n game.players[1].bind(\"card_put back\", self.card_put_back, game.players[1])\n game.players[1].hero.bind(\"damaged\", self.damaged, game.players[1])\n\n def turn_started(self, player):\n self.writer.write(\"Turn started for \" + str(player) + \"\\n\")\n\n def turn_ended(self, player):\n self.writer.write(\"Turn ended for \" + str(player) + \"\\n\")\n\n def died(self, attacker, player):\n self.writer.write(str(player) + \" died!\\n\")\n\n def card_drawn(self, card, player):\n self.writer.write(str(player) + \" drew card \" + str(card) + \"\\n\")\n\n def card_put_back(self, card, player):\n self.writer.write(str(player) + \" put back card \" + str(card) + \"\\n\")\n\n def damaged(self, amount, what, player):\n if what is None:\n what = \"fatigue\"\n self.writer.write(str(player) + \" was damaged \" + str(amount) + \" by \" + str(what) + 
\"\\n\")\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":195,"cells":{"__id__":{"kind":"number","value":3143916109031,"string":"3,143,916,109,031"},"blob_id":{"kind":"string","value":"a47204f2bfa1afb3cb8435130062bcb71f92c3eb"},"directory_id":{"kind":"string","value":"801435fc03c772a53bc8397dd0ab8607ce6bd73a"},"path":{"kind":"string","value":"/path_finder.py"},"content_id":{"kind":"string","value":"7f9b68140d8e35d5dbda0c035fdf251faf36d507"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"ragnraok/RandomMaze"},"repo_url":{"kind":"string","value":"https://github.com/ragnraok/RandomMaze"},"snapshot_id":{"kind":"string","value":"548448b0bde3619b1bd1b2f2c18d8a702c781fb9"},"revision_id":{"kind":"string","value":"f7a8f24d417a63ebcc0b6e30b0da65e6640c571f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-27T08:55:09.913876","string":"2020-04-27T08:55:09.913876"},"revision_date":{"kind":"timestamp","value":"2012-07-15T14:05:03","string":"2012-07-15T14:05:03"},"committer_date":{"kind":"timestamp","value":"2012-07-15T14:05:03","string":"2012-07-15T14:05:03"},"github_id":{"kind":"number","value":5014910,"string":"5,014,910"},"star_events_count":{"kind":"number","value":7,"string":"7"},"fork_events_count":{"kind":"number","value":3,"string":"3"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"
gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from random_maze import RandomMaze\n\nclass MazePathFinder(object):\n def __init__(self, maze, start_pos, end_pos, row_num, col_num):\n \"\"\"\n the maze:\n 1 denote wall, 0 denote road\n and the borders are all 0\n the maze must be solvable\n start_pos: the start position tuple(row, col)\n end_pos: the end position tuple(row, col)\n \"\"\"\n self.maze = maze\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.row_num = row_num\n self.col_num = col_num\n \n def __if_pos_valid(self, row, col):\n # check the index range\n if row < 0 or row >= self.row_num or col < 0 or col >= self.col_num:\n return False\n # check if borders\n elif row == 0 or row == self.row_num - 1:\n return False\n elif col == 0 or col == self.col_num - 1:\n return False\n # then check if wall\n elif self.maze[row][col] == 1:\n return False\n # then check if visited, which is marked as -1\n elif self.maze[row][col] == -1:\n return False\n return True\n\n def bfs_find_path(self):\n \"\"\"\n use BFS to find the shortest path from start_pos to end_pos\n \"\"\"\n direction = [\n (0, 1),\n (0, -1),\n (1, 0),\n (-1, 0)]\n path_queue = [tuple(self.start_pos)]\n self.maze[self.start_pos[0]][self.start_pos[1]] = -1\n\n if_find = False\n\n record = [(0, 0)]*self.row_num\n for i in range(self.row_num):\n record[i] = [(0, 0)]*self.col_num\n\n while len(path_queue) > 0 and not if_find:\n cur_pos = path_queue[0]\n del path_queue[0]\n #print cur_pos\n self.maze[cur_pos[0]][cur_pos[1]] = -1\n\n for i in range(4):\n next_dir = direction[i]\n next_pos = (cur_pos[0] + next_dir[0], cur_pos[1] + next_dir[1])\n #print next_pos\n if self.__if_pos_valid(next_pos[0], next_pos[1]):\n self.maze[next_pos[0]][next_pos[1]] = -1\n path_queue.append(next_pos)\n\n # mark the precursor\n record[next_pos[0]][next_pos[1]] = cur_pos\n\n if next_pos[0] == self.end_pos[0] and next_pos[1] == 
self.end_pos[1]:\n if_find = True\n break\n if if_find:\n path = []\n start_row = self.start_pos[0]\n start_col = self.start_pos[1]\n end_row = self.end_pos[0]\n end_col = self.end_pos[1]\n\n cur_row = end_row\n cur_col = end_col\n\n path.append(self.end_pos)\n\n #print record\n \n while cur_row != start_row or cur_col != start_col:\n row = cur_row\n col = cur_col\n cur_row = record[row][col][0]\n cur_col = record[row][col][1]\n path.append((cur_row, cur_col))\n\n print path_queue\n print path\n\n return path\n else:\n print 'no path find'\n return []\n\nif __name__ == '__main__':\n row = 11\n col = 11\n cell_row_num = (row - 3) // 2\n cell_col_num = (col - 3) // 2\n rand_maze = RandomMaze(row, col)\n maze, path_track = rand_maze.dfs_maze()\n\n path_finder = MazePathFinder(maze, (2, 1), (2 * cell_row_num, 2 * cell_col_num + 1), row, col)\n\n path_queue = path_finder.bfs_find_path()\n \n for i in range(len(maze)):\n print maze[i]\n\n print path_queue\n\n \n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":196,"cells":{"__id__":{"kind":"number","value":4647154644953,"string":"4,647,154,644,953"},"blob_id":{"kind":"string","value":"92c6aa421841b6c02a06b73cdd23bc76014c6630"},"directory_id":{"kind":"string","value":"c561e7c6bd87ee3da54d057b3b1dc6847f373606"},"path":{"kind":"string","value":"/sbfury/tests/test_common.py"},"content_id":{"kind":"string","value":"9f245c6ab0b23cbc6ba7f5459635913d769d6f47"},"detected_licenses":{"kind":"list like","value":["GPL-3.0-only"],"string":"[\n 
\"GPL-3.0-only\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"HieuLsw/sbfury"},"repo_url":{"kind":"string","value":"https://github.com/HieuLsw/sbfury"},"snapshot_id":{"kind":"string","value":"b4d48c7df0f485ae3c9b9a754ed8377244e7519f"},"revision_id":{"kind":"string","value":"926529b4fb0085fbcb2976833a62b1e20aa895c8"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-08-12T21:37:24.071552","string":"2016-08-12T21:37:24.071552"},"revision_date":{"kind":"timestamp","value":"2010-07-02T01:44:49","string":"2010-07-02T01:44:49"},"committer_date":{"kind":"timestamp","value":"2010-07-02T01:44:49","string":"2010-07-02T01:44:49"},"github_id":{"kind":"number","value":53143507,"string":"53,143,507"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import unittest\nimport sys\nsys.path.append('./')\nfrom test import *\n\nimport common\n\nclass TestCommon(unittest.TestCase):\n\n def test_load_image(self):\n common.load_image('dragon/head.png')\n common.load_image('dragon/part.png')\n\nif __name__ == '__main__':\n 
unittest.main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2010,"string":"2,010"}}},{"rowIdx":197,"cells":{"__id__":{"kind":"number","value":1443109054110,"string":"1,443,109,054,110"},"blob_id":{"kind":"string","value":"2d2a17a6310d04618e216674f12c0f9165fe4cdf"},"directory_id":{"kind":"string","value":"8449ba9666e2eeab9311986fc9232d84c17426be"},"path":{"kind":"string","value":"/unidecode/x063.py"},"content_id":{"kind":"string","value":"794dbe899070a8f67659351f7eeebaef8f4d338b"},"detected_licenses":{"kind":"list like","value":["GPL-1.0-or-later","Artistic-1.0-Perl"],"string":"[\n \"GPL-1.0-or-later\",\n \"Artistic-1.0-Perl\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"youngking/Unidecode"},"repo_url":{"kind":"string","value":"https://github.com/youngking/Unidecode"},"snapshot_id":{"kind":"string","value":"c9f0ad04f89e8a421cef2319b22852b4974e9c57"},"revision_id":{"kind":"string","value":"f5894da85874e2ecd5e19d2236e6c9522f218fd3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-19T06:31:15.184221","string":"2021-01-19T06:31:15.184221"},"revision_date":{"kind":"timestamp","value":"2011-12-11T15:21:52","string":"2011-12-11T15:21:52"},"committer_date":{"kind":"timestamp","value":"2011-12-11T15:21:52","string":"2011-12-11T15:21:52"},"github_id":{"kind":"number","value":2958623,"string":"2,958,623"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargaz
ers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"data = (\n'Bo ',#0x0\n'Chi ',#0x1\n'Gua ',#0x2\n'Zhi ',#0x3\n'Kuo ',#0x4\n'Duo ',#0x5\n'Duo ',#0x6\n'Zhi ',#0x7\n'Qie ',#0x8\n'An ',#0x9\n'Nong ',#0xa\n'Zhen ',#0xb\n'Ge ',#0xc\n'Jiao ',#0xd\n'Ku ',#0xe\n'Dong ',#0xf\n'Ru ',#0x10\n'Tiao ',#0x11\n'Lie ',#0x12\n'Zha ',#0x13\n'Lu ',#0x14\n'Die ',#0x15\n'Wa ',#0x16\n'Jue ',#0x17\n'Mushiru ',#0x18\n'Ju ',#0x19\n'Zhi ',#0x1a\n'Luan ',#0x1b\n'Ya ',#0x1c\n'Zhua ',#0x1d\n'Ta ',#0x1e\n'Xie ',#0x1f\n'Nao ',#0x20\n'Dang ',#0x21\n'Jiao ',#0x22\n'Zheng ',#0x23\n'Ji ',#0x24\n'Hui ',#0x25\n'Xun ',#0x26\n'Ku ',#0x27\n'Ai ',#0x28\n'Tuo ',#0x29\n'Nuo ',#0x2a\n'Cuo ',#0x2b\n'Bo ',#0x2c\n'Geng ',#0x2d\n'Ti ',#0x2e\n'Zhen ',#0x2f\n'Cheng ',#0x30\n'Suo ',#0x31\n'Suo ',#0x32\n'Keng ',#0x33\n'Mei ',#0x34\n'Long ',#0x35\n'Ju ',#0x36\n'Peng ',#0x37\n'Jian ',#0x38\n'Yi ',#0x39\n'Ting ',#0x3a\n'Shan ',#0x3b\n'Nuo ',#0x3c\n'Wan ',#0x3d\n'Xie ',#0x3e\n'Cha ',#0x3f\n'Feng ',#0x40\n'Jiao ',#0x41\n'Wu ',#0x42\n'Jun ',#0x43\n'Jiu ',#0x44\n'Tong ',#0x45\n'Kun ',#0x46\n'Huo ',#0x47\n'Tu ',#0x48\n'Zhuo ',#0x49\n'Pou ',#0x4a\n'Le ',#0x4b\n'Ba ',#0x4c\n'Han ',#0x4d\n'Shao ',#0x4e\n'Nie ',#0x4f\n'Juan ',#0x50\n'Ze ',#0x51\n'Song ',#0x52\n'Ye ',#0x53\n'Jue ',#0x54\n'Bu ',#0x55\n'Huan ',#0x56\n'Bu ',#0x57\n'Zun ',#0x58\n'Yi ',#0x59\n'Zhai ',#0x5a\n'Lu ',#0x5b\n'Sou ',#0x5c\n'Tuo ',#0x5d\n'Lao ',#0x5e\n'Sun ',#0x5f\n'Bang ',#0x60\n'Jian ',#0x61\n'Huan ',#0x62\n'Dao ',#0x63\n'wei',#0x64\n'Wan ',#0x65\n'Qin ',#0x66\n'Peng ',#0x67\n'She ',#0x68\n'Lie ',#0x69\n'Min ',#0x6a\n'Men ',#0x6b\n'Fu ',#0x6c\n'Bai ',#0x6d\n'Ju ',#0x6e\n'Dao ',#0x6f\n'Wo ',#0x70\n'Ai ',#0x71\n'Juan ',#0x72\n'Yue ',#0x73\n'Zong ',#0x74\n'Chen ',#0x75\n'Chui ',#0x76\n'Jie ',#0x77\n'Tu ',#0x78\n'Ben ',#0x79\n'Na ',#0x7a\n'Nian ',#0x7b\n'Nuo ',#0x7c\n'Zu ',#0x7d\n'Wo 
',#0x7e\n'Xi ',#0x7f\n'Xian ',#0x80\n'Cheng ',#0x81\n'Dian ',#0x82\n'Sao ',#0x83\n'Lun ',#0x84\n'Qing ',#0x85\n'Gang ',#0x86\n'Duo ',#0x87\n'Shou ',#0x88\n'Diao ',#0x89\n'Pou ',#0x8a\n'Di ',#0x8b\n'Zhang ',#0x8c\n'Gun ',#0x8d\n'Ji ',#0x8e\n'Tao ',#0x8f\n'Qia ',#0x90\n'Qi ',#0x91\n'Pai ',#0x92\n'Shu ',#0x93\n'Qian ',#0x94\n'Ling ',#0x95\n'Yi ',#0x96\n'Ya ',#0x97\n'Jue ',#0x98\n'Zheng ',#0x99\n'Liang ',#0x9a\n'Gua ',#0x9b\n'Yi ',#0x9c\n'Huo ',#0x9d\n'Shan ',#0x9e\n'Zheng ',#0x9f\n'Lue ',#0xa0\n'Cai ',#0xa1\n'Tan ',#0xa2\n'Che ',#0xa3\n'Bing ',#0xa4\n'Jie ',#0xa5\n'Ti ',#0xa6\n'Kong ',#0xa7\n'Tui ',#0xa8\n'Yan ',#0xa9\n'Cuo ',#0xaa\n'Zou ',#0xab\n'Ju ',#0xac\n'Tian ',#0xad\n'Qian ',#0xae\n'Ken ',#0xaf\n'Bai ',#0xb0\n'Shou ',#0xb1\n'Jie ',#0xb2\n'Lu ',#0xb3\n'Guo ',#0xb4\n'Haba ',#0xb5\n'jie',#0xb6\n'Zhi ',#0xb7\n'Dan ',#0xb8\n'Mang ',#0xb9\n'Xian ',#0xba\n'Sao ',#0xbb\n'Guan ',#0xbc\n'Peng ',#0xbd\n'Yuan ',#0xbe\n'Nuo ',#0xbf\n'Jian ',#0xc0\n'Zhen ',#0xc1\n'Jiu ',#0xc2\n'Jian ',#0xc3\n'Yu ',#0xc4\n'Yan ',#0xc5\n'Kui ',#0xc6\n'Nan ',#0xc7\n'Hong ',#0xc8\n'Rou ',#0xc9\n'Pi ',#0xca\n'Wei ',#0xcb\n'Sai ',#0xcc\n'Zou ',#0xcd\n'Xuan ',#0xce\n'Miao ',#0xcf\n'Ti ',#0xd0\n'Nie ',#0xd1\n'Cha ',#0xd2\n'Shi ',#0xd3\n'Zong ',#0xd4\n'Zhen ',#0xd5\n'Yi ',#0xd6\n'Shun ',#0xd7\n'Heng ',#0xd8\n'Bian ',#0xd9\n'Yang ',#0xda\n'Huan ',#0xdb\n'Yan ',#0xdc\n'Zuan ',#0xdd\n'An ',#0xde\n'Xu ',#0xdf\n'Ya ',#0xe0\n'Wo ',#0xe1\n'Ke ',#0xe2\n'Chuai ',#0xe3\n'Ji ',#0xe4\n'Ti ',#0xe5\n'La ',#0xe6\n'La ',#0xe7\n'Cheng ',#0xe8\n'Kai ',#0xe9\n'Jiu ',#0xea\n'Jiu ',#0xeb\n'Tu ',#0xec\n'Jie ',#0xed\n'Hui ',#0xee\n'Geng ',#0xef\n'Chong ',#0xf0\n'Shuo ',#0xf1\n'She ',#0xf2\n'Xie ',#0xf3\n'Yuan ',#0xf4\n'Qian ',#0xf5\n'Ye ',#0xf6\n'Cha ',#0xf7\n'Zha ',#0xf8\n'Bei ',#0xf9\n'Yao ',#0xfa\n'wei',#0xfb\n'beng',#0xfc\n'Lan ',#0xfd\n'Wen ',#0xfe\n'Qin 
',#0xff\n)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":198,"cells":{"__id__":{"kind":"number","value":10874857225659,"string":"10,874,857,225,659"},"blob_id":{"kind":"string","value":"1d18dab92987ba26b45411dbec6bcacc94ceae38"},"directory_id":{"kind":"string","value":"2324903d6cd9aa8c82881a6f8d842d3663363d4d"},"path":{"kind":"string","value":"/server/main.py"},"content_id":{"kind":"string","value":"dad0955a232ddb99871076162ae50a97042e8347"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"xyzwvut/CacheFS"},"repo_url":{"kind":"string","value":"https://github.com/xyzwvut/CacheFS"},"snapshot_id":{"kind":"string","value":"4cf701abbbe118bb1726c9bcdd0086e5634db9db"},"revision_id":{"kind":"string","value":"b329c13f4449cfb1a6fa2175cbce39140eb55f08"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T07:02:59.969429","string":"2021-01-20T07:02:59.969429"},"revision_date":{"kind":"timestamp","value":"2014-09-14T21:01:15","string":"2014-09-14T21:01:15"},"committer_date":{"kind":"timestamp","value":"2014-09-14T21:01:15","string":"2014-09-14T21:01:15"},"github_id":{"kind":"number","value":16241332,"string":"16,241,332"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_
language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport os\n\nimport config\nimport backend\n\nfrom console import CacheFSConsole\nfrom cache import Cache\n\n\ndef accessible_directory(path):\n \"\"\" Check if given path is an readable and writable directory \"\"\"\n if not os.path.isdir(path):\n raise argparse.ArgumentTypeError(\"{0} is not a valid directory\".format(path))\n if os.access(path, os.R_OK | os.W_OK):\n return path\n else:\n raise argparse.ArgumentTypeError(\"{0} is not a accessible\".format(path))\n\n\ndef parse_cmdline(argv):\n parser = argparse.ArgumentParser()\n parser.set_defaults(show_console=False)\n parser.add_argument('config', type=argparse.FileType('r'),\n help='path to configfile')\n parser.add_argument('-v', '--verbosity', action='count',\n help='increase verbosity of debug output')\n parser.add_argument('-p', '--port', type=int, default=None,\n help='port number (default: 5555)')\n parser.add_argument('-c', '--with-console', dest='show_console',\n action='store_true',\n help='Show interactive console')\n\n args = parser.parse_args(argv)\n return args\n\n\ndef apply_cmdline_overwrites(args):\n \"\"\" Update configs with overwrites from commandline \"\"\"\n if args.port:\n config.config.set('main', 'port', args.port)\n\n if args.show_console:\n config.config.set('main', 'console', 'True')\n\n\ndef main(argv):\n args = parse_cmdline(argv)\n\n config.load_config(args.config)\n\n apply_cmdline_overwrites(args)\n\n # TODO: Path not expaned used before cache sanity-checked it\n back = backend.create(config.config['back'], config.config['cache']['dir'])\n cache = Cache(config.config['cache'], back)\n\n if config.config.getboolean('main', 'console'):\n CacheFSConsole(cache).cmdloop()\n\n cache.shutdown()\n\n\nif __name__ == '__main__':\n 
main(sys.argv[1:])\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":199,"cells":{"__id__":{"kind":"number","value":11733850675398,"string":"11,733,850,675,398"},"blob_id":{"kind":"string","value":"6f58ab4ffe28a72a4eb32d0c39e250b5538be3f5"},"directory_id":{"kind":"string","value":"9514ae9f7784995eebafcf92635298a7448fe1d4"},"path":{"kind":"string","value":"/test/partial.py"},"content_id":{"kind":"string","value":"9dac37b47195df533557c36d6ae84b2bb7ffd271"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"GreenTree96/hotpy"},"repo_url":{"kind":"string","value":"https://github.com/GreenTree96/hotpy"},"snapshot_id":{"kind":"string","value":"6799b5ce09350cf22faa0990c6d6fcf85ba6a566"},"revision_id":{"kind":"string","value":"df049de08cda7ee39b2e3b7698bb9dcd89e8740d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-08-21T19:01:07.711253","string":"2015-08-21T19:01:07.711253"},"revision_date":{"kind":"timestamp","value":"2011-12-06T11:41:31","string":"2011-12-06T11:41:31"},"committer_date":{"kind":"timestamp","value":"2011-12-06T11:41:31","string":"2011-12-06T11:41:31"},"github_id":{"kind":"number","value":32379294,"string":"32,379,294"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind
":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"iadd = int.__add__\nprint(iadd.__class__)\nprint(iadd(1,2))\n\nineg = int.__neg__\nprint(ineg.__class__)\nprint(ineg(1))\n\noneadd = (1).__add__\nprint(oneadd.__class__)\nprint(oneadd(2))\n\noneneg = (1).__neg__\nprint(oneneg.__class__)\nprint(oneneg())\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1,"numItemsPerPage":100,"numTotalItems":42509,"offset":100,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjE3NjEzMywic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU2MTc5NzMzLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.4YKkZeEgozispGzyydJrRRel4fnac9iCeBKzj1Z5z5brexswdjI-fB1AdXpM49zuteYu-yq_CKA5g0FvYvTcDg","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
__id__
int64
3.09k
19,722B
blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
256
content_id
stringlengths
40
40
detected_licenses
list
license_type
stringclasses
3 values
repo_name
stringlengths
5
109
repo_url
stringlengths
24
128
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
42
visit_date
timestamp[ns]
revision_date
timestamp[ns]
committer_date
timestamp[ns]
github_id
int64
6.65k
581M
star_events_count
int64
0
1.17k
fork_events_count
int64
0
154
gha_license_id
stringclasses
16 values
gha_fork
bool
2 classes
gha_event_created_at
timestamp[ns]
gha_created_at
timestamp[ns]
gha_updated_at
timestamp[ns]
gha_pushed_at
timestamp[ns]
gha_size
int64
0
5.76M
gha_stargazers_count
int32
0
407
gha_forks_count
int32
0
119
gha_open_issues_count
int32
0
640
gha_language
stringlengths
1
16
gha_archived
bool
2 classes
gha_disabled
bool
1 class
content
stringlengths
9
4.53M
src_encoding
stringclasses
18 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
year
int64
1.97k
2.01k
5,918,464,969,094
e5a0034396935f262671078e419a3696562121fd
b857c9d22c6282f1566b5ee34c2996c33a536a93
/scripts/maps/spqr_map.py
77ba089594c6250fda1b77c8b4ffd9a43eea9cb4
[ "GPL-3.0-only", "GPL-2.0-only" ]
non_permissive
alej0varas/spqr
https://github.com/alej0varas/spqr
f5295f9e8f1b1c0b2bf50ba530ba49a2007c1a60
b9a30b06589f75c4dd3a0fb33c509dc9ca8dd29b
refs/heads/master
2021-01-25T06:37:18.385694
2012-02-06T13:39:11
2012-02-06T13:39:11
3,341,184
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from __future__ import absolute_import from .. import spqr_defines as SPQR import pygame, yaml import networkx as nx from . import spqr_city as SCITY class Position(object): def __init__(self, position): self.x = position[0] self.y = position[1] class CMap(object): def __init__(self, players): self.regions = {} self.masks = {} self.graph = nx.Graph() # sort the naval connections self.loadRegions(players) def loadCities(self): cities = yaml.load(open("./data/regions/cities.yml")) city_hash = {} for i in cities: city_hash[i["city"]] = SCITY.CCity(i["city"], i["population"]) return city_hash def loadRegions(self, players): cities = self.loadCities() # Load the map's regions from a file var = yaml.load(open("./data/regions/map.yml")) # Make a temp list with the borders wlist=[] # for every region we will import the specific data for i in var: # we will add now a list with the connecting regions for region in i["borders"]: wlist.append((i["name"], region["name"])) # make the player know it's the owner players[i["owner"]].owned_regions.append(i["name"]) # Append the temp list to our data colour = players[i["owner"]].colour self.regions[i["name"]] = CRegion(i["name"], i["xpos"], i["ypos"], i["owner"], colour, (i["unit_x"], i["unit_y"]), cities[i["city"]]) 
self.graph.add_node(self.regions[i["name"]]) # repeat again for graph connections for node in wlist: self.graph.add_edge(self.regions[node[0]], self.regions[node[1]]) self.computeNavalAreas(var) def computeNavalAreas(self, data): """A port has access to ports in the same area. Here we store all the naval regions in a hash list""" for region in data: if region.has_key("naval"): for nregion in region["naval"]: self.regions[region["name"]].naval_regions.append(nregion["name"]) def getNeighbors(self, region, as_region = False): if as_region: return [i for i in self.graph.neighbors(self.regions[region])] else: return [i.image for i in self.graph.neighbors(self.regions[region])] def getRetreatNeighbors(self, region): """Return a list of all regions the unit can retreat to""" possibles = [] for r in self.getNeighbors(region.image, True): if r.owner == region.owner and len(r.units) < SPQR.MAX_STACKING: possibles.append(r) return possibles def addUnit(self, region, unit): if len(self.regions[region].units) == SPQR.MAX_STACKING: return False unit.region = region self.regions[region].units.append(unit) return True class CRegion(object): def __init__(self, image, x, y, owner, colour, city_pos, city): # image is a str self.image = image self.naval_regions = [] self.rect = pygame.Rect(x, y, 0, 0) self.owner = owner self.colour = colour self.city_position = Position(city_pos) self.city = city self.units = [] # sometimes the area that the city text appears in is outside the area of the region # we need to save this area to make re-drawing the region not need a whole map redraw self.text_rect = None def changeOwner(self, new_owner, colour): self.units = [] self.owner = new_owner self.colour = colour def __str__(self): return self.image.replace("_", " ").title()
UTF-8
Python
false
false
2,012
5,196,910,468,344
c7a269914934d381867609f1dbde93b499c3fbc7
817dcadf248d3b021bd38990ce0799759cfb94c3
/morse/src/morse/core/sensor.py
163e939f5ac0a37c35176bde75b515c0b0792d86
[ "BSD-3-Clause" ]
permissive
danyalrehman/Aerospace-Simulations
https://github.com/danyalrehman/Aerospace-Simulations
4abc87fc9783a137bf1a31315471acbb4ca16e00
44a74ca86e08ac7ee8a916590e5242d4879613ab
refs/heads/master
2020-05-30T11:24:50.689457
2013-08-15T15:09:52
2013-08-15T15:09:52
11,979,645
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import logging; logger = logging.getLogger("morse." + __name__) from abc import ABCMeta import time # profiler import morse.core.object from morse.core.services import service class Sensor(morse.core.object.Object): """ Basic Class for all sensors Inherits from the base object class. """ # Make this an abstract class __metaclass__ = ABCMeta def __init__ (self, obj, parent=None): """ Constructor method. """ # Call the constructor of the parent class super(Sensor, self).__init__(obj, parent) # Define lists of dynamically added functions self.output_functions = [] self.output_modifiers = [] self.profile = None if "profile" in self.bge_object: self.time = {} self.profile = ["profile", "profile_action", "profile_modifiers", "profile_datastreams"] for key in self.profile: self.time[key] = 0.0 self.time_start = time.time() def finalize(self): self._active = False super(Sensor, self).finalize() del self.output_functions[:] del self.output_modifiers[:] def sensor_to_robot_position_3d(self): """ Compute the transformation between the sensor and the associated robot """ main_to_origin = self.robot_parent.position_3d main_to_sensor = main_to_origin.transformation3d_with(self.position_3d) return main_to_sensor def action(self): """ Call the action functions that have been added to the list. 
""" # Do nothing if this component has been deactivated if not self._active: return # Update the component's position in the world self.position_3d.update(self.bge_object) # record the time before performing the default action for profiling if self.profile: time_before_action = time.time() # Call the regular action function of the component self.default_action() # record the time before calling modifiers for profiling if self.profile: time_before_modifiers = time.time() # Data modification functions for function in self.output_modifiers: function(self) # record the time before calling datastreams for profiling if self.profile: time_before_datastreams = time.time() # Lastly output functions for function in self.output_functions: function(self) # profiling if self.profile: time_now = time.time() self.time["profile"] += time_now - time_before_action self.time["profile_action"] += time_before_modifiers - time_before_action self.time["profile_modifiers"] += time_before_datastreams - time_before_modifiers self.time["profile_datastreams"] += time_now - time_before_datastreams morse_time = time_now - self.time_start for key in self.profile: ratio = self.time[key] / morse_time # format the display self.bge_object[key] = "%4.1f%% %s"% (100.0 * ratio, '█' * int(10 * ratio)) if morse_time > 1: # re-init mean every sec for key in self.profile: self.time[key] = 0.0 self.time_start = time.time() @service def get_local_data(self): """ Returns the current data stored in the sensor. :return: a dictionary of the current sensor's data """ return (self.local_data)
UTF-8
Python
false
false
2,013
16,338,055,614,738
4ced2361a4c51c127ebfc9150d5ca65ebd3efeab
2b7eeb26b02ada4c2b15edaf32c954851d8a6e8f
/src/sequoia/security.py
b15e2d82ee14e377d05d0ccf5a49619173ffe713
[ "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-or-later", "GPL-1.0-or-later", "GPL-2.0-only", "GPL-2.0-or-later" ]
non_permissive
oblalex/Sequoia
https://github.com/oblalex/Sequoia
6f3553efaaee2489e3481ce17e6d16d77950bbc6
ca04be0559ce6c4e2c7a3184f10b501be9155d2f
refs/heads/master
2021-01-01T17:10:19.486822
2013-11-10T19:44:10
2013-11-10T19:44:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from OpenSSL import SSL from twisted.internet import ssl from twisted.python import log class ServerContextFactory(ssl.DefaultOpenSSLContextFactory): def __init__(self, privateKeyFileName, certificateFileName, caPEM_FileName, sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context): self.caPEM_FileName = caPEM_FileName ssl.DefaultOpenSSLContextFactory.__init__(self, privateKeyFileName, certificateFileName, sslmethod, _contextFactory) def cacheContext(self): ssl.DefaultOpenSSLContextFactory.cacheContext(self) self._context.set_verify( SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, self._verify) self._context.load_verify_locations(self.caPEM_FileName) def _verify(self, connection, x509, errnum, errdepth, ok): if not ok: log.err( "Invalid cert from subject: {0}".format(x509.get_subject())) return False else: return True class ClientCtxFactory(ssl.ClientContextFactory): def __init__(self, privateKeyFileName, certificateFileName): self.privateKeyFileName = privateKeyFileName self.certificateFileName = certificateFileName def getContext(self): self.method = SSL.SSLv23_METHOD ctx = ssl.ClientContextFactory.getContext(self) ctx.use_privatekey_file(self.privateKeyFileName) ctx.use_certificate_file(self.certificateFileName) return ctx
UTF-8
Python
false
false
2,013
14,164,802,151,614
7f8d636216df82d4394c24ad3101dbd21ea01b16
08319ef51e3f82bf0c04ea75710cf4f898430875
/feature_arima/arima_toy.py
836a8ea7c65930bafc1d0036920d367cc5da5d0a
[]
no_license
walkingsparrow/tests
https://github.com/walkingsparrow/tests
d6a4b36919d6029b898cc3743e4b8e3640371b3d
2d2490f4d1149f66cf943e5f4b5bb63b097fbe71
refs/heads/master
2016-09-05T21:57:12.348291
2014-02-26T22:17:03
2014-02-26T22:17:03
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys import random from numpy import matrix from numpy import linalg def diff(ts, d): """ difference a time series ts <- (1 - B)^d ts """ for i in range(d): y = [] for j in range(1, len(ts)): y.append(ts[j] - ts[j - 1]) ts = y return ts # ------------------------------------------------------------------------ def get_errors(ts, p, q, phi, theta): """ compute the error items """ mu = float(sum(ts))/len(ts) n = max(p, q) z = [] for i in range(n, len(ts)): t = 0.0 for j in range(p): t += phi[j] * (mu - ts[i - j - 1]) for j in range(q): t += theta[j] * (0 if (i - j - 1) < n else z[i - j - 1 - n]) z.append(ts[i] - t) return z # ------------------------------------------------------------------------ def get_jacobian(ts, p, q, z): """ compute the Jacobian matrix """ mu = float(sum(ts))/len(ts) n = max(p, q) jacob = [] for i in range(n, len(ts)): t = [] for j in range(p): t.append(mu - ts[i - j - 1]) for j in range(q): t.append(0 if (i - j - 1) < n else z[i - j - 1 - n]) jacob.append(t) return jacob # ------------------------------------------------------------------------ def iter(ts, p, q, phi, theta): """ LMA iteration """ z = get_errors(ts, p, q, phi, theta) jacob = get_jacobian(ts, p, q, z) m = matrix(jacob) m_crossprod = m.transpose() * m m_trans_z = m.transpose() * matrix(z).transpose() delta = linalg.solve(m_crossprod, m_trans_z).getA1() for i in range(p): phi[i] += delta[i] for i in range(q): theta[i] += delta[p + i] return [phi, theta] # ------------------------------------------------------------------------ def arima(ts, p, d, q): """ ARIMA """ ts = diff(ts, d) phi = [random.random()] * p theta = [random.random()] * q print '....init: phi = %s, theta = %s' % (str(phi), str(theta)) for i in range(100): [phi, theta] = iter(ts, p, q, phi, theta) print '....iter %d: phi = %s, theta = %s' % (i, str(phi), str(theta)) # ------------------------------------------------------------------------ ts = [100.8, 81.6, 66.5, 34.8, 30.6, 7, 19.8, 92.5, 154.4, 125.9, 
84.8, 68.1, 38.5, 22.8, 10.2, 24.1, 82.9, 132, 130.9, 118.1, 89.9, 66.6, 60, 46.9, 41, 21.3, 16, 6.4, 4.1, 6.8, 14.5, 34, 45, 43.1, 47.5, 42.2, 28.1, 10.1, 8.1, 2.5, 0, 1.4, 5, 12.2, 13.9, 35.4, 45.8, 41.1, 30.4 ] p = int(sys.argv[1]) d = int(sys.argv[2]) q = int(sys.argv[3]) arima(ts, p, d, q)
UTF-8
Python
false
false
2,014
17,033,840,304,585
920400733863662189b12f3ea5b9ba194a527ae1
61b8528890d5ed7980fecb56f7a7ea09a090d351
/techent/extensions/__init__.py
b382be4d2a51edc5be3df41b7ea279a13daa3afd
[]
no_license
anlatunskaya/techent
https://github.com/anlatunskaya/techent
116d7833f419467afa6597fa49037b5e3d20cf0c
9962e8fc3bea02c78b1afb9dd3d71858850f5201
refs/heads/master
2021-01-17T06:52:58.298179
2012-07-27T15:16:09
2012-07-27T15:16:09
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ :Authors: - qweqwe """ from flask_mongoengine import MongoEngine from flaskext.mail import Mail from techent.extensions.login_manager import login mongoengine = MongoEngine() mail_ext = Mail() __all__ = ['login', 'mongoengine', 'mail_ext']
UTF-8
Python
false
false
2,012
19,215,683,684,689
92f2e6b4d5e804619e817230e4b62bac119e9176
9ec2a3acaed5776b823ede8c92fe4f3144772e74
/11_756_Project_1/AudioCapture.py
98d9dd40d1bc9e3d9fe275ece7064ad1ccbb8be2
[]
no_license
happyWinner/11_756_Design_and_Implementation_of_Speech_Recognition_Systems
https://github.com/happyWinner/11_756_Design_and_Implementation_of_Speech_Recognition_Systems
74f4ba4d16400d9ae61a389b9ee4ebd546e3e218
70f894743e71b3a1db1b41172fd36a07d461326c
refs/heads/master
2015-08-14T17:20:16.422367
2014-11-12T19:48:10
2014-11-12T19:48:10
26,552,725
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Capture audio from default input device and endpointing automatically ''' import pyaudio import math import wave import MFCC from array import array CHUNK = 1024 # Frames_Per_Buffer RATE = 16000 # Sampling rate CHANNELS = 1 # Mono FORMAT = pyaudio.paInt16 # 16-bit resolution THRESHOLD = 85.0 # THRESHOLD for determining whether a sound segment is silent or not MAX_TOLERENCE = 10 # Maximum number of silent segments system will tolerate # 10 segments = 10240 frames = 0.64 seconds (given sample rate of 16KHz) OUTPUT_RAW = "recording.raw" OUTPUT_WAV = "recording.wav" # This is the main function that controls the capture and write-to-file def main(): data = capture() write_to_raw(data, OUTPUT_RAW) write_to_wav(data, OUTPUT_WAV) mfcc_40 = MFCC.MFCC(data, RATE, 40, ) mfcc_40.plot_spectrum() input("Press ENTER to quit.") ''' The below is for presentation # Endpointing data = capture() write_to_raw(data, OUTPUT_RAW) write_to_wav(data, OUTPUT_WAV) # Zero & One data = capture() mfcc_40 = MFCC.MFCC(data, RATE, 40, None) mfcc_40.plot_spectrum() # Filter bank 40 & 25 data = capture() mfcc_40 = MFCC.MFCC(data, RATE, 40, None) mfcc_40.plot_spectrum() mfcc_25 = MFCC.MFCC(data, RATE, 25, None) mfcc_25.plot_spectrum() # Noise 40 bank mfcc_40 = MFCC.MFCC(None, RATE, 40, "without_noise.wav") mfcc_40.plot_spectrum() mfcc_40 = MFCC.MFCC(None, RATE, 40, "with_noise.wav") mfcc_40.plot_spectrum() # Noise 25 bank mfcc_25 = MFCC.MFCC(None, RATE, 25, "without_noise.wav") mfcc_25.plot_spectrum() mfcc_25 = MFCC.MFCC(None, RATE, 25, "with_noise.wav") mfcc_25.plot_spectrum() ''' def is_silent(segment): ''' Energy-based silence calculation Returns true if the sound segment is silent @param segment: sound signals @return: bool ''' return compute_energy(segment) < THRESHOLD def compute_energy(segment): ''' Computes the energy level in decibels(DB) @param segment: sound signals @return: float ''' total = 0 for sample in segment: total += sample * sample return 10 * math.log10(total) def 
print_segment(segment): ''' For testing purpose @param segment: audio signals ''' for sample in segment: print(sample) def capture(): ''' Capture speech and perform endpointing ''' # Hit to talk input("Please press ENTER to start recording.") # Instantiate the PyAudio class p = pyaudio.PyAudio() # Open a stream but not start recording stream = p.open(rate = RATE, channels = CHANNELS, format = FORMAT, input = True, frames_per_buffer = CHUNK, start = False) # Audio data data = [] # Start the stream stream.start_stream() print("*Recording, Please speak now") # Keeps track of the number of silent sound segments num_of_silent_segments = 0 # A boolean variable to indicate whether a speech has started or not started = False # Save audio data and endpointing it while stream.is_active(): # Retrieve the next CHUNK and store it in a local buffer buffer = stream.read(CHUNK) # Map the buffer into an array of signed shorts (16-bit samples) segment = array('h', buffer) # Display the current energy level , for testing #energy = computeEnergy(segment) #print(int(energy)) # A boolean variable to check for silence no_sound = is_silent(segment); if (no_sound): num_of_silent_segments += 1 elif (not no_sound and not started): started = True num_of_silent_segments = 0 # Resets the count of silent segments """ Only stop the stream when speech has started and more than 30 consecutive silient segments (1.92 secs) has been detected """ if (num_of_silent_segments > MAX_TOLERENCE and started == True): stream.stop_stream() data.append(buffer) print("*End of Speech detected.") # Close stream and terminate PyAudio stream.close() p.terminate() return b''.join(data) def write_to_raw(data, filename): ''' Save audio data to .raw file @param data: audio data @param filename: the name of the target file ''' file = open(filename, 'wb') file.write(data) file.close() print("Recorded speech successfully written to " + OUTPUT_RAW + ".") def write_to_wav(data, filename): ''' Save audio data to .wav file 
@param data: audio data @param filename: the name of the target file ''' wf = wave.open(filename, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(pyaudio.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(data) wf.close() print("Recorded speech successfully written to " + OUTPUT_WAV + ".") if __name__ == '__main__': main()
UTF-8
Python
false
false
2,014
9,603,546,893,862
d50cefca01120649dac00f5fe39d9079ac52b225
010c5fbc97731286be00028ff33fc981d943bca3
/primal/src/code/db-gene/tests/TestSnpLdDao.py
90da2484d4cf49752359c662141034d982ddd9da
[]
no_license
orenlivne/ober
https://github.com/orenlivne/ober
6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30
810b16b2611f32c191182042240851152784edea
refs/heads/master
2021-01-23T13:48:49.172653
2014-04-03T13:57:44
2014-04-03T13:57:44
6,902,212
7
1
null
null
null
null
null
null
null
null
null
null
null
null
null
''' ============================================================ Test retrieving Gene information from a local mirror of the UCSC gene browser. Created on November 2, 2012 @author: Oren Livne <[email protected]> ============================================================ ''' import unittest, networkx as nx, numpy as np, db_gene, tempfile from numpy.ma.testutils import assert_equal from sqlalchemy import create_engine class TestSnpLdDao(unittest.TestCase): #--------------------------------------------- # Constants #--------------------------------------------- #--------------------------------------------- # Setup Methods #--------------------------------------------- def setUp(self): '''Use a localhost UCSC copy.''' self.engine = create_engine(db_gene.DEFAULT_URL) self.ld_dao = db_gene.snp.snp_db_dao.DEFAULT_SNP_DB_DAOS().ld_dao self.snp_dao = db_gene.ucsc.ucsc_dao.DEFAULT_UCSC_DAOS().snp_dao self.my_snp_dao = db_gene.snp.snp_db_dao.DEFAULT_SNP_DB_DAOS().snp_dao # Base.metadata.create_all(self.engine) def tearDown(self): '''Drop the database.''' pass #--------------------------------------------- # Test Methods #--------------------------------------------- def test_num_records(self): '''Test getting the total # of SNP records.''' assert_equal(self.ld_dao.num_ld_records(), 1394244, 'Wrong row count from SNP table') assert_equal(self.ld_dao.num_snp_records(), 271486, 'Wrong row count from SNP table') def test_ld_graph(self): '''Test retrieving SNPs by chromosome.''' chrom = 22 g = self.ld_dao.ld_graph(chrom) assert_equal(g.number_of_nodes(), 2958, 'Wrong number of SNPs on chromosome %d' % (chrom,)) def test_ld_graph_compute_frames(self): '''Test retrieving SNPs by chromosome and calculating frames (=independent SNP sets).''' chrom = 22 g = self.ld_dao.ld_graph(chrom) blocks = nx.connected_components(g) assert_equal([len(x) for x in blocks], [1172, 445, 229, 137, 63, 61, 52, 34, 30, 28, 25, 24, 21, 17, 15, 12, 12, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 8, 7, 7, 7, 7, 7, 
7, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], 'Unexpected LD block size') block_frames = list(db_gene.snp.ld_graph._compute_block_frames(g, self.snp_dao)) assert_equal(np.array([len(x) for x in block_frames[0].itervalues()]), [292, 392, 287, 100, 51, 20, 14, 6, 3, 2, 2, 1, 1, 1], 'Unexpected independent SNP frame sizes within LD block 0') frames = db_gene.snp.ld_graph._compute_frames(g, self.snp_dao) frame_size = [len(x) for x in frames] assert_equal(sum(frame_size), g.number_of_nodes(), 'Wrong total number of SNPs in all frames') assert_equal(frame_size, [392, 292, 287, 151, 122, 105, 100, 90, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 88, 88, 88, 88, 88], 'Wrong final frame sizes') def test_frames(self): '''Test splitting a SNP list into frames (main method called by client codes).''' # Create test data (using ld_graph as well, but could come from a different source) chrom = 22 independent_snp_names = ['rs0', 'rs1', 'zzzzzzz2'] # Are not in LD to other SNPs in snp_names snp_names = self.ld_dao.ld_graph(chrom).nodes() frames = db_gene.snp.ld_graph.frames(chrom, independent_snp_names + snp_names, self.snp_dao, self.ld_dao) frame_size = [len(x) for x in frames] assert_equal(sum(frame_size), len(snp_names) + len(frames) * len(independent_snp_names), 'Wrong total number of SNPs in all frames') assert_equal(frame_size, np.array([395, 295, 290, 154, 125, 108, 103, 93, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 91, 91, 91, 91, 91]), 'Wrong final frame sizes') def test_frames_with_different_dao(self): '''Test splitting a SNP list into frames (main method called 
by client codes) using our snp table instead of the snp135 UCSC table.''' # Create test data (using ld_graph as well, but could come from a different source) chrom = 22 independent_snp_names = ['rs0', 'rs1', 'zzzzzzz2'] # Are not in LD to other SNPs in snp_names snp_names = self.ld_dao.ld_graph(chrom).nodes() frames = db_gene.snp.ld_graph.frames(chrom, independent_snp_names + snp_names, self.my_snp_dao, self.ld_dao) frame_size = [len(x) for x in frames] assert_equal(sum(frame_size), len(snp_names) + len(frames) * len(independent_snp_names), 'Wrong total number of SNPs in all frames') assert_equal(frame_size, np.array([393, 295, 291, 154, 125, 108, 104, 93, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 91, 91, 91, 91, 91]), 'Wrong final frame sizes') def test_frames_genotype_sample(self): '''Same as test_frames(), but with dat aset as in impute's TestPhasePipeline test suite.''' chrom = 22 snp_names = np.array(['rs9605923', 'rs5747999', 'rs5746679', 'rs11089263', 'rs11089264', 'rs2845377', 'rs16984825', 'rs1654']) # Sort snp_names by anything (e.g., name) so that we can quickly search for items # within that list orig_indices = snp_names.argsort() ld_g = self.ld_dao.ld_graph(chrom, snps=snp_names) # SNPs that are independent of all other SNPs ind = list(set(snp_names) - set(ld_g.nodes())) independent = np.sort(orig_indices[np.searchsorted(snp_names[orig_indices], ind)]) frames = db_gene.snp.ld_graph._compute_frames(ld_g, self.snp_dao) frame_index = [np.sort(np.concatenate((independent, orig_indices[np.searchsorted(snp_names[orig_indices], list(f))]))) for f in frames] assert_equal(sorted(list(reduce(set.union, frame_index, set([])))), np.arange(len(snp_names)), 'Union of frames should be the original set') assert_equal(frame_index, [[0, 3, 4, 5, 6, 7], [0, 1, 2, 5, 6, 7]], 'Wrong frames') def test_save_and_load_frames(self): '''Check that saving and loading frames from a text file preserves them.''' chrom = 22 snp_names = np.array(['rs9605923', 'rs5747999', 
'rs5746679', 'rs11089263', 'rs11089264', 'rs2845377', 'rs16984825', 'rs1654']) frames = db_gene.snp.ld_graph.Frames((chrom, x) for x in db_gene.snp.ld_graph.frames(chrom, snp_names, self.snp_dao, self.ld_dao)) out_file = tempfile.TemporaryFile() db_gene.snp.ld_graph.write_frames(frames, out_file) # Only needed here to simulate closing & reopening file; you will need to call # out_file.close() on to prevent file locking in Windows out_file.seek(0) loaded_frames = db_gene.snp.ld_graph.read_frames(out_file) out_file.close() assert_equal(loaded_frames, frames, 'Saving and loading did not restore original frames') #--------------------------------------------- # Private Methods #---------------------------------------------
UTF-8
Python
false
false
2,014
9,440,338,141,450
b38b7c8423fc7219781035a2b9345ea86b1b7c10
f1bf12c88c4233785c05d5cc21beb3514245c861
/ws/cartgethandler.py
7b0afb955627dbae883bca8438ca2b059dc0ee77
[]
no_license
chinostroza/ecommerce
https://github.com/chinostroza/ecommerce
001c512d07baff42e5e16c26117406bef95e681a
e5b0b49826a55137f6eb44899aa6fe0ff00f4407
refs/heads/master
2016-08-06T20:23:04.880201
2014-07-09T04:21:45
2014-07-09T04:21:45
19,540,309
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import tornado.auth
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web

from basehandler import BaseHandler
from model.cart import Cart
from bson import json_util


class CartGetHandler(BaseHandler):
    """Return a single cart, looked up by its identifier, as JSON."""

    def get(self):
        """Serve GET requests: validate the token, load the cart, dump it."""
        # Bail out immediately when the access token does not validate.
        if not self.ValidateToken():
            return

        cart_id = self.get_argument("identifier", "")

        # Load the cart from the carts collection and serialize it with
        # bson's json_util (handles BSON-specific field types).
        cart = Cart()
        cart.InitById(cart_id, self.db.carts)
        self.write(json_util.dumps(cart.Print()))
UTF-8
Python
false
false
2,014
10,625,749,113,066
78e20022d3bc59cebbb14ccc793486f36af8abda
949f7dc1b89df0f599d9780cf0a08dc2eaf17dda
/HySoP/setup.py.in
dec8ebdc15b095426ca2f1db1e40868eef7d00df
[ "GPL-3.0-only" ]
non_permissive
fperignon/parmessandbox
https://github.com/fperignon/parmessandbox
d6c9f6134e47a268da5c8c6e3b297a3620725fd0
135b20bd3c2991d0cab2bf25ad5ad2a6f428a454
refs/heads/master
2016-06-06T19:04:44.837812
2014-11-17T16:32:01
2014-11-17T16:32:01
51,591,432
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""setup.py file for @PYPACKAGE_NAME@ (template configured by CMake)."""
from numpy.distutils.core import setup, Extension
from numpy.distutils.misc_util import Configuration
import os

# Full package name
name = '@PYPACKAGE_NAME@'

# List of modules (directories) to be included
packages = ['parmepy',
            'parmepy.domain',
            'parmepy.domain.subsets',
            'parmepy.fields',
            'parmepy.operator',
            'parmepy.operator.discrete',
            'parmepy.problem',
            'parmepy.tools',
            'parmepy.numerics',
            'parmepy.numerics.integrators',
            ]

# Test sub-packages, appended only when @WITH_TESTS@ is ON.
# BUGFIX: 'parmepy.numerics.tests' was listed twice; duplicate removed.
packages_for_tests = ['parmepy.domain.tests',
                      'parmepy.fields.tests',
                      'parmepy.operator.tests',
                      'parmepy.numerics.tests',
                      'parmepy.tools.tests',
                      'parmepy.problem.tests',
                      ]

# BUGFIX: the original compared strings with `is` (object identity, not
# equality).  That only works by accident of CPython literal interning and
# emits SyntaxWarning on Python >= 3.8; `==` is the correct comparison.
if "@USE_MPI@" == "ON":
    packages.append('parmepy.mpi')
    packages_for_tests.append('parmepy.mpi.tests')

if "@WITH_GPU@" == "ON":
    packages.append('parmepy.gpu')
    packages_for_tests.append('parmepy.gpu.tests')

if "@WITH_TESTS@" == "ON":
    packages = packages + packages_for_tests

# Enable this to get debug info
DISTUTILS_DEBUG = 1

ext_modules = []

# Check if libparmes was created
enable_fortran = "@WITH_LIB_FORTRAN@"

if enable_fortran == "ON":
    inc_dir = '@MPI_Fortran_INCLUDE_PATH@'.split(';')
    # Drop empty entries to avoid a bare "-I -I" in the compiler call,
    # which results in a bug.
    while inc_dir.count('') > 0:
        inc_dir.remove('')
    parmes_dir = '@CMAKE_BINARY_DIR@/Modules'
    inc_dir.append(parmes_dir)
    fortran_dir = '@CMAKE_SOURCE_DIR@/parmepy/f2py/'
    parmes_libdir = ['@CMAKE_BINARY_DIR@/src']
    parmeslib = ['@PARMES_LIBRARY_NAME@']
    f2py_options = ['--no-lower']
    fortran_src = []
    withfftw = "@WITH_FFTW@"
    if withfftw == "ON":
        fortran_src.append(fortran_dir + 'parameters.f90')
        fortran_src.append(fortran_dir + 'fftw2py.f90')
        fftwdir = '@FFTWLIB@'
        parmeslib.append('fftw3')
        parmeslib.append('fftw3_mpi')
        parmes_libdir.append(fftwdir)
    else:
        packages.append('parmepy.fakef2py')
        packages.append('parmepy.fakef2py.fftw2py')
    withscales = '@WITH_SCALES@'
    if withscales == "ON":
        # parameters.f90 was already added when FFTW is ON; only add it here
        # when it is missing.
        if withfftw == "OFF":
            fortran_src.append(fortran_dir + 'parameters.f90')
        fortran_src.append(fortran_dir + 'scales2py.f90')
    else:
        packages.append('parmepy.fakef2py')
        packages.append('parmepy.fakef2py.scales2py')
    # f2py diagnostic macros; F2PY_REPORT_ATEXIT is only valid on Linux.
    options = [('F2PY_REPORT_ON_ARRAY_COPY', '1')]
    if os.uname()[0] == 'Linux':
        options.append(('F2PY_REPORT_ATEXIT', '1'))
    parpyModule = Extension(name='parmepy.f2py',
                            f2py_options=f2py_options,
                            sources=fortran_src,
                            include_dirs=inc_dir,
                            library_dirs=parmes_libdir,
                            libraries=parmeslib,
                            define_macros=options,
                            )
    ext_modules.append(parpyModule)
else:
    # No Fortran library: fall back to the pure-python stand-ins.
    packages.append('parmepy.fakef2py')
    packages.append('parmepy.fakef2py.scales2py')
    packages.append('parmepy.fakef2py.fftw2py')

data_files = []
if "@WITH_GPU@" == "ON":
    # Ship the OpenCL kernel sources (*.cl), skipping hidden/backup files.
    cl_src_dirs = ["cl_src", "cl_src/kernels",
                   "cl_src/advection", "cl_src/remeshing"]
    for cl_dir in cl_src_dirs:
        data_files.append(
            ('./parmepy/gpu/' + cl_dir,
             ['@CMAKE_SOURCE_DIR@/parmepy/gpu/' + cl_dir + '/' + cl_file
              for cl_file in os.listdir(
                  '@CMAKE_SOURCE_DIR@/parmepy/gpu/' + cl_dir + '/')
              if cl_file[0] != '.' and cl_file[0] != '#'
              and cl_file[-3:] == '.cl']))

config = Configuration(
    name=name,
    version='@PYPACKAGE_VERSION@',
    description='Particular Methods implementation for heterogenous platforms.',
    author='G.H Cottet, J.M Etancelin, C.Mimeau, F.Pérignon, C. Picard',
    author_email='[email protected]',
    url='https://forge.imag.fr/projects/parmes/',
    license='GNU public license',
    package_dir={'': '@CMAKE_SOURCE_DIR@'},
    ext_modules=ext_modules,
    packages=packages,
    data_files=data_files,
)

setup(**config.todict())
UTF-8
Python
false
false
2,014
9,302,899,171,928
7df1ba75d3abdcfe714927460ae5110892290f17
344aefd0e36f1909a51e25b38bbc6ca09fd036ee
/CS 303E/Python Programs/NestedLoops.py
300bcc8a0dd1ea7980164b92d99f52bfe732bb6d
[]
no_license
keachico/CS303E_and_CS313E
https://github.com/keachico/CS303E_and_CS313E
9fb41eee4ada718b91c10f3700831dae78c71db1
0cf0ba9e5ea48fec81fb56a8c84c315df7ad4b88
refs/heads/master
2021-01-25T05:21:35.490099
2014-04-02T02:20:00
2014-04-02T02:20:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Earlier commented-out experiments (star triangle, nested-loop counter)
# removed; see version history.

def main():
    """Print a 10-row triangle: row i is the digits i, i-1, ..., 0.

    BUGFIX/portability: the original used the Python-2-only statement
    `print triangle_row`; the single-argument call form below produces
    identical output on both Python 2 and Python 3.
    """
    for i in range(10):
        # Build the row by counting i down to 0, e.g. i=3 -> "3210".
        row = ""
        while i >= 0:
            row = row + str(i)
            i = i - 1
        print(row)

main()
UTF-8
Python
false
false
2,014
6,743,098,670,409
9228fddca441c9c7690d60ae16780c5fd87561ad
e6bf776e5f2b0b697c4dce4e4dc6043412fba94b
/src/pyfnc/tests/test2.py
eb1eee8c7b1d9d8007e3268bec952bdf2ca16ecd
[]
no_license
jldupont/pyfnc
https://github.com/jldupont/pyfnc
78c5feee23300acd1b9286f636a6fb63ac96139a
698131a5d3ba37d1986ca73d8cfd8639aff5e445
refs/heads/master
2021-01-22T16:26:28.112361
2013-03-03T12:34:00
2013-03-03T12:34:00
3,353,432
3
1
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Created on 2012-02-03 @author: jldupont """ import logging import types import unittest try: from pyfnc import patterned, pattern except: import os, sys ap=os.path.abspath(__file__) dn=os.path.dirname base=dn(dn(dn(ap))) sys.path.insert(0, base) from pyfnc import patterned, pattern @pattern('male', str) def greet_male(p1, p2): return "Hello M. %s!" % p2 @pattern('female', str) def greet_female(p1, p2): return "Hello Mrs. %s!" % p2 @patterned def greet(p1, p2): pass class TestCases(unittest.TestCase): def test_male(self): self.assertEqual("Hello M. Smith!", greet('male', 'Smith')) def test_female(self): self.assertEqual("Hello Mrs. Adam!", greet('female', 'Adam')) if __name__ == '__main__': unittest.main()
UTF-8
Python
false
false
2,013
13,700,945,698,380
6a4a64c7111af578602a3cee6a8088e48e2264e1
4db50e16482c6833c06a2d727d47ff625e22a54a
/run.py
bf4d0d0ed9486245b413b87bf969b7459b5ddd4c
[ "Apache-2.0" ]
permissive
crotsos/natblaster
https://github.com/crotsos/natblaster
da1f84bd67838c788d6b196c2dc36b097defa6f0
5e5b25e63308b2bc02ddb88151009289abc908ce
refs/heads/master
2020-12-24T22:20:08.472945
2012-04-24T17:35:21
2012-04-24T17:35:21
4,125,881
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python import sys import os import string # the basics of the command basic_command = "sudo ./peer" #semi-static values helper_ip = "server.example.org" peer_ip = "192.168.1.100" buddy_ext_ip = "buddy.example.com" buddy_int_ip = "192.168.1.99" message = '"message from peer"' random = "" # "" means no device passed to peer, otherwise the named device is passed device = "" # dynamic values - set to defaults peer_port = 4000 helper_port = 8000 buddy_int_port = 3000 # loop over all arguments and create the extraports peer-args for i in range(1,len(sys.argv)) : if (sys.argv[i][len(sys.argv[i])-1] == "p") : peer_port = sys.argv[i][:len(sys.argv[i])-1] if (sys.argv[i][len(sys.argv[i])-1] == "h") : helper_port = sys.argv[i][:len(sys.argv[i])-1] if (sys.argv[i][len(sys.argv[i])-1] == "b") : buddy_int_port = sys.argv[i][:len(sys.argv[i])-1] if (sys.argv[i][0] == "r") : random = " --random" # create the command command = "" command += basic_command command += " --helper_ip " + helper_ip command += " --helper_port " + str(helper_port) command += " --local_ip " + peer_ip command += " --local_port " + str(peer_port) command += " --buddy_ext_ip " + buddy_ext_ip command += " --buddy_int_ip " + buddy_int_ip command += " --buddy_int_port " + str(buddy_int_port) command += " --message " + message command += random if (device != "") : command += " --device " + device # print the command being executed, FYI print "run.py executing sudo ./peer with the follow parameters:" print " helper IP ................",helper_ip print " helper port ..............",helper_port print " peer IP ..................",peer_ip print " peer port ................",peer_port print " buddy external IP ........",buddy_ext_ip print " buddy internal IP ........",buddy_int_ip print " buddy internal port ......",buddy_int_port if (device != "" ) : print " device ...................",device print " message ..................",message if (random != "" ) : print " random ................... 
yes" else : print " random ................... no" # execute the command #os.system(command) os.system(command + " >& run.py.tmp") fid = open("run.py.tmp","r") print fid.read() fid.close()
UTF-8
Python
false
false
2,012
16,441,134,827,577
3bbb717fe31aedf072c931535a537ae63e6c812a
b39ed76c82282caff9a266d7390d811b54f2f3d9
/blog/views.py
36a6506947f7b20f9058a4398a00ad2367bf2f06
[]
no_license
tofik/chatofido
https://github.com/tofik/chatofido
9b1c47a9c7cfd233a0d0640a944ea8d7428a360d
c24d0e1a037957e8d09099c9f9b2dc444777ee5c
refs/heads/master
2021-01-24T06:37:19.059947
2011-06-19T19:36:34
2011-06-19T19:36:34
462,632
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from blog.models import Post, Blog, Image, Comment from django.shortcuts import render_to_response, HttpResponseRedirect from blog.forms import NewPostForm, NewImageForm, NewCommentForm from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.contrib.auth import forms as auth_form from django.contrib.auth import logout, login, authenticate import datetime from django.template import RequestContext def login_view(request, next = "/"): if request.method == 'POST': username = request.POST['username'] password = request.POST['password'] user = authenticate(username = username, password = password) if user is not None: if user.is_active: login(request, user) return HttpResponseRedirect(request.POST['next']) else: return HttpResponse('cos poszlo nie tak, jak trzeba....') else: return HttpResponse('Podane konto nie istnieje!!') else: login_form = auth_form.AuthenticationForm() if next != "/": next = reques.GET['next'] return render_to_response('blog/login_form.html', {'login_form':login_form, 'login_next': next, }, context_instance = RequestContext(request) ) def logout_view(request): logout(request) return HttpResponseRedirect("/") def blog(request, name = "empty"): if name == "empty": blog = Blog.objects.get(id = 1) else: blog = Blog.objects.get(name = name) all_blogs = Blog.objects.values('name').distinct() blog_authors = blog.post_set.values('author').distinct() all_authors = Post.objects.values('author').distinct() all_posts = blog.post_set.all().order_by('-created') return render_to_response('blog/list.html', {'blog': blog, 'blogs': all_blogs, 'blog_authors': blog_authors, 'all_authors': all_authors, 'posts': all_posts, 'today': datetime.date.today().month, }, context_instance=RequestContext(request) ) @login_required def new_image(request, name): blog = Blog.objects.get(name = name) if request.method == 'POST': form = NewImageForm(request.POST, 
request.FILES) if form.is_valid(): post = form.save(commit=False) post.blog = blog post.save() print request.user else: return HttpResponse("Do dupy formularz!") else: blog_authors = Image.objects.values('author').distinct() form = NewImageForm({'blog': blog, 'comments': 0}) return render_to_response('blog/new_image.html', {'form': form, 'blog': blog, 'blog_authors': blog_authors, 'user': request.user, }, context_instance = RequestContext(request) ) return HttpResponseRedirect(reverse('blog.views.blog', args = (blog.name, ))) @login_required def new_post(request, name): blog = Blog.objects.get(name = name) if request.method == 'POST': form = NewPostForm(request.POST) if form.is_valid(): post = form.save(commit=False) post.blog = blog post.save() else: return HttpResponse("Niepoprawnie wypelniony formularz.") else: blog_authors = Post.objects.values('author').distinct() form = NewPostForm({'blog': blog, 'comments': 0}) return render_to_response('blog/new_post.html', {'form': form, 'blog': blog, 'blog_authors': blog_authors, 'user': request.user, }, context_instance = RequestContext(request) ) return HttpResponseRedirect(reverse('blog.views.blog', args = (blog.name, ))) #@login_required def new_comment(request, name, id): blog = Blog.objects.get(name = name) post = Post.objects.get(id = id) if request.method == 'POST': form = NewCommentForm(request.POST) if form.is_valid(): comment = form.save(commit=False) comment.post = post comment.save() post.comments += 1 post.save() else: return HttpResponse("Incorrect form!!") else: form = NewCommentForm({'post':post}) all_comments = post.comment_set.all().order_by('-created') return render_to_response('blog/new_comment.html', {'form': form, 'post': post, 'blog': blog, 'comments': all_comments, }, context_instance = RequestContext(request) ) return HttpResponseRedirect(reverse('blog.views.blog', args = (blog.name, )))
UTF-8
Python
false
false
2,011
14,010,183,321,046
e68ded6b01316666f977c019f199a1dbaeeddaec
c149978fc7773739484950f153b9015946ce2346
/constants.py
baa4c6f3a6fd6e5ad36bad22154d7e7c791831ad
[]
no_license
suyosunu/gilded-age
https://github.com/suyosunu/gilded-age
239ed70dd67e3fe9874e059faaa3753af2655b11
ad6bd5fe646b8b9916af3d8cfec87fe164ad7be5
refs/heads/master
2017-12-20T15:58:31.583040
2010-11-08T17:49:18
2010-11-08T17:49:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Project constants file """ DB_FILE = 'output/test.db' BASE_DIR = 'output' """ Scraper constants """ ENABLED_SCRAPERS = ['VALLEY'] ### Valley consts VALLEY_CONFIG = 'cfg/valley.cfg' VALLEY_DIR = 'valley' """ Analyzer constants """ ENABLED_ANALYZERS = ['CALAIS'] ### Calais consts # Settings for calais analyzer CALAIS_CONFIG = 'cfg/calais.cfg' ### Graph consts # Settings GRAPH_CONFIG = 'cfg/graph.cfg'
UTF-8
Python
false
false
2,010
13,357,348,327,793
e655da49916549a3d4c580e3965e3200fbf6b79e
7cbfcaad0a21df025d9df8ebae86838ac038443b
/get_duplicates.py
29dfdf23c0e1ac75107ceec97ccc844ff3c873f6
[]
no_license
duganchen/duplicated_file_page
https://github.com/duganchen/duplicated_file_page
1d66a08cdb5a4e2fc3596e0d3dd10c688f4c5c3b
8a9d6c756d9174c99478005c46a8fe6bfc95f853
refs/heads/master
2020-06-01T18:48:27.116182
2013-07-02T04:27:31
2013-07-02T04:27:31
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python

import collections
import flask
import itertools
import os
import yaml
import zlib

app = flask.Flask(__name__)


@app.route('/')
def same_files():
    """Render groups of duplicated files under the configured root.

    Files are grouped by size; if a ?checksum query parameter is present,
    the size-duplicates are further grouped by Adler-32 checksum.
    """
    settings = None
    with open(os.path.join(os.path.dirname(__file__), 'settings.yaml')) as f:
        # BUGFIX/security: yaml.load without an explicit Loader can execute
        # arbitrary tags and raises TypeError on PyYAML >= 6; safe_load is
        # sufficient for a plain settings mapping.
        settings = yaml.safe_load(f)
    assert settings is not None
    assert 'path' in settings
    assert os.path.isdir(settings['path'])

    # Key every file by its size, largest groups first.
    file_keys = iter_keys(settings['path'], key=os.path.getsize)
    get_key = lambda filekey: filekey.key
    file_keys = sorted(file_keys, key=get_key, reverse=True)
    key_groups = itertools.groupby(file_keys, key=get_key)
    dupes = iter_dupes(key_groups)

    if flask.request.args.get('checksum') is None:
        return flask.render_template('same_sizes.html', keys='sizes',
                                     root=settings['path'], dupes=dupes)

    # If checksums are desired, flatten what we have into a list of paths,
    # checksum them, group them by checksum, and remove duplicates.
    checksummed = sorted(iter_checksums(dupes), key=lambda x: x.key)
    checksum_groups = itertools.groupby(checksummed, key=lambda x: x.key)
    dupes = iter_dupes(checksum_groups)
    return flask.render_template('same_checksums.html', keys='checksums',
                                 root=settings['path'], dupes=dupes)


def iter_keys(path, key):
    """Yield a FileKey (key(path), path) for every file under `path`."""
    for root, _, files in os.walk(path):
        for filepath in files:
            path = os.path.join(root, filepath)
            # NOTE(review): str.decode is Python-2-only; under Python 3
            # os.walk already yields str and this line would fail — confirm
            # the target interpreter before porting.
            yield FileKey(key=key(path), path=path.decode('utf-8'))


def iter_dupes(key_groups):
    """Yield KeyGroups that contain more than one file (i.e. duplicates)."""
    for key, key_group in key_groups:
        filekeys = tuple(key_group)
        if len(filekeys) > 1:
            yield KeyGroup(key, filekeys)


def iter_checksums(dupe_groups):
    """Flatten duplicate groups and re-key each file by its checksum."""
    for group in dupe_groups:
        for filekey in group.filekeys:
            yield FileKey(key=checksum(filekey.path), path=filekey.path)


def checksum(path):
    """Adler-32 checksum of a file's full contents.

    Reads the whole file into memory: you're expected to clear out the
    obvious large files before turning checksumming on.
    """
    with open(path, 'rb') as f:
        return zlib.adler32(f.read())


FileKey = collections.namedtuple('FileKey', ('key', 'path'))
KeyGroup = collections.namedtuple('KeyGroup', ('key', 'filekeys'))

if __name__ == '__main__':
    app.debug = True
    app.run(host='0.0.0.0')
UTF-8
Python
false
false
2,013
15,934,328,685,342
76cd578d6cbb156f987c519f3da3a9fc71e699e1
83adf0ee2a2b61f7da7a32a205a9a66cf148a5a5
/burp/url/analyzer.py
151ff0e2fbd81941d605abcfec2406a4ba9098e3
[]
no_license
osall2001/BURP
https://github.com/osall2001/BURP
91df686e40be14374bc936a1937afc6df638bc82
24c105c2582493788c5c0a8f8c5e4150ff5be659
refs/heads/master
2021-01-18T11:53:09.836168
2012-12-11T07:07:49
2012-12-11T07:07:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import socket
import whois
import sys

from burp.url.TLD import tlds

# urlsplit moved between Python 2 and 3; try the Python 3 location first.
try:
    from urllib.parse import urlsplit
except ImportError:
    from urlparse import urlsplit


class URLAnalyzer:
    """Collects whois data, the resolved IP and lexical tokens for a URL."""

    def analyze(self, url):
        """ Returns a dictionary containing whois, ip and url tokens """
        tokens = self.getTokens(url)
        return { "whois" : self.getWhoIs(tokens["domain"]),
                 "ip" : self.getIpAddr(tokens["domain"]),
                 "tokens" : tokens }

    def getWhoIs(self, dom):
        """ Return a dictionary of whois information
            Will throw exception if tld server not known, or query
            limit reached """
        ws = whois.query(dom)
        # Expose the whois record's attributes as a plain dict.
        return ws.__dict__;

    def getIpAddr(self, dom):
        """Return the ip address of the domain"""
        return socket.gethostbyname(dom);

    def getTokens(self, url):
        """Returns a dictionary of subdomain, subdomain_length,
           number_subdomains, domain, domain_length, port, path"""
        parsed = urlsplit(url)
        # parsed[1] is the netloc, possibly "host:port".
        path_and_port = parsed[1].split(':')
        url_elements = path_and_port[0].split('.')
        # url_elements = ["abcde","co","uk"]
        domain = ""
        # Scan ever-shorter suffixes of the host against the public TLD
        # list, considering exact, wildcard (*.co.uk) and exception
        # (!host.tld) entries; the registrable domain is the suffix match
        # plus one extra label.
        for i in range(-len(url_elements), 0):
            last_i_elements = url_elements[i:]
            # i=-3: ["abcde","co","uk"]
            # i=-2: ["co","uk"]
            # i=-1: ["uk"] etc
            candidate = ".".join(last_i_elements)
            # abcde.co.uk, co.uk, uk
            wildcard_candidate = ".".join(["*"] + last_i_elements[1:])
            # *.co.uk, *.uk, *
            exception_candidate = "!" + candidate
            # match tlds:
            if (exception_candidate in tlds):
                domain = ".".join(url_elements[i:])
            if (candidate in tlds or wildcard_candidate in tlds):
                domain = ".".join(url_elements[i-1:])
                # returns "abcde.co.uk"
        if domain == "":
            raise ValueError("Domain not in global list of TLDs")
        # NOTE(review): replace() removes every occurrence of the domain
        # string, not only the trailing one — verify behaviour for hosts
        # where the domain text repeats (e.g. "co.uk.co.uk").
        subdomain = path_and_port[0].replace(domain, "").strip('.')
        num_subdomains = len(subdomain.split('.'))
        return {"subdomain" : subdomain,
                "subdomain_length" : len(subdomain),
                "number_subdomains" : num_subdomains,
                "domain" : domain,
                "domain_length": len(domain),
                "port" : parsed.port,
                "path" : parsed.path }
UTF-8
Python
false
false
2,012
14,405,320,326,774
48e00bdeccf36d29f6d798c1a1a9d3725acc18b1
04e613f26c98a3642394a8e8e857b9dc013397e7
/python/GeneratorBD.py
e12e84bb6d661e7ee4e22da674fb1ad024216dd3
[ "Apache-2.0" ]
permissive
sdynerow/Semirings-Library
https://github.com/sdynerow/Semirings-Library
be65217ccca209f4284896c219f03ffd6eff106e
2febff3448c6112f768b470cd6a9e5960bfaf11a
refs/heads/master
2016-09-05T23:23:11.695230
2014-02-14T09:14:54
2014-02-14T09:14:54
14,085,189
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012 - Seweryn Dynerowicz, FUNDP. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # imitations under the License. import sys from Metarouting.Algebra.RoutingMatrix import * from Metarouting.Policy.Routing.Diamond import * from Metarouting.Policy.Routing.WidestShortest import * bdcart = [ (b,d) for b in [0,5,10] for d in [1,2,3,4,5,6] ] Z = WidestShortest.zeroElt U = WidestShortest.unitElt B = RoutingMatrix(1, 5, [Z, Z, Z, U, Z], cast=WidestShortest) def statusString(): global count, total sstr = str(round(100.0 * (count/total),1)).rjust(6) + "% (" + str(int(count)) + "/" + str(int(total)) + ")" sys.stdout.write("\r" + sstr) sys.stdout.flush() vcount = 0 count = 0.0 total = len(bdcart)**5 A = RoutingMatrix(5, 5, [ Z , (1,1), Z , Z , Z , Z , Z , Z , Z , Z , Z , (1,4), Z , (1,1), Z , (1,1), Z , Z , Z , (2,1) , (2,5), Z , (1,1), Z , Z ], cast=WidestShortest) R = A.rightLocalOptimum() X = RoutingMatrix(1, 5, [ R(4,1), R(4,2), R(4,3), R(4,4), R(4,5) ]) for y1 in bdcart: for y2 in bdcart: for y3 in bdcart: for y4 in bdcart: for y5 in bdcart: Y = RoutingMatrix(1, 5, [y1, y2, y3, y4, y5], cast=WidestShortest) if (Y == Y*A + B and X != Y): print("Y\n" + str(Y)) if ((X+Y) != (X+Y)*A + B): print print("A\n" + str(A)) print print("X\n" + str(X)) print print("Y\n" + str(Y)) print vcount += 1 count += 1 statusString() print("\nViolation count : " + str(vcount))
UTF-8
Python
false
false
2,014
7,017,976,610,160
5d47fa7781d608e6fbad7ba7fd9000c3ebc3e894
41d619385684b384687d40a833202f2491c51599
/sivir/sivir_request.py
2a4f3be4d43245dd7ee223cda139422f38ed8a2d
[ "GPL-2.0-only" ]
non_permissive
guilhermepontes/sivir
https://github.com/guilhermepontes/sivir
7437b3be3511e723e4e4117dc58c4090d5cb242b
0fc463a54bef8947936dfa0e1bd8d8822dcaa87f
refs/heads/master
2021-01-22T09:27:43.249531
2014-08-27T14:55:53
2014-08-27T14:55:53
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import requests, json, os
import sivir_messages


class SivirRequest:
    """Builds and performs a request against the configured production API.

    Reads api_key / api_version / production_url from
    config/sivir_config.json next to this module.
    """

    def __init__(self, region = "", params = ""):
        # Both region and params are mandatory; report which are expected
        # when either is missing.
        if not all((region, params)):
            # BUGFIX: the original referenced the bare name `SivirMessages`,
            # which is never imported and raised NameError at runtime.
            # The class is assumed to live in the imported sivir_messages
            # module — verify against sivir_messages.py.
            sivir_messages.SivirMessages.error(["region", "params"])
        else:
            self.region = region
            self.params = params
            self.config()

    @staticmethod
    def load_config_file():
        """Load and parse config/sivir_config.json relative to this file."""
        path = os.path.dirname(os.path.abspath(__file__))
        config_file = open(path + "/config/sivir_config.json").read()
        return json.loads(config_file)

    @classmethod
    def config(cls):
        """Cache API settings as (name-mangled) class attributes."""
        config = cls.load_config_file()
        cls.__api_key = config["api_key"]
        cls.__api_version = config["api_version"]
        cls.__production_url = config["production_url"]

    @classmethod
    def make_url(cls, prod, region, api_version, params):
        """Join the URL parts, guaranteeing a trailing slash."""
        url = prod + region + "/" + api_version + params
        url = url if url[-1:] == "/" else url + "/"
        return url

    @staticmethod
    def make_api_param(key):
        """Wrap the API key in the query-parameter dict requests expects."""
        return {"api_key": key}

    def request(self):
        """Perform the GET request and return the response body as text."""
        url = self.make_url(self.__production_url, self.region,
                            self.__api_version, self.params)
        api = self.make_api_param(self.__api_key)
        r = requests.get(url, params = api)
        return r.text
UTF-8
Python
false
false
2,014
1,967,095,027,090
3daa737f14dffcaaf6db5fc494ead69b2155ba43
5b75264a35c6b1faccb5dd46dc06c9bce1732d12
/raspberry-pi/rpi-server.py
0869882d6bfff05dd5dd3cb25d8be8638f38e3fd
[ "BSD-3-Clause" ]
permissive
skeezix/shadowcar
https://github.com/skeezix/shadowcar
e15fd926932ffba90ffcaf94cf44a2b9879abc4b
d61eb229c63795d131eed37a5990bc296852e125
refs/heads/master
2020-06-04T00:22:40.057826
2014-10-24T19:59:27
2014-10-24T19:59:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python3

# web server
import socketserver
import logging
import os  # makedirs
import json
import string
import datetime
import posixpath
# BUGFIX: unquote lives in urllib.parse on Python 3; the bare `import
# urllib` left `urllib.unquote` undefined (AttributeError in translate_path).
import urllib.parse
import http.server
from http.server import SimpleHTTPRequestHandler

# i2c / twi master
import quick2wire.i2c as i2c
import time
import array
import sys

# setup
#
slave_address = 0x10
server_host = 'fw.skeleton.org'
server_port = 12343

# set up routing table
#
routes = []
routes.append(['/dist/', './dist'])
routes.append(['DEFAULT', '/dev/null'])


class ShadowCommand:
    """I2C command channel to the car controller at `slave_address`."""

    TC_HEARTBEAT = 1
    TC_GETHELLO = 2
    TC_SETMOTORS = 3
    TC_TAKEOVER = 4
    TC_RELEASE = 5

    def __init__(self, slave_address):
        self.slave_address = slave_address

    def sendbuf(self, bytelist):
        """Write a list of ints to the slave as a single byte buffer."""
        with i2c.I2CMaster() as bus:
            bus.transaction(i2c.writing(self.slave_address, bytes(bytelist)))
        return None

    def sendbytes(self, bytes):
        """Write already-packed bytes to the slave."""
        with i2c.I2CMaster() as bus:
            bus.transaction(i2c.writing_bytes(self.slave_address, bytes))
        return None

    def readbuf(self, n=8):
        """Read n bytes from the slave and return the transaction result."""
        # BUGFIX: previously read the module-level global `slave_address`
        # instead of this instance's address.
        with i2c.I2CMaster() as bus:
            read_results = bus.transaction(i2c.reading(self.slave_address, n))
        return read_results

    def tc_takeover(self):
        """Send the TAKEOVER command (8-byte frame, command in byte 0)."""
        # BUGFIX: previously operated on the module-level global `shadow`
        # instead of self, so the method only worked for that one instance.
        data = array.array('B', [self.TC_TAKEOVER, 0, 0, 0, 0, 0, 0, 0])
        self.sendbuf(data.tolist())

    def tc_setmotors(self, l=63, r=63):
        """Send the SETMOTORS command with left/right values in bytes 1-2."""
        # BUGFIX: previously operated on the global `shadow`; see above.
        data = array.array('B', [self.TC_SETMOTORS, l, r, 0, 0, 0, 0, 0])
        self.sendbuf(data.tolist())


class RequestHandler(SimpleHTTPRequestHandler):

    def translate_path(self, path):
        """translate path given routes"""
        original_path = path

        # set default root to cwd
        root = os.getcwd()

        # look up routes and set root directory accordingly
        found = False
        for pattern, rootdir in routes:
            if path.startswith(pattern):
                # found match!
                path = path[len(pattern):]  # consume path up to pattern len
                root = rootdir
                found = True
                break

        if found == False:
            # No route matched: serve the DEFAULT entry (routes[1]).
            logging.warning("Routing request %s to default %s" % (original_path, routes[1][1]))
            return routes[1][1]

        # normalize path and prepend root directory
        path = path.split('?', 1)[0]
        path = path.split('#', 1)[0]
        # BUGFIX: was urllib.unquote (Python 2 only); use urllib.parse.
        path = posixpath.normpath(urllib.parse.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = root
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir):
                # never allow "." / ".." to escape the routed root
                continue
            path = os.path.join(path, word)

        logging.info('Mapped GET path %s to path %s' % (original_path, path))
        return path

    def do_GET(self):
        logging.debug("GET against path '%s'" % (self.path))
        print("do_GET")

        # parse out ?key=value sillyness
        putargs = dict()
        if '?' in self.path:
            try:
                self.path, crap = self.path.split('&', 2)
            except:
                pass
            self.path, query = self.path.split('?', 2)
            key, value = query.split('=', 2)
            putargs[key] = value

        # length handling
        length = -1
        if 'Content-Length' in self.headers:
            try:
                length = int(self.headers['Content-Length']) & 0xFFFF  # why do I need to mask this?
                logging.debug("Content-Length header implies length is %s" % (length))
            except:
                logging.debug("Couldn't determine Content-Length from header")

        # Expect paths shaped like /<basepage>/<l>/<r>
        # NOTE(review): on a parse failure only the local `basepage` is set,
        # so the req['basepage'] access below would raise KeyError — confirm
        # intended behaviour for malformed paths.
        req = dict()
        try:
            paths = self.path.split("/", 6)
            # paths[0] == nil (leading blank)
            req['basepage'] = paths[1]
            req['l'] = paths[2]
            req['r'] = paths[3]
        except:
            basepage = ''

        logging.debug("request looks like POST %s" % (req))

        if req['basepage'] == 'dpad':
            self.send_response(200)  # okay
            self.send_header('Content-type', 'text/html; charset=utf-8')
            self.send_header('Content-length', 0)
            self.end_headers()
            shadow.tc_takeover()
            shadow.tc_setmotors(int(req['l']), int(req['r']))
        elif req['basepage'] == 'dist':
            f = open("dist/index.html", 'r')
            text = f.read()
            f.close()
            self.send_response(200)  # okay; the following is the right header sequence
            self.send_header('Content-type', 'text/html; charset=utf-8')
            self.send_header('Content-length', len(text))
            self.end_headers()
            self.wfile.write(bytes(text, 'UTF-8'))
        else:
            self.send_response(400)  # shir not be okay


class ThreadingHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    # use some mixins to make a threading server.. lets see how this works
    pass


shadow = ShadowCommand(slave_address)

server_address = ('', server_port)
RequestHandler.protocol_version = "HTTP/1.0"
httpd = ThreadingHTTPServer(server_address, RequestHandler)

sa = httpd.socket.getsockname()
logging.info("Serving HTTP on " + str(sa[0]) + " port " + str(sa[1]) + "...")
httpd.serve_forever()
UTF-8
Python
false
false
2,014
2,216,203,151,856
b50163190901e4a099644e7ef79d1d028a0b4149
59642dc8f6d4059d2d36f4f64a92d8edf30c33c6
/setup.py
9d9d97845cbc8d28956384401d1af14da99fa462
[]
no_license
lrowe/plone.app.blocks
https://github.com/lrowe/plone.app.blocks
a855691f2b41ef8ad4b70d8a03c1076bcb1031f8
7a4df3a0aff953fe872f85b904ff5f51826ff7b1
refs/heads/master
2021-01-24T02:39:12.443662
2011-09-22T16:18:50
2011-09-22T16:18:50
2,428,688
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from setuptools import setup, find_packages
import os

version = '1.0a1'

# The long description is the README plus history and the two doctest
# documents, joined with blank-line separators.
_doc_files = [
    "README.txt",
    os.path.join("docs", "HISTORY.txt"),
    os.path.join("plone", "app", "blocks", "tests", "rendering.txt"),
    os.path.join("plone", "app", "blocks", "tests", "esi.txt"),
]
long_description = "\n".join(open(path).read() for path in _doc_files)

setup(name='plone.app.blocks',
      version=version,
      description="Implements the in-Plone blocks rendering process",
      long_description=long_description,
      classifiers=[
          "Framework :: Plone",
          "Programming Language :: Python",
          "Topic :: Software Development :: Libraries :: Python Modules",
      ],
      keywords='plone blocks deco',
      author='Martin Aspeli, Laurence Rowe',
      author_email='[email protected]',
      url='http://pypi.python.org/pypi/plone.app.blocks',
      license='GPL',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['plone', 'plone.app'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
          'plone.transformchain',
          'lxml',
          'repoze.xmliter',
          'plone.tiles>=1.0a2',
          'plone.resource',
          'plone.behavior',
          'plone.subrequest',
          'plone.app.registry',
      ],
      extras_require={
          'test': [
              'plone.app.testing',
          ],
      },
      entry_points="""
      [z3c.autoinclude.plugin]
      target = plone
      """,
      )
UTF-8
Python
false
false
2,011
446,676,617,757
30b6e637029c10265db022830133b86ca5eb638f
8eb73bc8654c731ddee4d7694b5750412d59c9ee
/app/grubhub/__init__.py
61b6ea80a123b73f238a501ede9dfb4d4dc41bc3
[]
no_license
TrevorDecker/HornyScotsServerside
https://github.com/TrevorDecker/HornyScotsServerside
bfad210d56a18919874e51cd1c040b4c206e6edc
dc2453ffdd9d12e8095c9496d1226db2cea6195b
refs/heads/master
2020-05-03T02:15:01.529655
2014-07-12T17:42:35
2014-07-12T17:42:35
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from grubhub import Grubhub from parllel import start_search
UTF-8
Python
false
false
2,014
463,856,497,211
f61cd39d4a8557cff08b9c1c65cd298910022c11
1ec50473af8e37dc2d4ccfa82a2d3f50cd99e173
/breadrobber/src/login.py
39653da68c84378c01af10f81838809c5cf28ea6
[]
no_license
anic/cyaproject
https://github.com/anic/cyaproject
4628c5fb79f2fd4cb9f4741d29a59e947db79a69
f20acd78ec0ee78d1eb26866dfad964e9b77bc78
refs/heads/master
2016-09-11T02:27:52.515184
2014-02-11T15:08:35
2014-02-11T15:08:35
32,201,822
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-

import sys
# Python-2 hack so unicode button labels below encode cleanly.
reload(sys)
sys.setdefaultencoding('utf-8')

import re, time

from Ui_login import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from facade import *


class LoginDlg(QDialog, Ui_Dialog):
    """Fixed-size login dialog with localized OK/Cancel button labels."""

    def __init__(self, parent=None):
        # BUGFIX: `parent` was accepted but never forwarded to QDialog, so
        # the dialog was always created parentless; pass it through.
        super(LoginDlg, self).__init__(parent)
        self.setupUi(self)
        self.setFixedSize(200, 125)
        # Relabel the standard buttons in Chinese.
        self.buttonBox.button(QDialogButtonBox.Ok).setText(u"确定")
        self.buttonBox.button(QDialogButtonBox.Cancel).setText(u"取消")
UTF-8
Python
false
false
2,014
188,978,597,854
9f9027e46172d088040389cce7fc3f31df922e03
070636d993e96fbf0e066254092642935ee4eff8
/branches/release-0.8.2/old/alpha-0.6/solipsis/core/connector.py
7c238e3c4f8ee38c1c4cea2eb065137c8bca55c5
[ "LGPL-2.1-only", "LGPL-2.1-or-later", "LicenseRef-scancode-other-copyleft" ]
non_permissive
BackupTheBerlios/solipsis-svn
https://github.com/BackupTheBerlios/solipsis-svn
2cc814dffd086a8e9af585bd2e809f743b5a80db
12c2face0ed3398f3733595190abcecb372796e7
refs/heads/master
2016-09-06T12:21:26.278840
2005-11-29T14:47:40
2005-11-29T14:47:40
40,748,471
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from threading import Thread import logging from solipsis.util.container import NotificationQueue class Connector(Thread): """ Generic class for connecting to another component(peer or navigator) """ def __init__(self, parser, eventQueue): """ Constructor parser : the Parser object used to parse the events going in and out of this connector eventQueue : queue used to communicate with other thread. The Connector fills this queue with events. Other threads are responsible for reading and removing events from this queue """ Thread.__init__(self) # Event queue used to communicate with other threads # these events comes from other entities through this connector # and will be processed by the node self.incoming = eventQueue # Message to send queue # the node has created an event that will be sent using this connector # to another entity self.outgoing = NotificationQueue() # this flag is set to True when we want to stop this thread self.stopThread = False # Parser object self.parser = parser # logger self.logger = logging.getLogger('root') def stop(self): """ Stop the network thread """ self.stopThread = True def setParser(self, parser): """ Set the connector type. parser : the Parser object used to parse the events going through this connector """ self.parser = parser
UTF-8
Python
false
false
2,005
15,040,975,491,081
5f179e82eb293910240a5f08b933892b6b4edb10
7c3c2a1898a7f54dc0cab16135944dd5452b59de
/src/storage/StorageManager.py
43aa98175cd9fb4133da8d1b3324e6d8938d4bcb
[]
no_license
ahussein/FSTags
https://github.com/ahussein/FSTags
dd72bec3f3145d91c014b70e1a1063c2bc5dce17
a9b31e88f8a325365e32c3914177430ba0672941
refs/heads/master
2021-01-23T15:51:38.448036
2011-03-29T22:20:22
2011-03-29T22:20:22
1,538,950
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on Mar 26, 2011 @author: abdelrahman ''' from ConfigParser import ConfigParser import lightcloud class LightCloudManager(object): """ A wrapper for lightcloud key/value store that implement the storage interface """ CONFIG_FILE_PATH = '/etc/lightcloud.cfg' def __init__(self): self.config = dict() self._lookup_nodes = list() self._storage_nodes = list() def initialize(self, config_file_path = CONFIG_FILE_PATH): """ Initializes the lightcloud storage manager, load the configuration and creates the nodes @param config_file_path: path to the configuration file, which is formatted as INI file """ self.load_config(config_file_path) self._lookup_nodes, self._storage_nodes = lightcloud.generate_nodes(self.config) lightcloud.init(self._lookup_nodes, self._storage_nodes) def load_config(self, config_file_path = CONFIG_FILE_PATH): """ Loads the configuration of the lightcloud storage server(server ips/ports) and connect to the server @param config_file_path: path to the configuration file, which is formatted as INI file """ config_parser = ConfigParser() config_parser.read(config_file_path) for section in config_parser.sections(): self.config[section] = list() server_string = '%s:%s'%(config_parser.get(section, 'server'), config_parser.get(section, 'port')) self.config[section].append(server_string) def put(self, key, value): """ Put a key/value pairs into the lightcloud storage backend @return: True if the item put successfully, False otherwise """ #first try to get the the list of items stored for this key if any, then add the new value to the list otherwise add the new key/value to the store return lightcloud.list_add(key, [value]) == 'ok' def get(self, key): """ Retrieves the value of a key from the lightcloud store @param key: the key to retrieve @return: list of the value(s) if exist, empty list otherwise """ #try to retrieve a list of the values, if the key doesnt exist in the list keys then try to get it from the normal keys result = 
lightcloud.list_get(key) if isinstance(result, list): return result result = lightcloud.get(key) if result: return [result] return list() def remove(self, key): """ Removes a key and its value from the lightcloud store @param key: the key of the value to remove @return: True if the key/value removed successfully, False otherwise """ return lightcloud.delete(key) def remove_one(self, key, value): """ Removes one of the value from the values list of a key @param key: a key in the store @param value: value to remove from the values list @return: True if the key/value removed successfully, False otherwise """ return lightcloud.list_remove(key, value) == 'ok' class StorageManager(object): """ Generic key/value store interface """ SUPPORTED_STORES = {'LIGHTCLOUD': LightCloudManager,} def __init__(self, storagetype = 'LIGHTCLOUD'): """ Initialize storage manager based on the storage type provided, if none provided then it uses lightcloud as storage backend """ if storagetype not in self.SUPPORTED_STORES: raise ValueError('Unsupported storage type %s, the current supported storage types are %'%(storagetype, self.SUPPORTED_STORES.keys())) self.manager = self.SUPPORTED_STORES[storagetype]() self.manager.initialize() def put(self, key, value): """ Put a pair of key/value into the backend store @param key: the key of the object to store, currently we only support string keys @param value: the value of the object to store, currently we only support string values @return: True if the operation completed successfully, False otherwise """ return self.manager.put(key, value) def get(self, key): """ Retrieve a value from the backend store that match the requested key, None if the key doesnt exist @param key: the key of the value to retrieve @return: list of the value(s) if exist, empty list otherwise """ return self.manager.get(key) def remove(self, key): """ Removes a key and its value from the backend store @param key: the key of the value to remove @return: True if the key/value 
removed successfully, False otherwise """ return self.manager.remove(key) def remove_one(self, key, value): """ Removes one of the value from the values list of a key @param key: a key in the store @param value: value to remove from the values list @return: True if the key/value removed successfully, False otherwise """ return self.manager.remove_one(key, value)
UTF-8
Python
false
false
2,011
12,438,225,335,820
b928d369222cc7bedebfdeb85b1ef8b553e316c9
939be4562464e27d4dd969cc322390c5a1a09b7e
/wordcounter.py
65e3a5a701b9d5b54e2bcb09105a3ef6d64707cf
[]
no_license
jticknor/DocumentAnalysis
https://github.com/jticknor/DocumentAnalysis
b33d9874a54abec7f71ddff213cee89b164bedac
40b35560ad984972ad4e2d044883de6916666713
refs/heads/master
2020-05-18T00:34:47.232371
2013-12-21T23:54:26
2013-12-21T23:54:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # current release builds databases from which to compare and explore many documents # future release will provide methods for doing this analysis import numpy as np import sqlite3 from sys import argv def wordCount(file_name,docid): try: lst = [] # open file in read mode with open(file_name,'r') as f: # set up dictionary for word counts to be stored words = {} for line in f: # split words on the line for reading and counting linewords = line.split() # make all words lowercase to ensure duplicates are not created lowerwords = [x.lower() for x in linewords] for word in lowerwords: if word in words: words[word] += 1 else: words[word] = 1 # sort words in descending order final_words = sorted(words.items(), key=lambda x: x[1], reverse = True) # build word list to quickly add to database structure for point in final_words: lst.append((docid,point[0],point[1])) return lst # return error if user does not enter an available filename except IOError as e: print "Error: Invalid filename entered" def databaseCreate(dbname): conn = sqlite3.connect(str(dbname)) c = conn.cursor() c.execute('''CREATE TABLE words (id INT, word TEXT, count INT)''') c.execute('''CREATE TABLE docs (id INT PRIMARY KEY, name TEXT)''') conn.commit() conn.close() def dbUpdate(dbname,lst,documentname,docid): conn = sqlite3.connect(str(dbname)) c = conn.cursor() zz = (docid,str(documentname)) c.execute("INSERT INTO docs VALUES (?,?)",zz) c.executemany("INSERT INTO words VALUES (?,?,?)", lst) conn.commit() conn.close() if __name__ == '__main__': # determine if database has been constructed check = raw_input('Are you creating a new database? 
[0] no, [1] yes: ') if check == '1': dbname = raw_input('Enter name for new database (add .db extension): ') # build database for new document databaseCreate(dbname) else: dbname = raw_input('Enter database to update (add .db extension): ') # request data files val = 1 while (val == 1): # obtain filename for the text document datafile = raw_input('Enter file name (with .txt ending): ') # obtain document identification number docid = raw_input('Enter document id: ') # run word counter lst = wordCount(datafile,docid) # fill database with document data dbUpdate(dbname,lst,datafile,docid) # ask for additional file val = raw_input('New file? [1] for yes, [0] for no: ')
UTF-8
Python
false
false
2,013
5,231,270,205,323
0cf5d1616675624e9ca55803c3a98b89f5bbecc7
eabc69dccd938eb2aefe7277c21cfd9131378dfc
/testing.py
aedd7278cf6ea58ca7c9f3cac0a1f78113fb61ca
[]
no_license
nagyistoce/dbn-exchange-rate
https://github.com/nagyistoce/dbn-exchange-rate
4a66c3437a9e17e70329f549207c01f4b6fa9443
be895564ef0c23e364ee6e2ba5df9b915b7bc732
refs/heads/master
2016-09-06T21:21:47.294212
2014-11-02T16:47:54
2014-11-02T16:47:54
32,806,003
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ Created on Wed Aug 27 20:14:37 2014 @author: John """ import sys ; print '\n'.join(sorted(sys.path))
UTF-8
Python
false
false
2,014
10,024,453,695,401
e67fcac46acbe48206789bd59347fa6d28b8dfe8
37438f36a42fba376d3274ddf5842aacc9483157
/projectile_w_res.py
3f7730d1c95948c4c8df7626ed15f58a1cc6be26
[]
no_license
kr1m/ClassicalMech
https://github.com/kr1m/ClassicalMech
8c60f0afd1e8cf972f8f075f2464bfb0566b0f87
a068caec94ddd8616aaf2d238599772ca2dd7706
refs/heads/master
2016-09-07T19:09:28.295255
2013-06-25T23:06:49
2013-06-25T23:06:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Ryan Thielke - Classical Mechanics # # # Numerical Integration to calculate the range of a projectile w/ air resistance # Velocity = 50 m/s # Projection Angle = 45 degrees = pi/4 # Gravity = 9.8 m/s**2 ################################################################################### from math import pi, cos, sin def Airtime(vel_y, air_res, gravity=9.8): return 2*(vel_y/gravity)*(1-((air_res*vel_y)/(3*gravity))) def Range(vel_x, vel_y, air_res, gravity=9.8): return 2*vel_x*vel_y/gravity * (1-(4*air_res*vel_y)/(3*gravity)) def main(): # Velocities v_x = 50*cos(pi/4) v_y = 50*sin(pi/4) ideal_range = 2*v_x*v_y/9.8 # Air resistances from .001 to .999 air_res = [i/1000.0 for i in range(1,1000)] ranges = [] range_times = [] for resistance in air_res: range_times.append(Airtime(v_y, resistance)) ranges.append(Range(v_x, v_y, resistance)) print "Velocity in x: %.3f m/s\nVelocity in y: %.3f m/s\n" % (v_x, v_y) print "Ideal Range: %.3f meters\n\n" % ideal_range print "Air Resistance\tRange(m)\tTime of Flight(s)\n" print "-------------------------------------------------------" for i in range(len(air_res)): print "%.3f\t\t%.3f\t\t%.3f" % (air_res[i],ranges[i],range_times[i]) main()
UTF-8
Python
false
false
2,013
1,047,972,046,551
82497047e4bd3ec299c9c693c60e7a791edec27f
c56689e1096a61be2ba99fc4c522fbe85178efeb
/apps/accountlinker/models.py
618fdb65454f87a07508f9594824ffc8087a98f5
[ "AGPL-3.0-only" ]
non_permissive
bargaorobalo/unisubs
https://github.com/bargaorobalo/unisubs
2624ef348210067628eefe6e662299b52bab6436
9f910a3f65f7eeebce94b1e0cec471cb989b8982
HEAD
2017-12-03T10:15:04.833185
2013-03-17T18:44:34
2013-03-17T18:44:34
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Amara, universalsubtitles.org # # Copyright (C) 2013 Participatory Culture Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see # http://www.gnu.org/licenses/agpl-3.0.html. import logging from django.db import models from django.conf import settings from django.core.exceptions import ImproperlyConfigured, ValidationError from django.utils import translation from videos.models import VIDEO_TYPE, VIDEO_TYPE_YOUTUBE from .videos.types import ( video_type_registrar, UPDATE_VERSION_ACTION, DELETE_LANGUAGE_ACTION, VideoTypeError, YoutubeVideoType ) from teams.models import Team from auth.models import CustomUser as User from utils.metrics import Meter logger = logging.getLogger(__name__) # for now, they kind of match ACCOUNT_TYPES = VIDEO_TYPE AMARA_CREDIT = translation.ugettext("Subtitles by the Amara.org community") AMARA_DESCRIPTION_CREDIT = translation.ugettext( "Help us caption and translate this video on Amara.org") def youtube_sync(video, language): """ Used on debug page for video. Simplified version of what's found in ``ThirdPartyAccount.mirror_on_third_party``. It doesn't bother checking if we should be syncing this or not. Only does the new Youtube/Amara integration syncing. 
""" version = language.get_tip() always_push_account = ThirdPartyAccount.objects.always_push_account() for vurl in video.videourl_set.all(): vt = video_type_registrar.video_type_for_url(vurl.url) try: vt.update_subtitles(version, always_push_account) Meter('youtube.push.success').inc() except: Meter('youtube.push.fail').inc() logger.error('Always pushing to youtoube has failed.', extra={ 'video': video.video_id, 'vurl': vurl.pk }) finally: Meter('youtube.push.request').inc() def get_linked_accounts_for_video(video): yt_url = video.videourl_set.filter(type=VIDEO_TYPE_YOUTUBE) if yt_url.exists(): accounts = [ThirdPartyAccount.objects.resolve_ownership(u) for u in yt_url] return filter(None, accounts) return None def check_authorization(video): """ Make sure that a video can have its subtitles synced to Youtube. This doesn't take into account any language/version information. Return a tuple of (is_authorized, ignore_new_syncing_logic). """ team_video = video.get_team_video() linked_accounts = get_linked_accounts_for_video(video) if not linked_accounts: return False, False if all([a.is_team_account for a in linked_accounts]): if not team_video: return False, False tpas_for_team = team_video.team.third_party_accounts.all() if any(tpa in tpas_for_team for tpa in linked_accounts): return True, True if all([a.is_individual_account for a in linked_accounts]): return True, True return False, False def can_be_synced(version): """ Return whether a subtitle version can be synced to Youtube. A version must be public, synced and complete. TODO: take visibility into account We can't sync a version if it's the only version in that language and it has the "From youtube" note. """ if version: if not version.is_public() or not version.is_synced(): # We can't mirror unsynced or non-public versions. 
return False if not version.subtitle_language.subtitles_complete: # Don't sync incomplete languages return False if version.subtitle_language.is_imported_from_youtube_and_not_worked_on: return False return True def translate_string(string, language='en'): """ If a translation for the specified language doesn't exist, return the English version. """ cur_language = translation.get_language() try: translation.activate(language) text = translation.ugettext(string) finally: translation.activate(cur_language) return text def get_amara_credit_text(language='en'): return translate_string(AMARA_CREDIT, language) def add_amara_description_credit(old_description, video_url, language='en', prepend=False): """ Prepend the credit to the existing description. """ credit = "%s\n\n%s" % (translate_string(AMARA_DESCRIPTION_CREDIT, language), video_url) if credit in old_description: return old_description temp = "%s\n\n%s" if prepend: return temp % (credit, old_description or "") else: return temp % (old_description or "", credit) class ThirdPartyAccountManager(models.Manager): def always_push_account(self): """ Get the ThirdPartyAccount that is able to push to any video on Youtube. Raise ``ImproperlyConfigured`` if it can't be found. """ username = getattr(settings, 'YOUTUBE_ALWAYS_PUSH_USERNAME', None) try: return self.get(username=username) except ThirdPartyAccount.DoesNotExist: raise ImproperlyConfigured("Can't find youtube account") def mirror_on_third_party(self, video, language, action, version=None): """ Does the specified action (video.types.UPDATE_VERSION_ACTION or video.types.DELETE_LANGUAGE_ACTION) on the original account (e.g. Youtube video). 
For example, to update a given version to Youtube: ThirdPartyAccountManager.objects.mirror_on_third_party( video, language, "update_subtitles", version) For deleting, we only delete languages, so it should be ThirdPartyAccountManager.objects.mirror_on_third_party( video, language, "delete_subtitles") This method is 'safe' to call, meaning that we only do syncing if there are matching third party credentials for this video. The update will only be done if the version is synced """ if action not in [UPDATE_VERSION_ACTION, DELETE_LANGUAGE_ACTION]: raise NotImplementedError( "Mirror to third party does not support the %s action" % action) if not version and action == UPDATE_VERSION_ACTION: raise ValueError("You need to pass a version when updating subs") if not can_be_synced(version): return is_authorized, ignore_new_syncing_logic = check_authorization(video) if not is_authorized: return try: rule = YoutubeSyncRule.objects.all()[0] should_sync = rule.should_sync(video) always_push_account = self.always_push_account() except IndexError: should_sync = False for vurl in video.videourl_set.all(): already_updated = False try: vt = video_type_registrar.video_type_for_url(vurl.url) except VideoTypeError, e: logger.error('Getting video from youtube failed.', extra={ 'video': video.video_id, 'vurl': vurl.pk, 'gdata_exception': str(e) }) return if should_sync: try: vt.update_subtitles(version, always_push_account) already_updated = True Meter('youtube.push.success').inc() except Exception, e: Meter('youtube.push.fail').inc() logger.error('Pushing to youtoube has failed.', extra={ 'video': video.video_id, 'vurl': vurl.pk, 'gdata_exception': str(e) }) finally: Meter('youtube.push.request').inc() username = vurl.owner_username if not username: continue account = self.resolve_ownership(vurl) if not account: return if hasattr(vt, action): if action == UPDATE_VERSION_ACTION and not already_updated: vt.update_subtitles(version, account) elif action == DELETE_LANGUAGE_ACTION: 
vt.delete_subtitles(language, account) def resolve_ownership(self, video_url): """ Given a VideoUrl, return the ThirdPartyAccount that is supposed to be the owner of this video. """ # youtube username is a full name. but sometimes. yeah. if video_url.type == 'Y': return self._resolve_youtube_ownership(video_url) else: try: return ThirdPartyAccount.objects.get(type=video_url.type, username=video_url.owner_username) except ThirdPartyAccount.DoesNotExist: return None def _resolve_youtube_ownership(self, video_url): """ Give a youtube video url, returns a TPA that is the owner of the video. We need this because there could be two """ try: return ThirdPartyAccount.objects.get(type=video_url.type, full_name=video_url.owner_username) except ThirdPartyAccount.DoesNotExist: return None except ThirdPartyAccount.MultipleObjectsReturned: type = YoutubeVideoType(video_url.url) uri = type.entry.author[0].uri.text # we can easily extract the username from the uri, since it's the last # part of the path. this is much easier than making yet another api # call to youtube to find out. # i.e. https://gdata.youtube.com/feeds/api/users/gdetrez > gdetrez username = uri.split("/")[-1] # we want to avoid exception handling inside exception handling tpa = ThirdPartyAccount.objects.filter(type=video_url.type, username=username)[:1] return tpa[0] if tpa else None class ThirdPartyAccount(models.Model): """ Links a third party account (e.g. YouTube's') to a certain video URL This allows us to push changes in Unisubs back to that video provider. The user links a video on unisubs to his youtube account. Once edits to any languages are done, we push those back to Youtube. For know, this only supports Youtube, but nothing is stopping it from working with others. 
""" type = models.CharField(max_length=10, choices=ACCOUNT_TYPES) # this is the third party account user name, eg the youtube user username = models.CharField(max_length=255, db_index=True, null=False, blank=False) # user's real/full name, like Foo Bar full_name = models.CharField(max_length=255, null=True, blank=True, default='') oauth_access_token = models.CharField(max_length=255, db_index=True, null=False, blank=False) oauth_refresh_token = models.CharField(max_length=255, db_index=True, null=False, blank=False) objects = ThirdPartyAccountManager() class Meta: unique_together = ("type", "username") def __unicode__(self): return '%s - %s' % (self.get_type_display(), self.full_name or self.username) @property def is_team_account(self): return self.teams.exists() @property def is_individual_account(self): return self.users.exists() class YoutubeSyncRule(models.Model): """ An instance of this class determines which Youtube videos should be synced back to Youtube via the new integration. There should only ever be one instance of this class in the database. You should run a query and then call it like this: rule = YoutubeSyncRule.objects.all()[0] rule.should_sync(video) Where ``video`` is a ``videos.models.Video`` instance. ``team`` should be a comma-separated list of team slugs that you want to sync. ``user`` should be a comma-separated list of usernames of users whose videos should be synced. ``video`` is a list of video ids of videos that should be synced. You can also specify a wildcard "*" to any of the above to match any teams, any users, or any videos. 
""" team = models.TextField(default='', blank=True, help_text='Comma separated list of slugs') user = models.TextField(default='', blank=True, help_text='Comma separated list of usernames') video = models.TextField(default='', blank=True, help_text='Comma separated list of video ids') def __unicode__(self): return 'Youtube sync rule' def team_in_list(self, team): if not team: return False teams = self.team.split(',') if '*' in teams: return True return team in teams def user_in_list(self, user): if not user: return False users = self.user.split(',') if '*' in users: return True return user.username in users def video_in_list(self, pk): pks = self.video.split(',') if '*' in pks: return True if len(pks) == 1 and pks[0] == '': return False return pk in pks def should_sync(self, video): tv = video.get_team_video() team = None if tv: team = tv.team.slug return self.team_in_list(team) or \ self.user_in_list(video.user) or \ self.video_in_list(video.video_id) def _clean(self, name): if name not in ['team', 'user']: return field = getattr(self, name) values = set(field.split(',')) values = [v for v in values if v != '*'] if len(values) == 1 and values[0] == '': return [] return values def clean(self): teams = self._clean('team') users = self._clean('user') if len(teams) != Team.objects.filter(slug__in=teams).count(): raise ValidationError("One or more teams not found") if len(users) != User.objects.filter(username__in=users).count(): raise ValidationError("One or more users not found")
UTF-8
Python
false
false
2,013
8,624,294,332,876
786e4d902793f27d67d9e0f4c9309ddee9f7550c
dc222b7713453f4653da00fa8ce7a76d89c51e68
/web_develop_test/implement_table_01.py
0e6137c7efab6c8b89e8165370a7cb98b1561f00
[]
no_license
aimeiyan/exercise
https://github.com/aimeiyan/exercise
d935d48ddab90c55b8b9ac89e821abf117d5f609
617261af69db836a649bd4044f97bec7ab3e845d
refs/heads/master
2020-05-20T11:36:34.698664
2014-02-18T11:27:44
2014-02-18T11:27:44
10,766,847
0
3
null
null
null
null
null
null
null
null
null
null
null
null
null
__author__ = 'nancy' from collections import namedtuple import sqlite3 Link = namedtuple('Link', ['id', 'submitter_id', 'submitted_time', 'vote', 'title', 'url']) # print Link._fields links = [Link(0, 12345, 1524525, 3252, "ujjiarojiojojpw", "jipqitjuiiooit"), Link(1, 23456, 2849393, 2384, "29ajhlwtgjhjhkk", "iqpouti9putqio"), Link(2, 73849, 2389498, 8923, "uhjhaihi8utiuaa", "uiatuihgfhuiui"), Link(3, 28439, 2984983, 2894, "2hiuhaipuitquiu", "iqu8t9uq98uuu9"), Link(4, 74383, 9939845, 2194, "uhiapur9ipuqrui", "ji8a9ur8q9tyu7u"), Link(5, 73849, 4264833, 323256, "uhiapur9ipuqrui", "ji8a9ur8q9tyu7u"), Link(6, 22222, 34555, 4532, "uhiapur9ipuqrui", "ji8a9ur8q9tyu7u") ] db = sqlite3.connect('/tmp/example.db') try: db.execute(''' create table links ( id interger, summitter_id integer ) ''') except Exception as e: print e links_from_db = db.execute('select * from links') for l in links_from_db: print l, 'before' print 'before------------------' for l in links: for i in range(1000): db.execute('insert into links values(?, ?)', (l.id, l.submitter_id)) db.commit() links_from_db = db.execute('select * from links') for l in links_from_db: print l def query(): for l in links: if l.id == 3: return l.vote def query_sort(): submit = [] for l in links: if l.submitter_id == 73849: submit.append(l) submit.sort(key=lambda x: x.submitted_time) # print sorted(submit,key=lambda y: y.submitted_time) return submit print query_sort() # print query()
UTF-8
Python
false
false
2,014
15,788,299,817,155
a5f09b5793c44223c61d8ee5bd071693934cc7aa
2568e4dd25684aeb35e2a68e26229b6929c21e55
/populous/photogalleries/admin.py
c690f7130bf4d27ab3fe4d979c8ebae816d6381d
[ "BSD-3-Clause" ]
permissive
caiges/populous
https://github.com/caiges/populous
9aa2ae8f0d520e8f4f4b30146bd4e0bac58dfb87
d07094f9d6b2528d282ed99af0063002480bc00b
refs/heads/master
2016-09-05T17:58:23.102856
2008-11-25T19:43:42
2008-11-25T19:43:42
122,859
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin from populous.photogalleries.models import Gallery, GalleryPhoto, GallerySet class GallerySetAdmin(admin.ModelAdmin): fieldsets = ( (None, {'fields': ('name', 'slug', 'description', 'template_name', 'sites')}), ) list_display = ('name',) list_filter = ('date_created', 'sites',) date_hierarchy = 'date_created' search_fields = ('name',) prepopulate_fields = {'slug': ('name',)} class GalleryPhotoInlineAdmin(admin.TabularInline): model = GalleryPhoto extra = 10 raw_id_fields = ('photo',) class GalleryAdmin(admin.ModelAdmin): fieldsets = ( (None, {'fields': ('name', 'slug', 'description', 'template_prefix', 'sites', 'audio')}), ) list_display = ('name',) list_filter = ('date_created', 'sites') date_hierarchy = 'date_created' search_fields = ('name',) prepopulate_fields = {'slug': ('name',)} raw_id_fields = ('audio',) inlines = (GalleryPhotoInlineAdmin,) admin.site.register(Gallery, GalleryAdmin) admin.site.register(GallerySet, GallerySetAdmin)
UTF-8
Python
false
false
2,008
19,499,151,529,980
5fba62dae4aa34adc9928366e7745f82dc374646
80a3cc2ebd27f730bcced6f7bc5030017153a950
/code/profiles.py
36143423101b0f7725258474df8dae12a39a3dbd
[ "GPL-1.0-or-later" ]
non_permissive
nealholt/wild-black-yonder
https://github.com/nealholt/wild-black-yonder
cc2ecfa3c53a9666cbb364dcaccb64566ff68f37
bc490b99f52a013432d6304be83d4aca963ae63f
refs/heads/master
2021-01-01T17:15:56.434118
2014-01-17T15:08:16
2014-01-17T15:08:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import weapon import engine #Each method in this file takes a physical object and sets a profile for the object. For example, bulletProfile(pObject) will set pObject to have bullet characteristics. Then the bullet will call bulletProfile on itself. This, I think, will clean the code and reduce redundancy and voodoo constants. def shipProfile(ship, profile='default'): #Give enemy a random weapon temp = weapon.generateWeapon(rd.randint(0, len(weapon.weapon_class_names)-1)) temp.shooter = ship ship.gun = temp #Give enemy an engine ship.engine = engine.Engine() engine.setProfile('mk1', ship.engine) #return 'destroyer' #Return the image to use
UTF-8
Python
false
false
2,014
9,096,740,759,105
148ba3bc2904557a299867142b7c23f0db489da1
040e3deaf4e5a4c64de9874b06efc93b92889be8
/preprocessing/extract.py
91bc5be6322c373e16ba68b053a4f2d5efec8763
[]
no_license
clouizos/AIR
https://github.com/clouizos/AIR
ccba7c365ae3e86949bf19f42b6b2728728f5451
b7544f58fc7c7fa34b492b1293890fa1a67f1402
refs/heads/master
2021-01-15T11:31:03.156841
2014-03-31T19:40:19
2014-03-31T19:40:19
17,168,919
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- """ Created on Sat Feb 15 00:21:40 2014 @author: root """ import json import subprocess import multiprocessing as mp def getCurl(query): add = 'http://localhost:9876/search_expressie,search_selectie/_search?pretty=1' result = subprocess.Popen(['curl','-XGET',add, '-d',query],stdout=subprocess.PIPE) resultDic = json.loads(result.stdout.read()) return resultDic def saveToFile(text, ID): filename = '../../virdir/Scratch/extractedDocNew/'+ID+'.txt' f = open(filename, 'w') f.write(text.encode('utf8')) f.close() def saveJson(text,filename): f = open(filename,'w') f2 = open("b"+filename,'w') f.write(json.dumps(text,encoding='latin-1')) f2.write(str(text)) f.close() f2.close() def grabDoc(r): total = [] try : total.append('--titel-- '+r['_source']['expressie']['titel']['tekst']) except : pass try : total.append('--titel-- '+r['_source']['selectie']['titel']['tekst']) except : pass try : total.append('--opnamedatum-- '+r['_source']['expressie']['ext']['opnamedatum']) except : pass try : total.append('--opnamedatum-- '+r['_source']['selectie']['ext']['opnamedatum']) except : pass try : total.append('--geografische_namen-- '+r['_source']['expressie']['niveau']['geografische_namen']) except : pass try : total.append('--geografische_namen-- '+r['_source']['selectie']['niveau']['geografische_namen']) except : pass try : maker = r['_source']['expressie']['maker'] mn = '' for m in maker : mn += (m['naam']+' - ') if len(mn)>2: total.append('--maker-- '+mn[:-2]) #remove the last "- " except : pass try : maker = r['_source']['selectie']['maker'] mn = '' for m in maker : mn += (m['naam']+' ') if len(mn)>2: total.append('--maker-- '+mn[:-2]) except : pass try : total.append('--beschrijving-- '+r['_source']['expressie']['niveau']['beschrijving']) except : pass try : total.append('--beschrijving-- '+r['_source']['selectie']['niveau']['beschrijving']) except : pass try : total.append('--samenvatting-- '+r['_source']['expressie']['niveau']['samenvatting']) except : 
pass try : total.append('--samenvatting--'+r['_source']['selectie']['niveau']['samenvatting']) except : pass if len(total)>0: text = ' '.join(total) else: text = '' return text def process(li): if len(li)<1 : return searchID = li[-4] if li[3] in ['2','4','13','27']: #this action has searchID propery # save documents to file with itemId as filename #================================================ if searchedID.count(searchID)==0: searchedID.append(searchID) try : query = str(json.loads(shDic[searchID][-1])['jsonQuery']) except: print 'Search ID is not in searchHistory table' query = '' if query!='' : results = getCurl(query)['hits']['hits'] for r in results: if savedID.count(r['_id'])==0: savedID.append(r['_id']) text = grabDoc(r) saveToFile(text, r['_id']) #ub = [l.strip('\n') for l in open('BZI_tbl_UserBehavior.txt')] #sh = [l.strip('\n') for l in open('BZI_tbl_SearchHistory.txt')] if __name__ == '__main__': ubsplit = [filter(None, l.split('\t')) for l in [l.strip('\n') for l in open('BZI_tbl_UserBehavior.txt')]] shsplit = [filter(None, l.split('\t')) for l in [l.strip('\n') for l in open('BZI_tbl_SearchHistory.txt')]] shDic = {} for l in shsplit: if len(l)<1: continue shDic[l[0]]=l counter = 0 savedID = [] searchedID = [] pool = mp.Pool() for l in ubsplit: counter+=1 print str(counter)+' of '+str(len(ubsplit)) pool.apply_async(process, args = (l, )) pool.close() pool.join()
UTF-8
Python
false
false
2,014
14,499,809,624,523
dde34002d59fabd47b30819d3250f53ebbc7a7f3
8cf80fb7a4a0670e070c5c3d899bcc73477f8335
/youtube_dl/extractor/cmt.py
88e0e9aba9150cea2ccac5a6ea4be06b9d0700ba
[ "LicenseRef-scancode-public-domain", "Unlicense" ]
permissive
americanstone/youtube-dl
https://github.com/americanstone/youtube-dl
d1ffa6cc700afd12d260b77c3daefb45598763b7
91994c2c81302fede68ead037e3f3e7353b1b5d5
refs/heads/master
2020-03-05T17:30:27.986842
2014-05-16T22:17:40
2014-05-16T22:17:40
19,872,621
0
0
Unlicense
true
2020-04-05T19:55:50
2014-05-16T22:29:26
2020-04-05T15:55:59
2020-04-05T15:55:54
50,138
0
0
1
Python
false
false
from .mtv import MTVIE


class CMTIE(MTVIE):
    """Extractor for video pages on cmt.com.

    All of the actual feed handling lives in the MTVIE base class; this
    subclass only supplies the CMT-specific URL pattern and feed endpoint.
    """

    IE_NAME = u'cmt.com'
    # Matches e.g. http://www.cmt.com/videos/<artist>/<id>/<slug>.jhtml
    _VALID_URL = r'https?://www\.cmt\.com/videos/.+?/(?P<videoid>[^/]+)\.jhtml'
    # RSS embed endpoint consumed by the MTVIE feed machinery.
    _FEED_URL = 'http://www.cmt.com/sitewide/apps/player/embed/rss/'

    _TESTS = [{
        u'url': u'http://www.cmt.com/videos/garth-brooks/989124/the-call-featuring-trisha-yearwood.jhtml#artist=30061',
        u'md5': u'e6b7ef3c4c45bbfae88061799bbba6c2',
        u'info_dict': {
            u'id': u'989124',
            u'ext': u'mp4',
            u'title': u'Garth Brooks - "The Call (featuring Trisha Yearwood)"',
            u'description': u'Blame It All On My Roots',
        },
    }]
UTF-8
Python
false
false
2,014
5,162,550,704,776
eca94a29fe4574c4e4244f7514b23a09e1923901
a910acc0131883741a4db99848226f25aedfe32e
/tests.py
b04a413450d912cb131197be3ca2b89ebc9e800d
[]
no_license
lukasz-lysik/music-manager
https://github.com/lukasz-lysik/music-manager
45049a176ddb91148f5a9c28a6ecea1a96bd9a91
b688e4b2f5beaff7005129e92127fc4c508b6139
refs/heads/master
2021-01-20T11:57:03.558040
2013-11-02T19:20:11
2013-11-02T19:20:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def test():
    """Placeholder test; intentionally does nothing and returns None."""
    return None


def test_2():
    """Placeholder test; intentionally does nothing and returns None."""
    return None
UTF-8
Python
false
false
2,013
16,106,127,370,288
fd25b9e11686e68daeedc861b43eb12bf2fc384b
cfc70b2ea6d18123135defd3d3a443042ef51e0d
/framework/bin/redis/stop.py
4eb59984f9e1a2b59628c1625e27038505a3243c
[ "MIT" ]
permissive
tempbottle/CoreJS
https://github.com/tempbottle/CoreJS
695a9a6398af12dfe874b46e05a809a68a830c40
2b3b2a9c78446527ab4896a87b1dd13455301707
refs/heads/master
2021-01-17T14:09:00.289889
2011-12-19T05:41:23
2011-12-19T05:41:23
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python import os, status, time def main(): pids = status.getServerPids() if ( len(pids) ): plural = "s" if len(pids) > 1 else '' pids = " ".join(pids) cmd = "kill %s >/dev/null 2>&1; kill -9 %s >/dev/null 2>&1" % (pids, pids) output = os.popen(cmd) check = status.getServerPids() if len(check): check = status.getServerPids() # extra check, just to be sure if len(check): print "\nUnable to stop Redis Server with PID%s \033[0;31m%s\033[0m\n" % (plural, pids) else: print "\n\033[0;31mRedis Server stopped\033[0m\n" else: print "\n\033[0;31mRedis Server is not running\033[0m\n" if __name__ == '__main__': main()
UTF-8
Python
false
false
2,011
2,817,498,568,616
1a88f28772191f6c18bb77b5225a388d322ce64f
0b0da44f6614bb20f340d7eef26c23657680ec82
/src/SampleCode/OpenCVStuff/edgy.py
b88bf4cad13ab6ee8ee09f905cffde5024be0fb6
[]
no_license
HVisionSensing/100Bot
https://github.com/HVisionSensing/100Bot
df4df6df3ced9d2997d53623565e27aa619739b0
cd51393fbc203e284cbbb7faf469d03ccdcbf011
refs/heads/master
2021-01-15T14:11:28.259321
2012-04-05T05:22:00
2012-04-05T05:22:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Created on Nov 28, 2009

@author: trasa
'''
#import os; print os.environ['PATH'];
import Image
import ImageFilter

if __name__ == '__main__':
    # Load the source TIFF, run PIL's edge-detection convolution filter,
    # and write the filtered result to a new file.
    source_image = Image.open('question_no_answer.tif')
    edge_image = source_image.filter(ImageFilter.FIND_EDGES)
    edge_image.save('edge.tif')
UTF-8
Python
false
false
2,012
601,295,450,001
e0498db8e9de77da2e420c0f12bc94d46341d96d
e2cdd32ed1ffce130e4a696e03a7b2c33b97c0fc
/tools/jenkins-scripts/post-build.py
1b59a2c5997b4be7bfac589b610ffa0d89e4ca5c
[]
no_license
newnon/cocos2d-x
https://github.com/newnon/cocos2d-x
17494175adf08cbf848ec05463b9c07991d03755
07af7c0115e21abbe0a1e4f5583439c265bf8572
refs/heads/3.1-hotfix
2020-12-11T04:19:18.217076
2014-08-18T12:41:12
2014-08-18T12:41:12
15,003,647
6
9
null
true
2018-01-19T01:40:05
2013-12-07T10:07:15
2017-12-06T06:15:39
2018-01-18T19:54:59
1,032,833
4
7
0
C++
false
null
import os
import json
import requests
import jenkinsapi
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.constants import STATUS_SUCCESS

# Jenkins passes the raw GitHub webhook payload in through the environment.
payload_str = os.environ['payload']
payload_str = payload_str.decode('utf-8','ignore')
#parse to json obj
payload = json.loads(payload_str)
#pr = payload['pull_request']
# NOTE(review): html_url/statuses_url are read from the top level, so the
# payload is presumably already the pull_request object — confirm upstream.
url = payload['html_url']
print "build pr:" + url
#get statuses url
statuses_url = payload['statuses_url']
J = Jenkins(os.environ['JENKINS_URL'])
target_url = os.environ['BUILD_URL']
build_number = int(os.environ['BUILD_NUMBER'])
data = {"state":"pending", "target_url":target_url}
access_token = os.environ['GITHUB_ACCESS_TOKEN']
Headers = {"Authorization":"token " + access_token}
# Ask Jenkins for the outcome of this build and map it onto the two
# commit-status values GitHub understands here.
result = J[os.environ['JOB_NAME']].get_build(build_number).get_status()
if(result == STATUS_SUCCESS):
    data['state'] = "success"
else:
    data['state'] = "failure"
# Publish the status back to the pull request's statuses endpoint.
requests.post(statuses_url, data=json.dumps(data), headers=Headers)
UTF-8
Python
false
false
2,014
5,781,025,997,502
0bb93ff2c9271dcddacf4da8293567b4bdd5ef7f
d59a7f208b85c304b1dd5aeb0725e7c0bdc7a155
/HuC/tools/convert-roms
4cdc9428d8677b48456f85e3d1740b8f885280f0
[ "MIT" ]
permissive
metteo/chipce8
https://github.com/metteo/chipce8
e1c10c3d4961faae825aa6b8189ab20b46eb6871
657cc8753a964d54c20093a1a506524231e456ce
refs/heads/master
2021-02-24T20:24:36.414731
2014-12-12T01:11:26
2014-12-12T01:11:26
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python import os import re import md5 TEMPLATE = \ """\ #incbin({0},"{1}"); #define {0}_SIZE {2} """ CASE = \ """\ case {0}: fmemcpy(dest,{1},{1}_SIZE); break; """ SWITCHFUN = \ """\ void chip8_load_rom(char* dest, int rom) {{ switch(rom) {{ {0} default: break; }} }} """ def clean_var(name): m = md5.new() m.update(name) return 'r'+m.hexdigest() def calcgamedata(path): data = [] roms = os.listdir(path) roms.sort() for rom in roms: (filename,ext) = os.path.splitext(rom) if ext == '.ch8': fullpath = os.path.join(path,rom) size = os.stat(fullpath).st_size d = (filename,fullpath,size) data.append(d) return data data = calcgamedata('roms/chip8') with open('roms.c','w') as f: for (name,path,size) in data: output = TEMPLATE.format(clean_var(name), path, str(size)) f.write(output) f.write("\nconst char *roms[] =\n{\n") names = [' "'+name+'"' for (name,path,size) in data] f.write(',\n'.join(names)) f.write('\n};\n') names = [clean_var(name) for (name,path,size) in data] f.write('\n') output = "" for i in xrange(0,len(names)): output += CASE.format(i,names[i]) output = SWITCHFUN.format(output) f.write(output)
UTF-8
Python
false
false
2,014
4,913,442,608,190
bee138c86ea76bff4cbbfe536f96b266837f5288
337e39cb024ef57128d899bdabc43149d42c534a
/setup.py
025c2f4fe008af018252a3adc96040aa7bac1b2e
[]
no_license
vickleford/ephc
https://github.com/vickleford/ephc
45f00cbb30efa9ea7d39f0467cbbe5a4e4f955b6
35ecc752aa5b44b814c20eac2ab59fbfcd58a25d
refs/heads/master
2021-01-18T14:11:06.580952
2013-02-01T20:30:53
2013-02-01T20:30:53
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Prefer setuptools, but fall back to plain distutils when unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# All package metadata in one mapping so it stays easy to inspect and edit.
config = {
    'name': 'ephc',
    'version': '0.1',
    'description': 'End Point Health Checker',
    'author': 'Vickleford',
    'author_email': '[email protected]',
    'url': 'https://github.com/vickleford/',
    'download_url': 'https://github.com/vickleford/',
    'packages': ['ephc'],
    'install_requires': ['PyYAML', 'python-memcached', 'MySQL-python', 'psycopg2'],
    'entry_points': {
        'console_scripts': [
            'ephc = ephc.script:run',
        ],
    },
}

setup(**config)
UTF-8
Python
false
false
2,013
8,340,826,531,797
db6c84598c78e817540910715242f8e34bc59bdd
f44b5fcc15409fea7378f2cdc63449f68666efe5
/pepperstack/__main__.py
67aba3a4fc555606e4158336a88f6d7e6f630f16
[ "MIT" ]
permissive
Korrigan/pepperstack
https://github.com/Korrigan/pepperstack
d584cb01a0cd9b23ca4b8e8ce307fe66ed5a4cca
53c1c091044df39d0efaf858e48f953d8c496b59
refs/heads/master
2016-09-06T19:10:14.534360
2014-09-16T09:29:06
2014-09-16T09:29:06
19,186,510
1
1
null
false
2015-01-18T16:26:10
2014-04-26T19:33:20
2015-01-18T16:25:13
2014-09-16T09:29:08
236
1
2
2
Python
null
null
""" Main module for pepperstack Calls pepperstack with formatted parameters """ import sys from . import cli def usage(exit=1): """ Prints a brief usage on stdout and exit """ print("Usage: pepper <command> [arg[=value], ...]") print("") print("Run `pepper help` for more info") sys.exit(exit) def main(): """ Main function for pepperstack """ args = [] kwargs = {} cmd = sys.argv.pop(0) for arg in sys.argv: if arg.find('=') != -1: (k, v) = arg.split('=', 1) kwargs[k] = v else: args.append(arg) try: cli(cmd, *args, **kwargs) except Exception as e: print("Error: {0}".format(e)) sys.exit(1) else: sys.exit(0) if __name__ == "__main__": if len(sys.argv) < 2: usage() sys.argv.pop(0) main()
UTF-8
Python
false
false
2,014
14,791,867,383,730
aeb7a20adc1a7ca9a62c831cd9899af590e38a18
9bb43731d47dee176eb714060b5ac0f6b91bdab4
/multistatus/admin.py
412ae27489c193b196b5a148ab9f971bd4f07a5e
[]
no_license
ryanhiebert/multistatus
https://github.com/ryanhiebert/multistatus
c38e489bff93fdef9024f8bc0f356bcd69ea1f7c
273764930295329afea20abd660f1cee86d9a926
refs/heads/master
2016-09-05T18:12:24.668282
2014-02-01T23:43:32
2014-02-01T23:43:32
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin

from .models import Repository, Requirement, Commit, Status

# Register every multistatus model with the admin site using the default
# ModelAdmin options; order matches the import list above.
for _model in (Repository, Requirement, Commit, Status):
    admin.site.register(_model)
UTF-8
Python
false
false
2,014
18,614,388,301,103
a29de5126808d5d374f1697d9c2e0bcbc801aa91
532831dec62a44f811b692e559d649fce8a1e8e2
/apps/treeheaders/migrations/0001_initial.py
19f501817a6f200f83fcf79f04e4eaeacc676a7b
[]
no_license
naawha/tables
https://github.com/naawha/tables
a955581c17164ad36820bdc0966e47697d29c60a
227bd88305c6c7b878a2c376c863db5e75162384
refs/heads/master
2016-09-07T02:30:44.761129
2014-09-22T19:45:35
2014-09-22T19:45:35
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Header' db.create_table(u'treeheaders_header', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=255)), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['treeheaders.Header'], null=True, blank=True)), )) db.send_create_signal(u'treeheaders', ['Header']) def backwards(self, orm): # Deleting model 'Header' db.delete_table(u'treeheaders_header') models = { u'treeheaders.header': { 'Meta': {'object_name': 'Header'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treeheaders.Header']", 'null': 'True', 'blank': 'True'}) } } complete_apps = ['treeheaders']
UTF-8
Python
false
false
2,014
979,252,576,302
454721e3961452b7973b12eb9c31483bc39ada01
cf9cbbc85485832152b027f22de5c4ef6220a96d
/src/collective/openzoom/utility.py
6a3ab1c395188ba24a08aa7a302afe5d8f94c675
[]
no_license
mtrevor/collective.openzoom
https://github.com/mtrevor/collective.openzoom
783634a15d141f0b4b199e25d1572468a7fccfa4
28d381f68e672603c5fa33c4e7cac960987cd781
refs/heads/master
2016-05-30T22:46:44.779221
2013-08-09T06:08:56
2013-08-09T06:08:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Utility for collective.openzoom: queues Deep Zoom tile generation for
# Plone images via plone.app.async.
import deepzoom
import StringIO
import transaction

from Persistence import Persistent


def safe_open(obj):
    # Wrap the raw image bytes in a file-like object.
    return StringIO.StringIO(obj.getImageData())


def trigger_deepzoom(context, imgurl, imgloc):
    # Runs inside the async worker: build the .dzi pyramid for one image
    # using the tile parameters from the registry settings.
    settings = getUtility(IOpenZoomTool).settings
    creator = deepzoom.ImageCreator(
        tile_size = settings.dz_tile_size,
        tile_overlap = settings.dz_tile_overlap,
        tile_format = settings.dz_tile_format,
        image_quality = 0.8, # TODO: fix this
        resize_filter = settings.dz_resize_filter,
    )
    creator.create(imgurl, imgloc)


from zope.interface import implements
from .interfaces import IOpenZoomTool
from zope.component import getUtility
from plone.app.async.interfaces import IAsyncService
import zc.async.dispatcher
from plone.registry.interfaces import IRegistry
from .interfaces import IOpenZoomSettings
import os.path


class OpenZoomTool(Persistent):
    implements(IOpenZoomTool)

    def zoomify(self, context, image_url):
        # TODO: remove context object requirement?
        # ...or move image path construction here?
        dzi_path = os.path.join('/tmp', context.image.filename + '.dzi')
        # NOTE(review): 'async' is a reserved keyword from Python 3.7 on;
        # this code is Python-2 only as written.
        async = getUtility(IAsyncService)
        queue = async.getQueues()['']
        dzi_job = async.queueJob(trigger_deepzoom, context, image_url, dzi_path)
        # Commit so the queued job is persisted, then start a fresh txn.
        transaction.commit()
        transaction.begin()
        #add callbacks for success or failure here
        #import pdb; pdb.set_trace()

    @property
    def settings(self):
        """Quick access to registry settings"""
        return getUtility(IRegistry).forInterface(IOpenZoomSettings)


from zope.component import adapter
from zope.lifecycleevent.interfaces import IObjectAddedEvent
from plone.app.contenttypes.interfaces import IImage
from urlparse import urljoin


@adapter(IImage, IObjectAddedEvent)
def add_image(obj, event):
    # Event handler: when an image is added, queue Deep Zoom generation
    # if it exceeds the configured minimum height OR width.
    tool = getUtility(IOpenZoomTool)
    settings = tool.settings
    width, height = obj.image.getImageSize()
    if height >= settings.minimum_image_height or width >= settings.minimum_image_width:
        obj_url = urljoin(obj.portal_url(), obj.absolute_url_path())
        # XXX: check this behaviour is as expected
        image_url = obj_url + '/@@images/image'
        tool.zoomify(obj, image_url)
UTF-8
Python
false
false
2,013
12,670,153,563,423
7c09e6b4c14fd51b8d9da61159c3c9a9b1aa4633
8dd7e68879449792d5cba1f290da2777caf6f4ea
/test/test_collectionz.py
d62ee6269124a12c58b45b930db9731ec1482f52
[ "GPL-3.0-or-later", "Python-2.0", "MIT", "LicenseRef-scancode-biopython", "GPL-3.0-only", "GPL-1.0-or-later" ]
non_permissive
pziarsolo/seq_crumbs
https://github.com/pziarsolo/seq_crumbs
16b1a645c5420da918124d198ae1d8b8f9af1c12
67588e7838e94b774c24d11e6901be67711829b6
refs/heads/master
2021-01-01T18:55:44.625760
2014-12-03T09:39:54
2014-12-03T09:39:54
17,131,045
3
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia

# This file is part of seq_crumbs.
# seq_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.

# seq_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with seq_crumbs. If not, see <http://www.gnu.org/licenses/>.

import unittest

from crumbs.collectionz import OrderedSet, KeyedSet


class TestCollections(unittest.TestCase):
    # Tests for the custom set containers in crumbs.collectionz.
    # check_add() is expected to return True when the item was new (and
    # inserted) and False when it was already present.

    def test_ordered_set(self):
        # OrderedSet keeps its backing list sorted after every insertion.
        in_list = [1, 2, 3, 4, 5, 8, 10]
        not_in_list = [6, 9, 11, 13]
        ordered_set = OrderedSet(in_list)
        for item in in_list:
            assert item in ordered_set
        assert ordered_set.check_add(7)
        assert ordered_set._items == [1, 2, 3, 4, 5, 7, 8, 10]
        assert not ordered_set.check_add(2)
        assert ordered_set._items == [1, 2, 3, 4, 5, 7, 8, 10]
        assert ordered_set.check_add(0)
        assert ordered_set._items == [0, 1, 2, 3, 4, 5, 7, 8, 10]
        for item in not_in_list:
            assert item not in ordered_set

    def test_unordered_set(self):
        # KeyedSet without a key behaves like a plain membership set.
        in_set = [1, 2, 3, 4, 5, 8, 10]
        not_in_set = [6, 9, 11, 13]
        keyed_set = KeyedSet(in_set)
        for item in in_set:
            assert item in keyed_set
        assert keyed_set.check_add(7)
        assert keyed_set._items == set([1, 2, 3, 4, 5, 7, 8, 10])
        assert not keyed_set.check_add(2)
        assert keyed_set._items == set([1, 2, 3, 4, 5, 7, 8, 10])
        assert keyed_set.check_add(0)
        assert keyed_set._items == set([0, 1, 2, 3, 4, 5, 7, 8, 10])
        for item in not_in_set:
            assert item not in keyed_set

        #with key
        # With a key function, membership is decided by key(item), and only
        # the keys are stored in _items.
        a = 'a'
        in_set = [(1, a), (2, a), (3, a), (4, a), (5, a), (8, a), (10, a)]
        not_in_set = [(6, a), (9, a), (11, a), (13, a)]

        def key(item):
            return item[0]

        keyed_set = KeyedSet(in_set, key=key)
        for item in in_set:
            assert item in keyed_set
        assert keyed_set.check_add((7, a))
        assert keyed_set._items == set([1, 2, 3, 4, 5, 7, 8, 10])
        assert not keyed_set.check_add((2, a))
        assert keyed_set._items == set([1, 2, 3, 4, 5, 7, 8, 10])
        assert keyed_set.check_add((0, a))
        assert keyed_set._items == set([0, 1, 2, 3, 4, 5, 7, 8, 10])
        for item in not_in_set:
            assert item not in keyed_set


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'TestCollections']
    unittest.main()
UTF-8
Python
false
false
2,014
9,672,266,358,853
e2385ee5733a3cf2926f1c76fb2657be8d27e78c
e58b0bbb10afa5346924cc2321aa776dde9d4cf3
/double_check/renaming_sb_ids.py
4e209c7d589c7d135eff7cb6cf305adc26548bbe
[]
no_license
GermanDemidov/NIR2
https://github.com/GermanDemidov/NIR2
1753e82d3ea1cda6b7fe1867c8f6e80ffe675938
497ba8b31dec44705c034691b0a42fa164fd9ead
refs/heads/master
2021-01-19T06:50:34.669183
2014-04-23T16:49:15
2014-04-23T16:49:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*-
__author__ = 'Sergey Aganezov Jr.'
__email__ = '[email protected]'
__status__ = 'develop'

import sys


def rename_sb_ids(source_file):
    """Rewrite FASTA-style headers in ``source_file`` in place.

    Every line starting with ``>`` is replaced by ``>N`` where N counts
    headers from 1; all other lines are written back unchanged.
    """
    # Read everything first so the same path can be safely reopened for writing.
    with open(source_file, "r") as source:
        lines = source.readlines()
    header_id = 1
    with open(source_file, "w") as dest:
        for current in lines:
            if not current.startswith(">"):
                dest.write(current)
                continue
            dest.write(">%d\n" % header_id)
            header_id += 1


if __name__ == "__main__":
    # Each command-line argument is a file to renumber.
    for path in sys.argv[1:]:
        rename_sb_ids(path)
UTF-8
Python
false
false
2,014
16,097,537,470,175
03ce993222a2f7fd3ace3d0c4c87004f3069583b
cdf8a3b2a0c605b9954018848a869b9fe35220e0
/pgpt/store.py
03624e8d1fc51477361099c1cf03d6a0f9028b43
[]
no_license
tsavola/postgresql-python-test
https://github.com/tsavola/postgresql-python-test
e34d4195e64b5f4568d3552f121e4317e254ab42
5a8608412e4fefb2cc53f710fe115a2cc0ae1282
refs/heads/master
2016-09-11T12:52:41.703831
2010-09-25T16:35:01
2010-09-25T16:35:01
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Key/value store backed by a PostgreSQL hstore column; all queries run
# through the project-local plpy wrapper and the @database.call decorator.
from . import database
from . import plpy

if database.client:
    # Client side: create the backing table once, ignoring "already exists".
    import postgresql
    with database.open() as db:
        try:
            db.execute("""
                CREATE TABLE store (
                    name serial NOT NULL PRIMARY KEY,
                    data hstore NOT NULL
                )""")
        except postgresql.exceptions.DuplicateTableError:
            pass


class Store(object):
    # Dict-like facade over one row of the store table, identified by name.

    def __init__(self, name):
        self.name = name

    def __setitem__(self, key, value):
        _store_set(self.name, key, value)

    def __getitem__(self, key):
        return _store_get(self.name, key)

    def __delitem__(self, key):
        _store_del(self.name, key)

    def drop(self):
        # Remove this store's row entirely.
        _store_drop(self.name)


@database.call
def make_store():
    # Insert an empty hstore row and wrap the generated serial name.
    for row in plpy.execute("INSERT INTO store (data) VALUES (''::hstore) RETURNING name"):
        return Store(row["name"])


@database.call
def get_store(name):
    # NOTE(review): the EXISTS result is never inspected — a Store is
    # returned for any name; confirm whether a None-on-missing was intended.
    plan = plpy.prepare("SELECT EXISTS (SELECT 1 FROM store WHERE name = $1)", ["integer"])
    for row in plpy.execute(plan, [name]):
        return Store(name)


@database.call
def _store_set(name, key, value):
    # hstore concatenation (||) overwrites the key if it already exists.
    plan = plpy.prepare("UPDATE store SET data = data || hstore($2, $3) WHERE name = $1", ["integer", "text", "text"])
    plpy.execute(plan, [name, key, value])


@database.call
def _store_get(name, key):
    # Returns None when the row or key is absent (loop body never runs).
    plan = plpy.prepare("SELECT data -> $2 AS value FROM store WHERE name = $1", ["integer", "text"])
    for row in plpy.execute(plan, [name, key]):
        return row["value"]


@database.call
def _store_del(name, key):
    # NOTE(review): this is a SELECT of "data - $2", not an UPDATE — it does
    # not appear to persist the deletion; confirm intended behavior.
    plan = plpy.prepare("SELECT data - $2 FROM store WHERE name = $1", ["integer", "text"])
    plpy.execute(plan, [name, key])


@database.call
def _store_drop(name):
    plan = plpy.prepare("DELETE FROM store WHERE name = $1", ["integer"])
    plpy.execute(plan, [name])
UTF-8
Python
false
false
2,010
14,654,428,429,432
1b631e060abf96f88dee327e655289ced5048fd5
1f6c179c766ed538d132c393489efbe4b0312b2f
/core/enviroinment.py
d46dbb91b2c1a4e72c741a3514130fe4d8b3af05
[ "GPL-3.0-only" ]
non_permissive
h3rucutu/weevely-old
https://github.com/h3rucutu/weevely-old
e5a0c978d510799509e4de8458f81bc3d4369e34
4044a45dc6d0820ea1c89e7a55cdfcb3fbabb3d1
refs/heads/master
2016-08-08T13:56:11.793856
2013-04-14T18:19:38
2013-04-14T18:19:38
9,433,013
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
'''
Created on 22/ago/2011

@author: norby
'''
from core.module import ModuleException
import readline, atexit, os, re, shlex

help_string = ':show'
# Matches a buffer whose last argument ends in whitespace.
respace = re.compile('.*\s+$', re.M)
set_string = ':set'
load_string = ':load'


class Enviroinment:
    # Interactive shell environment: prompt construction, cwd tracking and
    # readline tab-completion.
    # NOTE(review): self.modhandler and self.configs are never assigned in
    # this class — presumably provided by a subclass or mixin; confirm.

    def __init__(self):
        # Verbose while probing the remote system for identity info.
        self.modhandler.set_verbosity(2)
        self.username = self.modhandler.load('system.info').run({ 0: "whoami" })
        self.hostname = self.modhandler.load('system.info').run({ 0: "hostname"})
        self.cwd = self.modhandler.load('system.info').run({ 0 : "basedir"})
        # Prompt style signals whether a system shell or only PHP is available.
        if 'shell.sh' in self.modhandler.loaded_shells:
            self.prompt = "%s@%s:%s$ "
        else:
            self.prompt = "%s@%s:%s (PHP)> "
        try:
            self.safe_mode = int(self.modhandler.load('system.info').run({ 0: "safe_mode" }))
        except:
            self.safe_mode = None
        else:
            if self.safe_mode:
                print '[!] Safe mode is enabled'
        # Restore default verbosity once probing is done.
        self.modhandler.set_verbosity()
        print '[+] List modules with <tab> and show help with %s [module name]\n' % help_string
        # Completion vocabulary: every module name plus the built-in commands.
        self.matching_words = self.modhandler.help_completion('') + [help_string, load_string, set_string]
        self.__init_completion()

    def __init_completion(self):
        # Wire up readline tab-completion and persistent command history.
        try:
            readline.set_completer_delims(' \t\n;')
            readline.parse_and_bind( 'tab: complete' )
            readline.set_completer( self.__complete )
            readline.read_history_file( self.configs.historyfile )
        except IOError:
            # No history file yet — fine on first run.
            pass
        atexit.register( readline.write_history_file, self.configs.historyfile )

    def _format_prompt(self):
        # Fill the prompt template chosen in __init__ with current identity.
        return self.prompt % (self.username, self.hostname, self.cwd)

    def _handleDirectoryChange(self, cd):
        # Resolve a 'cd'-style argument against self.cwd, then let the remote
        # shell validate the path. Returns True on success, False otherwise.
        cwd = cd[0].strip()
        path = self.cwd
        if cwd[0] == '/':
            # Absolute path replaces the cwd outright.
            path = cwd
        elif cwd == '..':
            dirs = path.split('/')
            dirs.pop()
            path = '/' + '/'.join(dirs)[1:]
        elif cwd == '.':
            pass
        elif cwd[0:3] == '../':
            path = cwd.replace( '../', path )
        elif cwd[0:2] == './':
            path = cwd.replace( './', path )
        else:
            # Relative name: append and collapse any doubled slash.
            path = (path + "/" + cwd).replace( '//', '/' )
        cwd = self.modhandler.load('shell.php').cwd_handler(path)
        if cwd:
            self.cwd = cwd
            return True
        else:
            print "[!] Error changing directory to '%s', wrong path, incorrect permissions or safe mode enabled" % path
            return False

    def __complete(self, text, state):
        """Generic readline completion entry point."""
        try:
            buffer = readline.get_line_buffer()
            line = readline.get_line_buffer().split()
            # Only the first word (the command) is completed.
            if ' ' in buffer:
                return []
            # show all commandspath
            if not line:
                all_cmnds = [c + ' ' for c in self.matching_words]
                if len(all_cmnds) > state:
                    return all_cmnds[state]
                else:
                    return []
            # account for last argument ending in a space
            if respace.match(buffer):
                line.append('')
            # resolve command to the implementation function
            cmd = line[0].strip()
            if cmd in self.matching_words:
                return [cmd + ' '][state]
            results = [c + ' ' for c in self.matching_words if c.startswith(cmd)] + [None]
            if len(results) == 2:
                # Exactly one match (plus the None sentinel): return it.
                if results[state]:
                    return results[state].split()[0] + ' '
                else:
                    return []
            return results[state]
        except Exception, e:
            # readline swallows completer exceptions, so surface them here.
            print '[!] Completion error: %s' % e
            import traceback
            traceback.print_exc()
UTF-8
Python
false
false
2,013
16,389,595,243,279
e79aeeb3b0d8dd2084055594cc06c5929d769842
dbcaf17b39b4302abf2341dd68de31854f7d2989
/midware.py
3f7eb5fb5a3efa3c645e5e0275703e92bb5d11ef
[]
no_license
vissible/webserver
https://github.com/vissible/webserver
1d56e9db66f1a932a0144d326e8160980abb5f0a
ee9b146746dce59250328dd64d3972e027ff54df
refs/heads/master
2020-12-25T16:24:41.499221
2012-09-04T06:43:01
2012-09-04T06:43:01
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2012-09-03
@author: shell.xu
'''
# WSGI-style middleware for a small web server: URL dispatch, response
# caching (LRU in memory) and cookie-based sessions.
import re, time, heapq, random, urllib, cPickle, logging
from http import *

cache_logger = logging.getLogger('cache')


class Dispatch(object):
    # Routes a request to the first handler whose regex matches the path.

    def __init__(self, urlmap=None):
        # Each entry: [compiled_pattern, handler, extra params...].
        self.urlmap = [[re.compile(i[0]),] + list(i[1:]) for i in urlmap]

    def __call__(self, req):
        for o in self.urlmap:
            m = o[0].match(req.url.path)
            if not m: continue
            req.url_match = m.groups()
            req.url_param = o[2:]
            return o[1](req)
        return self.default_handler(req)

    # NOTE(review): missing 'self' parameter — calling self.default_handler(req)
    # passes two arguments to a one-parameter function; confirm this path is
    # ever reached / intended.
    def default_handler(req):
        return response_http(404, body='File Not Found')


class Cache(object):
    # Decorator base class: caches pickled responses keyed by URL path.
    # Subclasses implement get_data/set_data.

    def __call__(self, func):
        def inner(req):
            pd = self.get_data(req.url.path)
            if pd:
                cache_logger.info('cache hit in %s', req.url.path)
                return cPickle.loads(pd)
            res = func(req)
            # Only cache complete responses that opted in via res.cache (TTL).
            if res is not None and res.cache and hasattr(res, 'body'):
                res.set_header('cache-control', 'max-age=%d' % res.cache)
                pd = cPickle.dumps(res, 2)
                self.set_data(req.url.path, pd, res.cache)
            return res
        return inner


class ObjHeap(object):
    ''' Object cache container using the LRU algorithm.
    Thanks to Evan Prodromou <evan@bad.dynu.ca>.
    '''

    class __node(object):
        # Heap node: key, value, and access counter f (lower = colder).
        def __init__(self, k, v, f):
            self.k, self.v, self.f = k, v, f
        def __cmp__(self, o):
            return self.f > o.f

    def __init__(self, size):
        self.size, self.f = size, 0
        self.__dict, self.__heap = {}, []

    def __len__(self):
        return len(self.__dict)

    def __contains__(self, k):
        return self.__dict.has_key(k)

    def __setitem__(self, k, v):
        if self.__dict.has_key(k):
            # Update in place and mark as most recently used.
            n = self.__dict[k]
            n.v = v
            self.f += 1
            n.f = self.f
            heapq.heapify(self.__heap)
        else:
            # Evict coldest entries until there is room, then reset counters.
            while len(self.__heap) >= self.size:
                del self.__dict[heapq.heappop(self.__heap).k]
                self.f = 0
                for n in self.__heap:
                    n.f = 0
            n = self.__node(k, v, self.f)
            self.__dict[k] = n
            heapq.heappush(self.__heap, n)

    def __getitem__(self, k):
        # A read also counts as a "use" for LRU purposes.
        n = self.__dict[k]
        self.f += 1
        n.f = self.f
        heapq.heapify(self.__heap)
        return n.v

    def __delitem__(self, k):
        n = self.__dict[k]
        del self.__dict[k]
        self.__heap.remove(n)
        heapq.heapify(self.__heap)
        return n.v

    def __iter__(self):
        # Iterate keys from coldest to hottest over a heap copy.
        c = self.__heap[:]
        while len(c):
            yield heapq.heappop(c).k
        raise StopIteration


class MemoryCache(Cache):
    # In-process cache: ObjHeap entries of (payload, absolute expiry time).

    def __init__(self, size):
        super(MemoryCache, self).__init__()
        self.oh = ObjHeap(size)

    def get_data(self, k):
        try:
            o = self.oh[k]
        except KeyError:
            return None
        if o[1] >= time.time():
            return o[0]
        # Expired: drop the stale entry.
        del self.oh[k]
        return None

    def set_data(self, k, v, exp):
        self.oh[k] = (v, time.time() + exp)


sess_logger = logging.getLogger('sess')
random.seed()
alpha = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/'


def get_rnd_sess():
    # 32 distinct characters sampled from alpha as a session id.
    # NOTE(review): random.sample is not cryptographically secure.
    return ''.join(random.sample(alpha, 32))


def get_params_dict(data, sp = '&'):
    # Parse 'k=v<sp>k=v' strings (query strings, cookie headers) into a dict.
    if not data: return {}
    rslt = {}
    for p in data.split(sp):
        i = p.strip().split('=', 1)
        rslt[i[0]] = urllib.unquote(i[1])
    return rslt


class Cookie(object):
    # Parsed request cookies; self.m tracks keys modified during handling so
    # only those are echoed back as set-cookie headers.

    def __init__(self, cookie):
        if not cookie: self.v = {}
        else: self.v = get_params_dict(cookie, ';')
        self.m = set()

    def get(self, k, d):
        return self.v.get(k, d)

    def __contains__(self, k):
        return k in self.v

    def __getitem__(self, k):
        return self.v[k]

    def __delitem__(self, k):
        self.m.add(k)
        del self.v[k]

    def __setitem__(self, k, v):
        self.m.add(k)
        self.v[k] = v

    def set_cookie(self, res):
        # Emit one set-cookie header per modified key.
        for k in self.m:
            res.add_header('set-cookie', '%s=%s' % (k, urllib.quote(self.v[k])))


class Session(object):
    # Decorator base class: loads/saves a pickled session dict keyed by the
    # 'sessionid' cookie. Subclasses implement get_data/set_data.

    def __init__(self, timeout):
        self.exp = timeout

    def __call__(self, func):
        def inner(req):
            req.cookie = Cookie(req.get_header('cookie', None))
            sessionid = req.cookie.get('sessionid', '')
            if not sessionid:
                # First visit: mint an id and start with an empty session.
                sessionid = get_rnd_sess()
                req.cookie['sessionid'] = sessionid
                data = None
            else:
                data = self.get_data(sessionid)
            if data: req.session = cPickle.loads(data)
            else: req.session = {}
            sess_logger.info('sessionid: %s' % sessionid)
            sess_logger.info('session: %s' % str(req.session))
            res = func(req)
            # Persist whatever the handler left in req.session.
            self.set_data(sessionid, cPickle.dumps(req.session, 2))
            req.cookie.set_cookie(res)
            return res
        return inner


class MemorySession(Session):
    # In-process session store; entries are never expired here.

    def __init__(self, timeout):
        super(MemorySession, self).__init__(timeout)
        self.sessions = {}

    def get_data(self, sessionid):
        return self.sessions.get(sessionid, None)

    def set_data(self, sessionid, data):
        self.sessions[sessionid] = data
UTF-8
Python
false
false
2,012
4,604,204,978,884
7b5172d1cf0d692523c1192d24a81c9683d00464
f3975b49d532dba77b6ee19507de873b874ac8ad
/pythonlib/lib/command_interface.py
624dbcef2bec7bc8104bc0a8d8ee770d17b5bfc6
[]
no_license
ryanjulian/h2bird-tracking
https://github.com/ryanjulian/h2bird-tracking
563a3bdbe43b3a58e69e20e341eb644493ab971b
0ae69ee1fbad3ebf1544696e3c9d1d94b02d2112
refs/heads/master
2021-01-23T11:55:29.113776
2013-02-10T01:20:56
2013-02-10T01:20:56
5,632,259
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python import time, sys, math from struct import * from serial import * from xbee import XBee from payload import Payload from dictionaries import * class CommandInterface(object): def __init__(self, address, callback): self.endpoint_addr = address self.tx_callback = callback self.debugPrint = False def close(self): pass def enableDebug(self): self.debugPrint = True def disableDebug(self): self.debugPrint = False def runGyroCalib(self, samples): data_pack = pack('H', samples) if self.debugPrint: print "Requesting gyro calibration of " + str(samples) + " samples." pld = Payload(data = data_pack, status = 0, type = Commands['RUN_GYRO_CALIB']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def getGyroCalibParam(self): data_pack = pack('H', 0) if self.debugPrint: print "Requesting gyro offsets..." pld = Payload(data = data_pack, status = 0, type = Commands['GET_GYRO_CALIB_PARAM']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setTelemetrySubsample(self, period): data_pack = pack('H', period) if self.debugPrint: print "Setting telemetry subsample period to " + str(period) pld = Payload(data = data_pack, status = 0, type = Commands['SET_TELEM_SUBSAMPLE']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def startSensorDump(self, datasets): data_pack = pack('H', datasets) if self.debugPrint: print "Requesting " + str(datasets) + " samples to be written to flash." pld = Payload(data = data_pack, status = 0, type = Commands['RECORD_SENSOR_DUMP']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestDumpData(self, start_page, end_page, tx_size): data_pack = pack('3H', start_page, end_page, tx_size) if self.debugPrint: print "Requesting memory from page " + str(start_page) + " to " + str(end_page) +\ ", " + str(tx_size) + " bytes at a time." 
pld = Payload(data = data_pack, status = 0, type = Commands['GET_MEM_CONTENTS']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestRawFrame(self): data_pack = pack('L', 0) if self.debugPrint: print "Requesting raw frame." pld = Payload(data = data_pack, status = 0, type = Commands['RAW_FRAME_REQUEST']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestTelemetry(self): data_pack = pack('L', 0) if self.debugPrint: print "Requesting telemetry." pld = Payload(data = data_pack, status = 0, type = Commands['REQUEST_TELEMETRY']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setBackgroundFrame(self): data_pack = pack('L', 0) if self.debugPrint: print "Capturing and setting background frame." pld = Payload(data = data_pack, status = 0, type = Commands['SET_BACKGROUND_FRAME']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def rotateRefGlobal(self, rotation): data_pack = pack(4*'f', *rotation) if self.debugPrint: print "Applying rotation to reference in global axes: " + str(rotation) pld = Payload(data = data_pack, status = 0, type = Commands['ROTATE_REF_GLOBAL']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def rotateRefLocal(self, rotation): data_pack = pack(4*'f', *rotation) if self.debugPrint: print "Applying rotation to reference in local axes: " + str(rotation) pld = Payload(data = data_pack, status = 0, type = Commands['ROTATE_REF_LOCAL']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRegulatorOffsets(self, offsets): data_pack = pack(3*'f', *offsets) #if self.debugPrint: # print "Setting offsets to: " + str(offsets) pld = Payload(data = data_pack, status = 0, type = Commands['SET_REGULATOR_OFFSETS']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRegulatorRef(self, ref): data_pack = pack(4*'f', *ref) #if self.debugPrint: #print "Setting quaternion reference to: " + str(ref) pld = Payload(data = data_pack, status = 0, 
type = Commands['SET_REGULATOR_REF']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRegulatorPid(self, coeffs): data_pack = pack(3*'7f', *coeffs) if self.debugPrint: print ("Setting PID coefficents to: \n" + \ "\tOffset Kp Ki Kd\n" + \ "Yaw: " + str(coeffs[1:5]) + "\n" + \ "Pitch: " + str(coeffs[8:12]) + "\n" + \ "Roll: " + str(coeffs[15:19])) pld = Payload(data = data_pack, status = 0, type = Commands['SET_REGULATOR_PID']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRegulatorRateFilter(self, filter_coeffs): data_pack = pack('2H8f', *filter_coeffs) if self.debugPrint: print "Setting filter coefficients of " + str(filter_coeffs) pld = Payload(data = data_pack, status = 0, type = Commands['SET_REGULATOR_RATE_FILTER']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRateMode(self, flag): data_pack = pack('B', flag) if self.debugPrint: print "Setting rate mode to: " + str(flag) pld = Payload(data = data_pack, status = 0, type = Commands['SET_RATE_MODE']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setGlobalRateSlew(self, rate): data_pack = pack('3f', *rate) if self.debugPrint: print "Setting rate slew to: " + str(rate) pld = Payload(data = data_pack, status = 0, type = Commands['SET_RATE_SLEW']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setEstimateRunning(self, mode): data_pack = pack('B', mode) if self.debugPrint: print "Setting pose estimation mode to: " + str(mode) pld = Payload(data = data_pack, status = 0, type = Commands['SET_ESTIMATE_RUNNING']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRegulatorMode(self, flag): data_pack = pack('B', flag) if self.debugPrint: print "Setting regulator state to " + str(flag) pld = Payload(data = data_pack, status = 0, type = Commands['SET_REGULATOR_MODE']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setRemoteControlValues(self, thrust, steer, elevator): data_pack 
= pack('3f', thrust, steer, elevator) if self.debugPrint: print "Setting RC values to thrust: " + str(thrust) + "\tsteer: " + str(steer) + \ "\televator: " + str(elevator) pld = Payload (data = data_pack, status = 0, type = Commands['SET_RC_VALUES']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def getTelemetryData(self): data_pack = pack('H', 0) if self.debugPrint: print "Requesting telemetry data..." pld = Payload(data = data_pack, status = 0, type = Commands['REQUEST_TELEMETRY']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def sendPing(self): data_pack = pack('H', 0) if self.debugPrint: print "Pinging..." pld = Payload(data = data_pack, status = 0, type = Commands['PING']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def sendEcho(self): data_pack = pack('H', 0) if self.debugPrint: print "Requesting echo..." pld = Payload(data = data_pack, status = 0, type = Commands['ECHO']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestCamParams(self): data_pack = pack('H', 0) if self.debugPrint: print "Requesting camera parameters..." pld = Payload(data = data_pack, status = 0, type = Commands['CAM_PARAM_REQUEST']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestDirDump(self, addr, pan): data_pack = pack('HH', addr, pan) if self.debugPrint: print "Requesting directory dump of: " + str(addr) + " " + str(pan) pld = Payload(data = data_pack, status = 0, type = Commands['DIR_DUMP_REQUEST']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setHP(self, flag): data_pack = pack('H', flag) if self.debugPrint: print "Setting high pass to " + str(flag) pld = Payload(data = data_pack, status = 0, type = Commands['SET_HP']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def zeroEstimate(self): data_pack = pack('H', 0) if self.debugPrint: print "Zeroing attitude estimate." 
pld = Payload(data = data_pack, status = 0, type = Commands['ZERO_ESTIMATE']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def requestAttitude(self): data_pack = pack('H', 0) if self.debugPrint: print "Requesting attitude." pld = Payload(data = data_pack, status = 0, type = Commands['REQUEST_ATTITUDE']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def setSlewLimit(self, limit): data_pack = pack('f', limit) if self.debugPrint: print "Setting slew rate limit to: " + str(limit) + " radians/sec." pld = Payload(data = data_pack, status = 0, type = Commands['SET_SLEW_LIMIT']) self.tx_callback(dest = self.endpoint_addr, packet = str(pld)) def processPacket(self, packet): print "Command interface objects don't need to process packets." pass
UTF-8
Python
false
false
2,013
16,346,645,550,558
c3e9089fccf50af4cae75ffed08edeee286ce125
d0ef8203195b20de063b2d65879de8ca878d269b
/woodland/configuration/directory.py
51284e83e498e71a6bf9bdae166049526dac7b78
[ "Apache-2.0" ]
permissive
Ogrodniczek/Woodland
https://github.com/Ogrodniczek/Woodland
7bc4e368178b6e068c9da313a8ef99f164a16d84
9282626dee204c2f45cac4a4be9e93ae766d12da
refs/heads/master
2015-08-09T04:14:54.481731
2013-10-10T21:21:55
2013-10-10T21:21:55
13,480,924
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
'''This module handle directory structure for configuration'''
UTF-8
Python
false
false
2,013
1,563,368,106,245
38b28417e4f20ec2679d5835434875be04ded102
45b78f414458c14f61d45aaa02ed09337a64d4ef
/develop/Karrigell/data/muireception/cache/admin/translation/index.py
ccd78ccad35a1f8304b238b4e13296af5978fae3
[ "BSD-3-Clause" ]
permissive
broader/EasyReception
https://github.com/broader/EasyReception
3c891b3ea141c698c484552c9e5e5d3f18266f30
ed330b168a8759a92b9b295518d1f56730040c67
refs/heads/master
2021-01-10T18:58:00.987286
2011-05-03T13:15:38
2011-05-03T13:15:38
364,094
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
[] from HTMLTags import * Login(role=["admin"],valid_in="/") frameset = FRAMESET(cols="25%,*",borderwidth=0) frameset <= FRAME(src="fileMenu.pih") frameset <= FRAME(name="right") PRINT( frameset)
UTF-8
Python
false
false
2,011
7,103,875,931,876
607cb04dc52b90ee14568c2a7eabcfc19ab1e150
dff479ed152084e6636e987b71d244f986b31d75
/consolegui/application/client_class.py
e629ab55a7ad10ff31986e9222e72e4638ccc292
[ "Apache-2.0" ]
permissive
nocl/calculate-3-console-gui
https://github.com/nocl/calculate-3-console-gui
46a69cc0a93ef6c6ab5a853cd79f684f3d3ad452
6b6628783cf97ca49286a0a6e4010bc7a85b5329
refs/heads/master
2016-09-05T16:13:58.123858
2013-02-26T11:38:38
2013-02-26T11:38:38
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#-*- coding: utf-8 -*- # Copyright 2012 Calculate Ltd. http://www.calculate-linux.org # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from PySide import QtGui, QtCore import urllib2 as u2 import os, re, sys from calculate.core.datavars import DataVarsCore from sudsds.transport.http import HttpTransport, SUDSHTTPRedirectHandler, \ CheckingHTTPSConnection, CheckingHTTPSHandler, \ PYOPENSSL_AVAILABLE, PyOpenSSLSocket from sudsds.transport import Transport from sudsds.properties import Unskin from cookielib import CookieJar, DefaultCookiePolicy import socket, ssl import OpenSSL, hashlib from sudsds.client import Client from logging import getLogger from calculate.core.client.cert_verify import verify, VerifyError from more import show_msg, show_question, LabelWordWrap flag = 0 log = getLogger(__name__) class AddServerCert (QtGui.QDialog): def __init__(self, parent, ClientObj, cert): super(AddServerCert, self).__init__() self.ClientObj = ClientObj self.parent = parent self.cert = cert self.grid = QtGui.QGridLayout(self) self.lbl_list = [] self.grid.addWidget(LabelWordWrap(_('Untrusted Server Certificate!'), \ self), 0, 1, 1, 2) certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, cert) self.grid.addWidget(LabelWordWrap (_('Fingerprint = %s') \ % certobj.digest('SHA1'), self), 1, 0, 1, 3) self.grid.addWidget(LabelWordWrap (_('Serial Number = %s') \ % certobj.get_serial_number(), self), 2, 0, 1, 3) self.tab = QtGui.QTabWidget(self) # add Issuer tab self.issuer_wgt = 
QtGui.QWidget(self) self.issuer_layout = QtGui.QVBoxLayout() Issuer = certobj.get_issuer().get_components() for i in Issuer: self.issuer_layout.addWidget(LabelWordWrap \ ("%s : %s" %(i[0], i[1]),self)) self.issuer_wgt.setLayout(self.issuer_layout) self.tab.addTab(self.issuer_wgt, _('Issuer')) # add Subject tab self.subject_wgt = QtGui.QWidget(self) self.subject_layout = QtGui.QVBoxLayout() Subject = certobj.get_subject().get_components() for item in Subject: self.subject_layout.addWidget(LabelWordWrap \ ("%s : %s" %(item[0], item[1]),self)) self.subject_wgt.setLayout(self.subject_layout) self.tab.addTab(self.subject_wgt, _('Subject')) # add certificate # self.cert_lbl = LabelWordWrap (certobj,self) # self.tab.addTab(self.cert_lbl, 'Certificate') self.grid.addWidget(self.tab, 3, 0, 3, 3) self.lbl_list.append(LabelWordWrap \ (_("Add this server certificate to trusted or ") +\ _('try adding the CA and root certificates to trusted?'),self)) self.pix_lbl = QtGui.QLabel(self) pi = QtGui.QPixmap() pi.load('/usr/share/icons/oxygen/48x48/status/security-medium.png') self.pix_lbl.setPixmap(pi) self.grid.addWidget(self.pix_lbl, 0,0) for num_lbl in range(len(self.lbl_list)): self.grid.addWidget(self.lbl_list[num_lbl], num_lbl + 8, 0, 1, 3) x = len (self.lbl_list) + 8 self.server_but = QtGui.QPushButton(_('Add the server certificate'), self) self.server_but.clicked.connect(self.add_server) self.server_but.clicked.connect(self.close) self.grid.addWidget(self.server_but, x, 0) self.ca_but = QtGui.QPushButton(_("Add the CA and root certificates"), self) self.ca_but.clicked.connect(self.add_ca) self.ca_but.clicked.connect(self.add_server) self.ca_but.clicked.connect(self.close) self.grid.addWidget(self.ca_but, x, 1) self.cancel_but = QtGui.QPushButton(_('Cancel'), self) self.cancel_but.clicked.connect(self.close) self.grid.addWidget(self.cancel_but, x, 2) # self.setLayout(self.grid) self.setWindowTitle (_('Add the certificate to trusted')) # move to center prim_screen = 
ClientObj.app.desktop().primaryScreen() x = ClientObj.app.desktop().screenGeometry(prim_screen).width()/2 - \ self.sizeHint().width()/2 y = ClientObj.app.desktop().screenGeometry(prim_screen).height()/2 - \ self.sizeHint().height()/2 self.move(x, y) self.setFixedSize(self.sizeHint()) self.setAttribute(QtCore.Qt.WA_ShowModal) self.flag = 4 def add_server(self): ca_certs = self.parent.trusted_path + "cert.list" if not os.path.exists(ca_certs): fc = open(ca_certs,"w") fc.close() if self.parent.host == '127.0.0.1': host = 'localhost' else: host = self.parent.host filename = host fc = open(self.parent.trusted_path + filename,"w") fc.write(self.cert) fc.close() with open(ca_certs) as fd: t = fd.read() # for each line for line in t.splitlines(): # Split string into a words list words = line.split() if len(words) > 1: # if first word... if words[0] == host: self.flag = 3 return 0 # Open file with compliance server certificates and server hostname fcl = open(ca_certs,"a") fcl.write(host + ' ' + filename + '\n') fcl.close() show_msg (_('Server certificate added to trusted') +'\n'+ \ (self.parent.trusted_path + filename),_('Certificate added')) self.flag = 3 from conf_connection import FrameConnection self.ConnectWidget = FrameConnection(self, self.ClientObj) self.ConnectWidget.connect_to_host(host, self.ClientObj.port) def add_ca(self): cl_client_cert_dir = \ self.ClientObj.VarsApi.Get('core.cl_client_cert_dir') homePath = self.ClientObj.VarsApi.Get('ur_home_path') cl_client_cert_dir = cl_client_cert_dir.replace("~",homePath) root_cert_dir = cl_client_cert_dir + "/ca" if not os.path.exists(root_cert_dir): try: os.makedirs(root_cert_dir) except OSError: show_msg (_('Error creating directory %s') %root_cert_dir) return 1 self.parent.list_ca_certs = [] self.parent.add_ca_cert(self.cert, self.parent.list_ca_certs) ############################################################################### class Client_suds(Client): def set_parameters (self, path_to_cert, CERT_FILE, 
PKEY_FILE): self.path_to_cert = path_to_cert if not CERT_FILE: CERT_FILE = '' self.CERT_FILE = CERT_FILE self.REQ_FILE = path_to_cert + 'client.csr' self.PKEY_FILE = PKEY_FILE self.SID_FILE = path_to_cert + 'sids' self.CRL_PATH = path_to_cert + 'ca/crl/' if not os.path.exists(self.CRL_PATH): os.makedirs(self.CRL_PATH) class CheckingClientHTTPSConnection(CheckingHTTPSConnection): """based on httplib.HTTPSConnection code - extended to support server certificate verification and client certificate authorization""" def __init__(self, ClientObj, cert_path, host, ca_certs=None, cert_verifier=None, keyobj=None, certobj=None, **kw): """cert_verifier is a function returning either True or False based on whether the certificate was found to be OK, keyobj and certobj represent internal PyOpenSSL structures holding the key and certificate respectively. """ CheckingHTTPSConnection.__init__(self, host, ca_certs, cert_verifier, keyobj, certobj, **kw) self.ClientObj = ClientObj self.cert_path = cert_path self.CRL_PATH = os.path.join(cert_path, 'ca/crl/') def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if hasattr(self, '_tunnel_host') and self._tunnel_host: self.sock = sock self._tunnel() user_root_cert = self.ClientObj.VarsApi.Get('core.cl_user_root_cert') homePath = self.ClientObj.VarsApi.Get('ur_home_path') user_root_cert = user_root_cert.replace("~",homePath) result_user_root = 1 while True: if os.path.exists(user_root_cert): result_user_root = self.connect_trusted_root(sock, \ user_root_cert, self.CRL_PATH) if result_user_root == 1: glob_root_cert = self.ClientObj.VarsApi.Get \ ('core.cl_glob_root_cert') result_root_con = 1 if os.path.exists(glob_root_cert): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) if self._tunnel_host: self.sock = sock self._tunnel() result_root_con = self.connect_trusted_root(sock, \ glob_root_cert, self.CRL_PATH) if result_root_con == 1: sock = 
socket.create_connection((self.host, self.port), self.timeout, self.source_address) if self._tunnel_host: self.sock = sock self._tunnel() result_server_con = self.connect_trusted_server \ (sock, self.CRL_PATH) if result_server_con in [1,2]: raise Exception (1) elif result_server_con == 3: continue elif result_server_con == 4: raise Exception (_('This server is not trusted')) elif result_root_con == 2: raise Exception (1) elif result_user_root == 2: raise Exception (1) break # get filename store cert server def cert_list (self, host, ca_certs, server_cert): if host == '127.0.0.1': host = 'localhost' if not os.path.exists(self.trusted_path): try: os.makedirs(self.trusted_path) except OSError: pass if not os.path.exists(ca_certs): fc = open(ca_certs,"w") fc.close() filename = None try: with open(ca_certs) as fd: t = fd.read() # for each line for line in t.splitlines(): # Split string into a words list words = line.split() if len(words) > 1: # if first word... if words[0] == host: filename = words[1] if not filename: return None except: msg = _("Certificate not found on the client's side") show_msg (msg) # self.parent.MainWidget.bottom.addMessage(msg) return None try: fd = open(self.trusted_path + filename, 'r') store_cert = fd.read() fd.close() if store_cert == server_cert: return filename except: msg = _('Error opening file') show_msg (msg + ' %s%s' %(self.trusted_path, filename)) # self.parent.MainWidget.bottom.addMessage(msg) return None def add_all_ca_cert(self, list_ca_certs): # so root cert be first, ca after homePath = self.ClientObj.VarsApi.Get('ur_home_path') cl_client_cert_dir = \ self.ClientObj.VarsApi.Get('core.cl_client_cert_dir') cl_client_cert_dir = cl_client_cert_dir.replace("~",homePath) root_cert_md5 = cl_client_cert_dir + "/ca/cert_list" list_ca_certs.reverse() for cert in list_ca_certs: system_ca_db = \ self.ClientObj.VarsApi.Get('core.cl_glob_root_cert') if os.path.exists(system_ca_db): if cert in open(system_ca_db, 'r').read(): continue 
user_root_cert = \ self.ClientObj.VarsApi.Get('core.cl_user_root_cert') user_root_cert = user_root_cert.replace("~",homePath) if os.path.exists(user_root_cert): if cert in open(user_root_cert, 'r').read(): continue md5 = hashlib.md5() md5.update(cert) md5sum = md5.hexdigest() if not os.path.exists(root_cert_md5): fc = open(root_cert_md5,"w") fc.close() filename = None with open(root_cert_md5) as fd: t = fd.read() # for each line for line in t.splitlines(): # Split string into a words list words = line.split(' ',1) if words[0] == md5sum: filename = words[1] if not filename: certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, cert) Issuer = certobj.get_issuer().get_components() for item in Issuer: if item[0] == 'CN': filename = item[1] fc = open(root_cert_md5,"a") fc.write('%s %s\n' %(md5sum, filename)) fc.close() if not filename: show_msg (_('Field "CN" not found in the certificate!')) return 1 fd = open(cl_client_cert_dir + '/ca/' + filename, 'w') fd.write(cert) fd.close() fa = open(user_root_cert, 'a') fa.write(cert+'\n') fa.close() show_msg (_("Filename = %s") %filename, _('Certificate added')) else: show_msg (_('A file with the CA certificate now exists')) get_CRL(cl_client_cert_dir) def add_ca_cert(self, cert, list_ca_certs): url = 'https://%s:%s/?wsdl' %(self.host, self.port) client = Client_suds(url, \ transport = HTTPSClientCertTransport(None,None, \ self.cert_path, parent = self.ClientObj)) client.wsdl.services[0].setlocation(url) try: cert = client.service.get_ca() except u2.URLError, e: _print ('client.service.get_ca in client_class Exception') cert = '0' if cert == '1': msg = _("Invalid server certificate!") show_msg (msg) return if cert == '2': msg = _('CA certificate not found on the server') show_msg (msg) return if cert == '0': show_msg (e, _("Not connected!")) return try: certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, cert) except: msg = _('Error. 
Certificate not added to trusted') show_msg (msg) # self.parent.MainWidget.bottom.addMessage(msg) return inf_text = '' inf_text += _("Fingerprint = %s") % certobj.digest('SHA1') inf_text += '\n'+_("Serial Number = %s") \ %str(certobj.get_serial_number()) Issuer = certobj.get_issuer().get_components() inf_text += '\n'+_("Issuer") for i in Issuer: inf_text += "\n %s : %s" %(i[0], i[1]) Subject = certobj.get_subject().get_components() inf_text += '\n'+_("Subject") for subj in Subject: inf_text += "\n %s : %s" %(subj[0], subj[1]) text = _("Add the CA certificate to trusted? ") reply = show_question(self.ClientObj.MainWidget, text, inf_text, title = _('Adding the certificate')) if reply == QtGui.QMessageBox.No: show_msg (_('Certificate not added to trusted')) elif reply == QtGui.QMessageBox.Yes: list_ca_certs.append(cert) self.add_all_ca_cert(list_ca_certs) return # add certificate server in trusted def add_server_cert(self, cert): self.add_cert = AddServerCert(self, self.ClientObj, cert) self.add_cert.exec_() return self.add_cert.flag def connect_trusted_root(self, sock, root_cert, crl_certs): self.ca_path = self.cert_path + "ca/" server_cert = ssl.get_server_certificate(addr = (self.host, self.port)) global flag if self.cert_file: f = verify(server_cert, crl_certs, flag) if not f: flag = 1 elif f == 1: sys.exit() else: import time time.sleep(0.1) try: if self.FORCE_SSL_VERSION: add = {'ssl_version': self.FORCE_SSL_VERSION} else: add = {} add['cert_reqs'] = ssl.CERT_REQUIRED # try to use PyOpenSSL by default if PYOPENSSL_AVAILABLE: wrap_class = PyOpenSSLSocket add['keyobj'] = self.keyobj add['certobj'] = self.certobj add['keyfile'] = self.key_file add['certfile'] = self.cert_file else: wrap_class = ssl.SSLSocket self.sock = wrap_class(sock, ca_certs=self.ca_certs, **add) return 0 except: return 1 def connect_trusted_server(self, sock, crl_certs): self.trusted_path = self.cert_path + "trusted/" ca_cert_list = self.trusted_path + "cert.list" server_cert = 
ssl.get_server_certificate(addr = (self.host, self.port)) global flag if self.cert_file: f = verify(server_cert, crl_certs, flag) if not f: flag = 1 elif f == 1: sys.exit() HTTPSClientCertTransport.filename = self.cert_list \ (self.host, ca_cert_list, server_cert) if HTTPSClientCertTransport.filename: try: if self.FORCE_SSL_VERSION: add = {'ssl_version': self.FORCE_SSL_VERSION} else: add = {} add['cert_reqs'] = ssl.CERT_NONE # try to use PyOpenSSL by default if PYOPENSSL_AVAILABLE: wrap_class = PyOpenSSLSocket add['keyobj'] = self.keyobj add['certobj'] = self.certobj add['keyfile'] = self.key_file add['certfile'] = self.cert_file else: wrap_class = ssl.SSLSocket self.sock = wrap_class(sock, ca_certs=self.ca_certs, **add) return 0 except OpenSSL.SSL.Error, e: if type(e.message) == list: if type(e.message[0]) == tuple: for i in e.message[0]: sys.stdout.write(i+' ') sys.stdout.flush() sys.stdout.write('\n') sys.stdout.flush() else: _print (e.message) else: _print (e.message) HTTPSClientCertTransport.filename = None return 1 except Exception, e: _print (e) HTTPSClientCertTransport.filename = None return 1 else: return self.add_server_cert(server_cert) class CheckingClientHTTPSHandler(CheckingHTTPSHandler): def __init__(self, ClientObj, cert_path, ca_certs=None, cert_verifier=None, client_certfile=None, client_keyfile=None, client_keyobj=None, client_certobj=None, *args, **kw): """cert_verifier is a function returning either True or False based on whether the certificate was found to be OK""" CheckingHTTPSHandler.__init__(self, ca_certs, cert_verifier, client_keyfile, client_certfile, client_keyobj, client_certobj) self.ClientObj = ClientObj self.cert_path = cert_path def https_open(self, req): def open(*args, **kw): new_kw = dict(ca_certs=self.ca_certs, cert_verifier=self.cert_verifier, cert_file=self.client_certfile, key_file=self.client_keyfile, keyobj=self.keyobj, certobj=self.certobj) new_kw.update(kw) return CheckingClientHTTPSConnection(self.ClientObj, 
self.cert_path, *args, **new_kw) return self.do_open(open, req) https_request = u2.AbstractHTTPHandler.do_request_ class HTTPSClientCertTransport(HttpTransport): def __init__(self, key, cert, path_to_cert, parent, password = None, ca_certs=None, cert_verifier=None, client_keyfile=None, client_certfile=None, client_keyobj=None, client_certobj=None, cookie_callback=None, user_agent_string=None, **kwargs): Transport.__init__(self) self.ClientObj = parent self.key = key self.cert = cert self.cert_path = path_to_cert if key: client_certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, file(cert).read()) if password: client_keyobj = OpenSSL.crypto.load_privatekey \ (OpenSSL.SSL.FILETYPE_PEM, file(key).read(), str(password)) else: import M2Crypto bio = M2Crypto.BIO.openfile(key) rsa = M2Crypto.m2.rsa_read_key(bio._ptr(),lambda *unused: None) if not rsa: raise OpenSSL.crypto.Error client_keyobj = OpenSSL.crypto.load_privatekey \ (OpenSSL.SSL.FILETYPE_PEM, file(key).read()) Unskin(self.options).update(kwargs) self.cookiejar = CookieJar(DefaultCookiePolicy()) self.cookie_callback = cookie_callback self.user_agent_string = user_agent_string log.debug("Proxy: %s", self.options.proxy) from dslib.network import ProxyManager proxy_handler = ProxyManager.HTTPS_PROXY.create_proxy_handler() proxy_auth_handler = \ ProxyManager.HTTPS_PROXY.create_proxy_auth_handler() if ca_certs or (client_keyfile and client_certfile) \ or (client_keyobj and client_certobj): https_handler = CheckingClientHTTPSHandler(parent, cert_path=path_to_cert, ca_certs=ca_certs, cert_verifier=cert_verifier, client_keyfile=client_keyfile, client_certfile=client_certfile, client_keyobj=client_keyobj, client_certobj=client_certobj) else: https_handler = u2.HTTPSHandler() self.urlopener = u2.build_opener(SUDSHTTPRedirectHandler(), u2.HTTPCookieProcessor(self.cookiejar), https_handler) if proxy_handler: self.urlopener.add_handler(proxy_handler) if proxy_auth_handler: 
self.urlopener.add_handler(proxy_auth_handler) self.urlopener.addheaders = [('User-agent', self.user_agent_string)] ############################################################################### def get_CRL(path_to_cert): """ get new CRL (Certificate Revocation List) from all CA """ # local CRL CRL_path = os.path.join(path_to_cert, 'ca/crl/') if not os.path.exists(CRL_path): if not os.path.exists(os.path.join(path_to_cert, 'ca')): if not os.path.exists(path_to_cert): try: os.makedirs(path_to_cert) except OSError: _print (_("Error creating directory %s") %path_to_cert) sys.exit() try: os.makedirs(os.path.join(path_to_cert, 'ca')) except OSError: _print (_("Error creating directory %s") \ %(os.path.join(path_to_cert, 'ca'))) sys.exit() os.makedirs(CRL_path) clVars = DataVarsCore() clVars.importCore() clVars.flIniFile() # user and system ca and root certificates user_root_cert = clVars.Get('core.cl_user_root_cert') homePath = clVars.Get('ur_home_path') user_root_cert = user_root_cert.replace("~",homePath) glob_root_cert = clVars.Get('core.cl_glob_root_cert') if os.path.exists(user_root_cert): user_ca_certs = open(user_root_cert, 'r').read() else: user_ca_certs = '' if os.path.exists(glob_root_cert): glob_ca_certs = open(glob_root_cert, 'r').read() else: glob_ca_certs = '' # get certificates list fron text p = re.compile('[-]+[\w ]+[-]+\n+[\w\n\+\\=/]+[-]+[\w ]+[-]{5}\n?') user_ca_certs_list = p.findall(user_ca_certs) glob_ca_certs_list = p.findall(glob_ca_certs) # association in one list all_ca_certs_list = user_ca_certs_list + glob_ca_certs_list for ca in all_ca_certs_list: certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, ca) # get url from certificates url = None CN = None Subject = certobj.get_subject().get_components() for subj in Subject: if subj[0] == 'L': url = "https://" + subj[1] +"/?wsdl" if subj[0] == 'CN': CN = subj[1] if url: # connect to ca server (url get from certificates) try: client = Client_suds(url,\ transport = 
HTTPSClientCertTransport(None, None, \ path_to_cert)) client.set_parameters (path_to_cert, None, None) new_crl = client.service.get_crl() except VerifyError, e: _print (e.value) #rm_ca_from_trusted(ca) sys.exit() except: pass if 'new_crl' in locals(): if new_crl: if CN and len(CN) > 2: CRL_file = CRL_path + CN else: host = subj[1].split(':')[0] CRL_file = CRL_path + host if new_crl == ' ': open(CRL_file, 'w') #if os.path.exists(CRL_file): #os.unlink(CRL_file) continue if os.path.exists(CRL_file): if open(CRL_file, 'r').read() == new_crl: continue fd = open(CRL_file, 'w') fd.write(new_crl) fd.close() _print (_("CRL added")) find_ca_in_crl (CRL_path, all_ca_certs_list) def find_ca_in_crl (CRL_path, all_ca_certs_list): # CRL_name_list = glob.glob(CRL_path + '*') for ca in all_ca_certs_list: certobj = OpenSSL.crypto.load_certificate \ (OpenSSL.SSL.FILETYPE_PEM, ca) Issuer = certobj.get_issuer().get_components() for item in Issuer: if item[0] == 'CN': CN = item[1] serverSerial = certobj.get_serial_number() CRL = CRL_path + CN if not os.path.exists(CRL): continue with open(CRL, 'r') as _crl_file: crl = "".join(_crl_file.readlines()) try: crl_object = OpenSSL.crypto.load_crl \ (OpenSSL.crypto.FILETYPE_PEM, crl) except: continue revoked_objects = crl_object.get_revoked() for rvk in revoked_objects: if serverSerial == int(rvk.get_serial(), 16): rm_ca_from_trusted(ca) def rm_ca_from_trusted(ca_cert): clVars = DataVarsCore() clVars.importCore() clVars.flIniFile() user_ca_dir = clVars.Get('core.cl_client_cert_dir') homePath = clVars.Get('ur_home_path') user_ca_dir = user_ca_dir.replace("~",homePath) user_ca_dir = os.path.join(user_ca_dir, 'ca') user_ca_list = os.path.join(user_ca_dir, 'cert_list') user_ca_db = clVars.Get('core.cl_user_root_cert') homePath = clVars.Get('ur_home_path') user_ca_db = user_ca_db.replace("~",homePath) # system_ca_dir = clVars.Get('cl_core_cert_path') # system_ca_list = os.path.join(system_ca_dir, 'cert_list') system_ca_db = 
clVars.Get('cl_glob_root_cert') import hashlib md5 = hashlib.md5() md5.update(ca_cert) md5sum = md5.hexdigest() # search ca certificate in user ca list with open(user_ca_list) as fd: t = fd.read() # See each line for line in t.splitlines(): newfile = '' # and each word in line words = line.split() if words[0] == md5sum: filename = os.path.join(user_ca_dir, words[1]) if ca_cert == open(filename, 'r').read(): os.unlink(filename) else: newfile += (line + '\n') else: newfile += (line + '\n') fd.close() fn = open(user_ca_list, 'w') fn.write(newfile) fn.close() p = re.compile('[-]+[\w ]+[-]+\n+[\w\n\+\\=/]+[-]+[\w ]+[-]+\n?') # open, write and split user ca certificates user_ca_certs = open(user_ca_db, 'r').read() user_ca_certs_list = p.findall(user_ca_certs) if ca_cert in user_ca_certs_list: new_user_ca_certs = [] for cert in user_ca_certs_list: if ca_cert != cert: new_user_ca_certs.append(cert) else: _print (_("CA certificate removed from user trusted")) fd = open(user_ca_db, 'w') for cert in new_user_ca_certs: fd.write(cert) fd.close() if not os.path.exists(system_ca_db): open(system_ca_db, 'w') system_ca_certs = open(system_ca_db, 'r').read() system_ca_certs_list = p.findall(system_ca_certs) if ca_cert in system_ca_certs_list: new_system_ca_certs = [] for cert in system_ca_certs_list: if ca_cert != cert: new_system_ca_certs.append(cert) else: _print (_("CA certificate removed from system trusted")) fd = open(system_ca_db, 'w') for cert in new_system_ca_certs: fd.write(cert) fd.close() return 0
UTF-8
Python
false
false
2,013
13,546,326,883,206
ef67217d28c5ece64614a02c7a6dee2c45dd7c1f
bbcc02da2ac5c3f88670ec40d024411c531e6d94
/Wiki/src/page_management.py
6c309094d82f6ef2427e4c316990bac40acc3d98
[]
no_license
dzlab/GAE-projects
https://github.com/dzlab/GAE-projects
e042db1845af2d925d6bca6f0ef508aa15cc2bb6
31acefcf6d38f8155b03ecddd01cbd931054d690
refs/heads/master
2016-09-09T22:50:25.584554
2012-06-09T08:43:05
2012-06-09T08:43:05
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from google.appengine.ext import webapp import os import re import logging import time from google.appengine.api import memcache from google.appengine.ext.webapp import template from google.appengine.ext import db class Page(db.Model): path = db.StringProperty(multiline=False) content = db.StringProperty(multiline=True) edited = db.DateTimeProperty(auto_now_add = True) class EditPage(webapp.RequestHandler): def get(self, path): user = self.request.cookies.get('name', '') if user == "": self.redirect('/login') #login = 'login' #signup = 'signup' login = user signup = 'logout' content = "" pages = db.GqlQuery("SELECT * FROM Page") for page in pages: if page.path==path: content = page.content template_values = { "login": login, "signup": signup, "action": "/_edit"+path, "content": content } path = os.path.join(os.path.dirname(__file__), '../www/newpage.html') self.response.out.write(template.render(path, template_values)) def post(self, path): content = self.request.get('content') page = Page(path= path, content= content) page.put() self.redirect(path) class WikiPage(webapp.RequestHandler): def get(self, path): pages = db.GqlQuery("SELECT * FROM Page") found = False content = "" for page in pages: if page.path==path: found = True content = page.content if not found: self.redirect("/_edit"+path) else: template_values = { "path": path, "content": content } path = os.path.join(os.path.dirname(__file__), '../www/page.html') self.response.out.write(template.render(path, template_values)) class HistoryPage(webapp.RequestHandler): def get(self, path): user = self.request.cookies.get('name', '') if user == "": login = "login" signup = 'signup' else: login = user signup = 'logout' history = db.GqlQuery("SELECT * FROM Page " "ORDER BY edited DESC ") history = list(history) """ for update in history: if path != update.path: history.remove(update) """ template_values = { "login": login, "signup": signup, "path": path, "history": history } path = 
os.path.join(os.path.dirname(__file__), '../www/history.html') self.response.out.write(template.render(path, template_values))
UTF-8
Python
false
false
2,012
8,358,006,372,553
97a3d90b3d3637db4cd64a6d452b401089d2a41f
7d88216baa57aa86e8b7a316f1a08be368f2b8c7
/guiHello.py~
dd83d1155abefb94d0c68ac1c17e6a8a7bebc0e5
[]
no_license
B-Rich/vtk
https://github.com/B-Rich/vtk
095ba274cd9b28562a4099bc25f0c3b57df1f5a1
3286ab43656b64c333b805a4bde0b2a4df81484a
refs/heads/master
2021-01-21T00:11:21.886820
2014-05-30T16:41:57
2014-05-30T16:41:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Tkinter tutorial """ from Tkinter import * root = Tk() top = Frame(root) top.pack(side='top') L = Label(text="Hi") L.pack(side='left') root.mainloop()
UTF-8
Python
false
false
2,014
4,664,334,520,801
42a7e64a960fbae3c8e36fd24bc2b78bbf872266
c41c15eb5649510e092f7aa3c539bbb9dc085d11
/jingen/tests/resources/bad_vars.py
5d5732549e37869082c717a236907cd9a87c2b7f
[ "Apache-2.0" ]
permissive
nir0s/jingen
https://github.com/nir0s/jingen
717962663bd69dc5d10056cc75f474862aafaf0f
7d4c8c952aa90d2fa26bdb6e4272f8b66e4167ab
refs/heads/master
2016-08-04T19:54:40.711952
2014-10-11T19:01:18
2014-10-11T19:01:18
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# flake8: NOQA VARS = { "test_var: "vars_file_test_value" }
UTF-8
Python
false
false
2,014
5,927,054,887,781
c4999b2387622a70afa4a4be2dd27ddf62667908
39286f87611285ab7d60cfe30bbb4cd021727744
/getnetv1.py
cd0bc6e1d96892ce9211eb77c64e7e97edb94093
[ "LicenseRef-scancode-warranty-disclaimer" ]
non_permissive
geramirez/pythonmodules
https://github.com/geramirez/pythonmodules
adafc91ad7e7c73c32c00ac29c85367b535df456
8d2139ba7417c555383e3e812dd43300a4af0ff7
refs/heads/master
2021-01-18T01:19:27.243273
2014-06-06T02:30:28
2014-06-06T02:30:28
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import oauth2 as oauth import urllib2 as urllib import json import datetime from random import seed, randrange, shuffle from math import sqrt import sys import regex import csv class twitterapi: def __init__(self,anon): self.anon = anon self.cipher = self.cipher() self.exempt = [] self.key = 1 self.sinceid = "-1" def getkeys(self,keyfile = "consumer_keys.csv"): #set a key holder self.keys = {} #set key counter i = 1 #open csv file and read off keys with open(keyfile,'rb') as csvfile: keyfile = csv.reader(csvfile,delimiter=',') for row in keyfile: self.keys[i] = row def twitterreq(self,originalurl): consumer_key = self.keys[self.key][0] consumer_secret = self.keys[self.key][1] access_token_key = self.keys[self.key][2] access_token_secret = self.keys[self.key][3] _debug = 0 oauth_token= oauth.Token(key=access_token_key, secret=access_token_secret) oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret) signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() http_method = "GET" http_handler= urllib.HTTPHandler(debuglevel=_debug) https_handler = urllib.HTTPSHandler(debuglevel=_debug) req = oauth.Request.from_consumer_and_token(oauth_consumer, token=oauth_token, http_method=http_method, http_url=originalurl, parameters=self.parameters) req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token) headers = req.to_header() if http_method == "POST": encoded_post_data = req.to_postdata() else: encoded_post_data = None url = req.to_url() opener = urllib.OpenerDirector() opener.add_handler(http_handler) opener.add_handler(https_handler) response = opener.open(url, encoded_post_data) response = json.load(response) if 'errors' in response.keys(): print(response) if response['errors'][0]['code'] == 88: print( "moving to key: " + str(self.key)) self.key += 1 return self.twitterreq(originalurl) else: print(response) exit() else: return response def cipher(self): now = datetime.datetime.now() 
seed(now.day + now.year + now.month) alpha13 = ['z', 'x', 'c', 'v', 'b', 'n', 'm', 'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '0', '9', '8', '7', '6', '5', '4', '3', '2', '1'] shuffle(alpha13) alpha13 = "".join(alpha13) extra5 = randrange(10000,99999) return [alpha13,extra5] def encoder(self,user): """ str -> str >>> encoder("user") sfjisjdfl323 (something like that) """ #break the codes into 2 comps cipher = self.cipher[0] extra5 = self.cipher[1] user = regex.sub("\W","",user.lower()) alpha = 'abcdefghijklmnopqrstuvwxyz1234506789' encoded13 = 1 i = 1.0 for c in user: if c in alpha: # use the index c = cipher[alpha.index(c)] encoded13 = encoded13 * ord(c) * i/139.0 i += 1.0 encoded = str(sqrt(encoded13*extra5*1000000)) encoded = encoded.replace("e+","") return "@" + encoded.replace(".","") def snaprint(self, tweet_dic): with open(self.filename,'ab') as csvfile: elements = csv.writer(csvfile, delimiter = ',') elements.writerow(['tweet',''] + tweet_dic.values()) if tweet_dic['user_mentions'] != []: for connection in tweet_dic['user_mentions']: elements.writerow(['user',connection] \ + tweet_dic.values()) if tweet_dic['hashtags'] != []: for connection in tweet_dic['hashtags']: elements.writerow(['hashtags',connection] \ + tweet_dic.values()) if tweet_dic['urls'] != []: for connection in tweet_dic['urls']: elements.writerow(['urls',connection] \ + tweet_dic.values()) def snaprint_anon(self, tweet_dic): with open(self.filename,'ab') as csvfile: elements = csv.writer(csvfile, delimiter = ',') elements.writerow(['tweet',''] + tweet_dic.values()) if tweet_dic['user_mentions'] != []: for connection in tweet_dic['user_mentions']: elements.writerow(['user',connection] \ + tweet_dic.values()) if tweet_dic['hashtags'] != []: for connection in tweet_dic['hashtags']: elements.writerow(['hashtags',connection] \ + tweet_dic.values()) if tweet_dic['urls'] != []: for connection in tweet_dic['urls']: 
elements.writerow(['urls',connection] \ + tweet_dic.values()) def DBmaker(self,main_dic): #makes an empty tweet dictionary to hold info tweet_dic = {} #itterates over each tweet for tweet in main_dic[u'statuses']: #created at tag tweet_dic['created_at'] = tweet[u'created_at'].encode('utf-8') #extracts from_user tweet_dic['from_user'] = "@" + tweet['user']['screen_name'].encode('utf-8') #extracts tweet id tweet_dic['id'] = tweet[u'id_str'].encode('utf-8') #extract source tweet_dic['source'] = tweet[u'source'].encode('utf-8') #extract withheld if tweet.has_key('withheld_in_countries'): tweet_dic['withheld_in_countries'] = tweet['withheld_in_countries'] else: tweet_dic['withheld_in_countries'] = "" #get the max id that will be put back into the api call self.max_id = tweet['id'] - 1 #extract follower count and other user info tweet_dic['followers'] = str(tweet['user']['followers_count']) tweet_dic['user_created_at'] = str(tweet['user']['created_at']) tweet_dic['user_lang'] = str(tweet['user']['lang']) tweet_dic['user_statuses'] = str(tweet['user']['statuses_count']) tweet_dic['user_utc_offset'] = str(tweet['user']['utc_offset']) #extract urls tweet_dic['urls'] = "" if tweet['entities'].has_key('urls'): tweet_dic['urls'] = [] for url in tweet[u'entities'][u'urls']: tweet_dic['urls'] = tweet_dic['urls'] + [url[u'expanded_url'].encode('utf-8')] #extracts hashtags tweet_dic['hashtags'] = '' if tweet['entities'].has_key('hashtags'): tweet_dic['hashtags'] = [] for tag in tweet[u'entities'][u'hashtags']: tweet_dic['hashtags'] = tweet_dic['hashtags'] + [tag[u'text'].encode('utf-8')] #extract time zone tweet_dic['time_zone'] = "" if tweet['user']['time_zone'] != None: tweet_dic['time_zone'] = tweet['user']['time_zone'].encode('utf-8') #extract retweets tweet_dic['retweet_count'] = str(tweet['retweet_count']) #extract favorites tweet_dic['favorite_count'] = "" if tweet['metadata'].has_key('favorite_count'): tweet_dic['favorite_count'] = str(tweet['metadata']['favorite_count']) 
#extracts text into string tweet_text = tweet[u'text'].encode('utf-8') tweet_dic['text'] = regex.sub('\t|\n|\r|,|"',"",tweet_text) #extracts user_mentions tweet_dic['user_mentions'] = '' if tweet[u'entities'][u'user_mentions'] != []: tweet_dic['user_mentions'] = [] for user in tweet[u'entities'][u'user_mentions']: tweet_dic['user_mentions'] = tweet_dic['user_mentions'] + ["@" + user[u'screen_name'].encode('utf-8')] #extracts media type tweet_dic['media_type'] = '' tweet_dic['media_url'] = '' if tweet['entities'].has_key('media'): tweet_dic['media_type'] = [] tweet_dic['media_url'] = [] for media in tweet['entities'][u'media']: tweet_dic['media_type'] = tweet_dic['media_type'] + [media[u'type'].encode('utf-8')] tweet_dic['media_url'] = tweet_dic['media_url'] + [media[u'media_url'].encode('utf-8')] #extract reply tweet_dic['in_reply_to_screen_name'] = str(tweet['in_reply_to_screen_name']) #extract reply status tweet_dic['in_reply_to_status_id_str'] = str(tweet['in_reply_to_status_id_str']) #extract language tweet_dic['lang'] = tweet['lang'] if self.anon == True: tweet_dic['text'] = tweet_dic['text'].replace('"',"'") ##anon the from user tweet_dic['from_user'] = self.encoder(tweet_dic['from_user']) tweet_dic['in_reply_to_screen_name'] = self.encoder(tweet_dic['in_reply_to_screen_name']) #annon the rest if tweet_dic['user_mentions'] != []: i = 0 for mention in tweet_dic['user_mentions']: #anon the mentio mention_anon = self.encoder(mention) #replace in text tweet_dic['text'] = regex.sub(mention,mention_anon,tweet_dic['text']) #replace in mentions and move to the next one tweet_dic['user_mentions'][i] = mention_anon i += 1 self.snaprint(tweet_dic) else: self.snaprint(tweet_dic) def search(self,searchterm): #tell it to keep going keepgoing = True # set url + parameters url = "https://api.twitter.com/1.1/search/tweets.json" self.parameters = {'q':searchterm,'count':'500','result_type':'recent', 'include_entities':'true','since_id':str(self.sinceid)} main_dic = 
self.twitterreq(url) while keepgoing == True: if "errors" in main_dic.keys(): print(main_dic) break elif main_dic['statuses'] == []: print("There are no more Statuses") break else: self.DBmaker(main_dic) self.parameters['max_id'] = self.max_id main_dic = self.twitterreq(url) def initexportfile(self,filename = "export.csv"): self.filename = filename with open(filename,'wb') as csvfile: headerwriter = csv.writer(csvfile, delimiter = ',') headerwriter.writerow(['type','connection','user_lang', 'text', 'hashtags', 'user_utc_offset', 'id', 'favorite_count', 'source', 'in_reply_to_screen_name', 'followers', 'retweet_count', 'media_type', 'media_url', 'user_mentions', 'withheld_in_countries', 'from_user', 'lang', 'user_created_at', 'created_at', 'time_zone', 'user_statuses', 'in_reply_to_status_id_str','url']) def decoder(self,decoderfile="decoder.csv",propfile="dosholder.txt"): f = open(propfile,'r') export = open(decoderfile,'w') export.write('"' + "user" + '","' + "user_anon" + '"\n') for user in f: user = user.replace("\W","").lower() user_anon = self.encoder(user) export.write('"' + user + '","' + user_anon + '"\n') f.close() export.close() if __name__ == '__main__': #edit for searching in non english languages reload(sys) sys.setdefaultencoding("utf-8") #if true the output will be anonymized if false it will not instance = twitterapi(False) #iniciate file instance.initexportfile() #get keys instance.getkeys() #set search term instance.search("russia")
UTF-8
Python
false
false
2,014
6,073,083,779,290
214e0a548915684574505f01062a25108b0e532d
28f3978b3be025392fd6b89ebc3739c12138afca
/src/progresslist.py
96e5518e3a9d1261eb4bf37d55d8fa207567cc8f
[]
no_license
takuyozora/acirpe-install
https://github.com/takuyozora/acirpe-install
56091d73056d6ceac46c03acdfc006ddf3a460d5
b016edf1e3c48eb40f7ef7ed1ccee145493a02ec
refs/heads/master
2016-09-05T21:59:28.901708
2012-07-04T14:20:04
2012-07-04T14:20:04
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from gi.repository import Gtk, Gdk class ProgressList(Gtk.VBox): def __init__(self,width=200,mtop=30,mside=10,mbottom=100,on_change=None,*args,**kwargs): Gtk.VBox.__init__(self,spacing=2,*args,**kwargs) self.set_property("width_request",width) self.set_property("margin-top",mtop) self.set_property("margin-right",mside) self.set_property("margin-left",mside) self.set_property("margin-bottom",mbottom) self.on_change = on_change self._elems = list() self._old = 0 self._active = 0 self._order = dict() self.connect("map",self._update) def place(self): sep = Gtk.VBox() sep.pack_start(Gtk.HSeparator(height_request=1),False,False,0) sepBox = Gtk.HBox() box = Gtk.EventBox() box.override_background_color(Gtk.StateFlags.NORMAL,Gdk.RGBA(0.9,0.9,0.9,1)) box.add(self) sepBox.pack_start(box,False,False,0) sepBox.pack_start(Gtk.VSeparator(width_request=1),False,False,0) sep.pack_start(sepBox,True,True,0) return sep def pack_end(self,title,*args,**kwargs): label = Gtk.Label(label=title,wrap=True,justify=Gtk.Justification.LEFT,xalign=0,yalign=0) self._elems.insert(0,{"widget":label,"label":label.get_label()}) Gtk.VBox.pack_end(self,label,False,False,0) def pack_start(self,title,*args,**kwargs): label = Gtk.Label(label=title,wrap=True,justify=Gtk.Justification.LEFT,xalign=0,yalign=0) self._elems.append({"widget":label,"label":label.get_label()}) Gtk.VBox.pack_start(self,label,False,False,0) def pack_list(self,liste): for elem in liste: self.pack_start(elem) def gtk_widget_draw(self): self._update() Gtk.VBox.gtk_widget_draw(self) def get_current_label(self): return elf._elems[self._active]["label"] def next_active(self,*args): self._old = self._active self._active += 1 if self._active >= len(self._elems): self._active = 0 self._update() def next_step(self,*args): self.next_active(*args) def prev_active(self,*args): self._old = self._active self._active -= 1 if self._active < 0: self._active = len(self._elems)-1 self._update() def _update(self,*args): 
self._elems[self._old]["widget"].set_label(self._elems[self._old]["label"]) self._elems[self._active]["widget"].set_markup("<b>"+self._elems[self._active]["label"]+"</b>") if self.on_change is not None: self.on_change(self._elems[self._active]["label"])
UTF-8
Python
false
false
2,012
2,740,189,154,562
29f3ef5de1f5ab7c71077de376dc6cdc0d19d290
21d3e99e57f7ea00214bc495172084d950f7815a
/Desktop/desktop.py
a113052aa9f67eb5438f723f85b92b997055db9f
[]
no_license
philikon/ios-jpake-test
https://github.com/philikon/ios-jpake-test
b63882d503d18539d64ddc5255b14df617ae1c92
511642140318f4bd7d1c07b11f2e48b56bfe142b
refs/heads/master
2021-01-18T08:39:58.942258
2010-10-13T11:09:00
2010-10-13T11:09:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python # We are the Desktop import sys, time, urllib2, hmac, base64, binascii try: import json as simplejson except ImportError: import simplejson from jpake import JPAKE, params_80, params_112, params_128 from M2Crypto.EVP import Cipher, hmac from hashlib import sha256, sha1 def get(url, etag = None): headers = {} if etag: headers['If-None-Match'] = etag request = urllib2.Request(url, None, headers) response = urllib2.urlopen(request) data = response.read() return simplejson.loads(data) def put(url, data): opener = urllib2.build_opener(urllib2.HTTPHandler) json = simplejson.dumps(data) request = urllib2.Request(url, data=json) request.add_header('Content-Type', 'application/json') request.get_method = lambda: 'PUT' response = urllib2.urlopen(request) return response.info().getheader('Etag') def delete(url): opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(url) request.get_method = lambda: 'DELETE' response = urllib2.urlopen(request) def encrypt(data, key, iv): cipher = Cipher(alg='aes_256_cbc', key=key, iv=iv, op=1) res = cipher.update(data) res += cipher.final() return res def decrypt(data, key, iv): cipher = Cipher(alg='aes_256_cbc', key=key, iv=iv, op=0) res = cipher.update(data) res += cipher.final() return res s = sys.argv[1] (password,channel) = s[:4],s[4:] url = "http://localhost:5000/%s" % channel print "X Password = %s" % password print "X URL = %s" % url j = JPAKE(password, signerid="sender", params=params_80) # Get Server.Message1 print "X Getting Server.Message1" server_one = get(url) print "X Got Server.Message1: %s" % str(server_one) # Put Client.Message1 print "X Putting Client.Message1" client_one = { 'type': 'sender1', 'payload': j.one() } client_one_etag = put(url, client_one) print "X Put Client.Message1 (etag=%s) %s" % (client_one_etag, client_one) # Get Server.Message2 print "X Getting Server.Message2" while True: try: server_two = get(url, client_one_etag) break except urllib2.HTTPError, e: if e.code == 
304: print "X Did not get right response yet. Trying again." pass else: raise time.sleep(1) print "X Got Server.Message2: %s" % server_two # Put Client.Message2 print "X Putting Client.Message2" client_two = { 'type': 'sender2', 'payload': j.two(server_one['payload']) } client_two_etag = put(url, client_two) print "X Put Client.Message2 (etag=%s) %s" % (client_two_etag, client_two) # Get Server.Message3 print "X Getting Server.Message3" while True: try: server_three = get(url, client_two_etag) break except urllib2.HTTPError, e: if e.code == 304: print "X Did not get right response yet. Trying again." pass else: raise time.sleep(1) print "X Got Server.Message3: %s" % server_three # COMPARE KEYS print "X Generating key" key = j.three(server_two['payload']) print "X Generated key: %s" % key print "X Comparing keys" print "X Desktop H(K) = %s" % sha256(sha256(key).digest()).hexdigest() print "X Mobile H(K) = %s" % server_three['payload'] if server_three['payload'] != sha256(sha256(key).digest()).hexdigest(): print "X KEY FAIL" delete(url) sys.exit(1) # Put Client.Message3 iv = '0123456780abcdef' cleartext = simplejson.dumps({ 'message': sys.argv[2] }) ciphertext = base64.b64encode(encrypt(cleartext, key, iv)) hmac_hex = binascii.hexlify(hmac(key, cleartext, algo="sha256")) payload = {'ciphertext': ciphertext, 'IV': base64.b64encode(iv), 'hmac': hmac_hex} print "X Putting Client.Message3" client_three = { 'type': 'sender3', 'payload': payload } client_three_etag = put(url, client_three) print "X Put Client.Message3 (etag=%s) %s" % (client_three_etag, client_three)
UTF-8
Python
false
false
2,010
18,511,309,084,011
d992f35550fc7b3055b6ddd389f3b7c6a445576f
93e8ee76fcebb4ff087fa1c49a3417d1dd464b4c
/src/systemmap.py
3bf26e73c71358ad50c13f44a3cb7a9a30716e4e
[]
no_license
flyrain/data_layout
https://github.com/flyrain/data_layout
006833e9b3fe78a319a6776ea693aff02afe2915
355ca3b20eb9ac7d4d98a9ef764d9f3c2207e8c5
refs/heads/master
2020-06-09T06:57:57.321426
2014-05-09T18:00:08
2014-05-09T18:00:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys filename1 = sys.argv[1] filename2 = sys.argv[2] f1 = open(filename1,'r') f2 = open(filename2, 'r') lines1 = f1.readlines() lines2 = f2.readlines() begin1 =0 begin1 =0 for i in range(len(lines1)): content1 = lines1[i].split()[3] content2 = lines2[i].split()[3] if content1 != content2: print lines1[i], print lines2[i]
UTF-8
Python
false
false
2,014
12,987,981,154,023
4f0c70e00f1f97010e0af19f3c3c1518eeef9ff9
60faa819fb1d680ca100a90e08ed58bf6725e135
/pyon/core/management/commands/clearcache.py
2b45258479e49ad23229f52fc9a698c7832a186d
[]
no_license
ShaneDrury/pyon
https://github.com/ShaneDrury/pyon
709c7605a34dad5bbea1ef4c25a601ff879b0fd2
938a7f9654aadd640dff658701ac2956c0da8f02
refs/heads/master
2020-05-20T15:52:39.519447
2014-09-08T13:10:22
2014-09-08T13:10:22
19,145,930
0
0
null
false
2014-08-14T13:20:11
2014-04-25T12:55:41
2014-07-04T23:22:44
2014-08-14T13:20:11
3,004
1
0
10
Python
null
null
import logging from django.core.management import BaseCommand from pyon.core.cache import cache log = logging.getLogger(__name__) class Command(BaseCommand): args = '<key1 key2 ...>' help = 'Clears the specific caches' def handle(self, *args, **options): if 'all' in args or args is (): self.clear_all() else: log.info("Clearing {}".format(args)) cache.delete_many(args) def clear_all(self): log.info("Clearing entire cache") cache.clear()
UTF-8
Python
false
false
2,014
11,484,742,591,807
27b16e9515c7f6b354e55dea43e38c9caa7be241
7fe51a5eceb9105c403e24cc81980495c613857f
/salt/apps/dictionary/models.py
d1566b422fdd52fe5b09ff5513e632aae29c4eb3
[]
no_license
rootart/salt-dict
https://github.com/rootart/salt-dict
df3c895c132fa38453483723a59a9b8e0f22005c
52dd075459f8f4b528533051d6970d514d44737a
refs/heads/master
2016-09-05T10:37:42.998259
2013-11-20T11:44:08
2013-11-20T11:44:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from datetime import datetime from django.db import models from django.utils.translation import ugettext as _ class Source(models.Model): name = models.CharField(_('source name'), blank=True, max_length=100) full_name = models.CharField(_('source full name'), blank=True, max_length=100) def __unicode__(self): return self.name class Meta: verbose_name=_("Source") verbose_name_plural=_('Sources') class Definition(models.Model): name_ru = models.CharField(_('name RU'), blank=True, max_length=255, db_index=True ) name_en = models.CharField(_('name EN'), blank=True, max_length=255, db_index=True ) name_de = models.CharField(_('name DE'), blank=True, max_length=255, db_index=True ) created = models.DateTimeField(auto_now_add=True, default=datetime.now) modefied = models.DateTimeField(auto_now=True, default=datetime.now) def __unicode__(self): return "%s %s %s" % (self.name_ru, self.name_en, self.name_de) class Meta: verbose_name=_("Definition") verbose_name_plural=_('Definitions') ordering=('name_en',) class DefinitionSource(models.Model): definition = models.ForeignKey(Definition, verbose_name=_('definition'), related_name='sources' ) text = models.TextField(blank=True, null=True, verbose_name=_('definition text') ) source = models.ForeignKey(Source, verbose_name=_('source'), blank=True, null=True ) bib_info = models.CharField(_('bib info'), blank=True, max_length=255 ) position = models.PositiveIntegerField( default=0, verbose_name=_('position') ) def __unicode__(self): return "%s - %s" % (self.definition.name_ru, self.source.name if self.source else '') class Meta: verbose_name=_("Definition source") verbose_name_plural=_('Definition sources') ordering = ['position',]
UTF-8
Python
false
false
2,013
11,055,245,839,653
c78b1856fe62c8cc1cf44ceaaa9a932432262a31
84f1b15acad80992a71faa18dbbc1afdd284deb4
/sucks/tests/test_views.py
d23d33dc8180e38f91c4981e67e225e359988f6b
[]
no_license
alejnaser/pass-battleship
https://github.com/alejnaser/pass-battleship
9611b82e4a356cbd4d8ed0cef2e073dc2175d62a
ca757018db1853d5d55049615157406145b5881b
refs/heads/master
2018-05-12T22:01:48.398066
2013-11-24T13:36:06
2013-11-24T13:36:06
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- encoding: utf-8 -*- from mock import Mock from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test import TestCase from django.contrib.auth.forms import UserCreationForm from sucks.models import Match, Battleship from sucks.forms import StandardMatchForm, AlternativeMatchForm from sucks.helpers import createShip class ViewsTest(TestCase): fixtures = ['users.json', 'fleet_configs.json', 'sq_board_configs.json', 'match_configs.json', 'matches.json'] def setUp(self): super(ViewsTest, self).setUp() self.client.post('/login/', { 'username': 'AleX', 'password': 'tester'}) def test_index(self): response = self.client.get(reverse('sucks:index')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/index.html') self.assertContains(response, "matchesList") def test_match(self): match = Match.find(1) match.add_player = Mock(return_value=None) user = User.objects.get(pk=1) response = self.client.get( reverse('sucks:match', args=(1,))) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/match.html') match.add_player.assert_called_once(user) response = self.client.get( reverse('sucks:match', args=(2,)), follow=True) self.assertRedirects(response, reverse("index")) self.assertContains(response, "La partida seleccionada no existe") self.client.post('/login/', { 'username': 'NoeX', 'password': 'tester'}) response = self.client.get( reverse('sucks:match', args=(1,))) self.client.post('/login/', { 'username': 'Luli', 'password': 'tester'}) response = self.client.get( reverse('sucks:match', args=(1,)), follow=True) self.assertRedirects(response, reverse("index")) self.assertContains( response, "No tiene permitido el acceso a esta partida") def test_add_match(self): response = self.client.get(reverse('sucks:add-match')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/add_match.html') 
self.assertTrue(response.context['standard_match']) self.assertEqual(StandardMatchForm, response.context['standard_match_form'].__class__) self.assertEqual(AlternativeMatchForm, response.context['alternative_match_form'].__class__) response = self.client.post(reverse('sucks:add-match'), { 'standard_match': "true", 'number_of_players': 2, }, follow=True) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/match.html') self.assertContains( response, "Partida creada exitosamente") response = self.client.post(reverse('sucks:add-match'), { 'standard_match': "false", 'players': 2, 'patrols': 2, 'frigates': 2, 'carriers': 2, 'board_size': 11, 'submarines': 2, 'battleships': 2 }, follow=True) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/match.html') self.assertContains( response, "Partida creada exitosamente") response = self.client.post(reverse('sucks:add-match'), { 'standard_match': "false", 'patrols': 2, 'frigates': 2, 'carriers': 2, 'board_size': 11, 'submarines': 2, 'battleships': 2 }, follow=True) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/add_match.html') self.assertContains( response, "Error al crear partida") response = self.client.post(reverse('sucks:add-match'), { 'standard_match': "true", 'number_of_players': -1, }, follow=True) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'sucks/add_match.html') self.assertContains( response, "Error al crear partida") def test_sign_up(self): response = self.client.get(reverse('sign-up')) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'registration/sign_up.html') self.assertEqual(UserCreationForm, response.context['form'].__class__) response = self.client.post(reverse('sign-up'), { 'username': "Coti", 'password1': "tester", 'password2': "tester" }, follow=True) self.assertEqual(response.status_code, 200) self.assertRedirects(response, reverse("login")) 
response = self.client.post(reverse('sign-up'), { 'username': "Laura", 'password1': "tester1", 'password2': "tester2" }, follow=True) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, 'registration/sign_up.html') self.assertContains( response, "The two password fields didn&#39;t match.") def test_create_ship(self): response = self.client.post(reverse('sucks:add-ship'), { 'match': "1", }, follow=True) self.assertEqual(response.status_code, 200) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertTemplateUsed(response, 'sucks/match.html') self.assertContains(response, "Error al crear barco") response = self.client.post(reverse('sucks:add-ship'), { 'match': "1", 'coord_x': 1, 'coord_y': 1, 'ship_type': 0, 'orientation': True }, follow=True) self.assertEqual(response.status_code, 200) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertTemplateUsed(response, 'sucks/match.html') self.assertNotContains(response, "Error al crear barco") def test_start_match(self): Match.is_ready = Mock(return_value=True) response = self.client.get( reverse('sucks:start-match', args=(1,)), follow=True) self.assertEqual(response.status_code, 200) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertTemplateUsed(response, 'sucks/match.html') self.assertContains(response, "La partida ha iniciado exitosamente") Match.is_ready = Mock(return_value=False) response = self.client.get( reverse('sucks:start-match', args=(1,)), follow=True) self.assertEqual(response.status_code, 200) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertTemplateUsed(response, 'sucks/match.html') self.assertContains(response, "La partida aún no puede comenzar") def test_turn_action(self): match = Match.find(1) alex = User.objects.get(pk=1) noe = User.objects.get(pk=2) match.add_player(alex) match.add_player(noe) for player in match.players.all(): ship_data = { 'match': 1, 'coord_x': 1, 'coord_y': 
1, 'ship_type': 0, 'orientation': True } createShip(match, ship_data, player) ship_data['coord_x'] = 3 ship_data['ship_type'] = 1 createShip(match, ship_data, player) ship_data['coord_x'] = 5 ship_data['ship_type'] = 2 createShip(match, ship_data, player) ship_data['coord_x'] = 7 ship_data['ship_type'] = 3 createShip(match, ship_data, player) ship_data['coord_x'] = 3 ship_data['coord_y'] = 8 ship_data['ship_type'] = 4 ship_data['orientation'] = False createShip(match, ship_data, player) self.assertTrue(match.setup_complete(alex)) self.assertTrue(match.setup_complete(noe)) match.start() self.assertTrue(match.is_started()) self.assertEqual(alex, match.player_with_turn()) turn_data = { "attacked_x": 1, "attacked_y": 1, "opponent_attacked": 2, "ofensive_action": 1, "defensive_action": 2, "coord_x": 1, "coord_y": 1, "movement_direction": 0, "match": 1, } response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(200, response.status_code) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertEqual(noe, response.context["player_with_turn"]) turn_data["ofensive_action"] = 0 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(200, response.status_code) self.assertRedirects(response, reverse('sucks:match', args=(1,))) self.assertEqual(alex, response.context["player_with_turn"]) turn_data["ofensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(alex, response.context["player_with_turn"]) self.assertContains( response, "La maquinaria de sus acorazados aún debe enfriarse") turn_data["ofensive_action"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(alex, response.context["player_with_turn"]) self.assertContains( response, "Debe seleccionar una acción ofensiva válida") alex_board = match.board_for_player(alex) battleship = 
Battleship.objects.get(board=alex_board) battleship.sunk = True battleship.save() turn_data["ofensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(alex, response.context["player_with_turn"]) self.assertContains( response, "No cuenta con acorazados no hundidos, por lo\ tanto no es capáz de realizar este ataque") battleship.delete() response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(alex, response.context["player_with_turn"]) self.assertContains( response, "Debe contar con un bote acorazado\ para realizar este ataque") turn_data["ofensive_action"] = 0 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(noe, response.context["player_with_turn"]) turn_data["ofensive_action"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(alex, response.context["player_with_turn"]) response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(noe, response.context["player_with_turn"]) response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertEqual(noe, response.context["player_with_turn"]) self.assertContains( response, "Ya ha realizado todos los ataques de radar\ que tenía disponible contra este jugador") turn_data["ofensive_action"] = 1 turn_data["opponent_attacked"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Debe seleccionar un oponente a ser atacado") turn_data["opponent_attacked"] = 5 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Ha seleccionado a un oponente inválido") turn_data["opponent_attacked"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "No puede atacarse a sí mismo") 
turn_data["opponent_attacked"] = 'a' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Ingrese información válida sobre el\ oponente a atacar") turn_data["opponent_attacked"] = 2 turn_data["attacked_x"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Seleccione una celda a ser atacada") turn_data["attacked_x"] = 10 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Eliga una fila entre los límites del tablero") turn_data["attacked_x"] = 1 turn_data["attacked_y"] = 10 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Eliga una columna entre los límites del tablero") turn_data["attacked_y"] = 'a' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Ingrese información válida sobre la celda a atacar") turn_data["ofensive_action"] = 0 turn_data["defensive_action"] = 1 turn_data["attacked_y"] = 1 turn_data["coord_x"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Seleccione una celda para la acción defensiva") turn_data["coord_x"] = 10 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Eliga una fila entre los límites del tablero") turn_data["coord_x"] = 1 turn_data["coord_y"] = 10 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Eliga una columna entre los\ límites del tablero") turn_data["coord_y"] = 'a' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Ingrese información válida sobre la\ celda para la acción defensiva") turn_data["coord_y"] = 1 turn_data["defensive_action"] = 0 
turn_data["movement_direction"] = 6 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Seleccione una dirección de desplazamiento válida") turn_data["movement_direction"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Debe seleccionar una dirección de desplazamiento") turn_data["defensive_action"] = '' response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Debe seleccionar una acción defensiva válida") turn_data["defensive_action"] = 2 turn_data["ofensive_action"] = 0 turn_data["attacked_x"] = 1 turn_data["attacked_y"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["coord_x"] = 0 turn_data["coord_y"] = 0 turn_data["defensive_action"] = 0 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Selección inválida del bote para la acción defensiva") turn_data["coord_x"] = 1 turn_data["coord_y"] = 1 turn_data["defensive_action"] = 7 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Debe seleccionar una acción defensiva válida") turn_data["defensive_action"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.client.post('/login/', { 'username': 'NoeX', 'password': 'tester'}) turn_data["defensive_action"] = 0 turn_data["movement_direction"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "No puede desplazar ni sumergir un bote\ que se encuentra dañado") turn_data["coord_x"] = 5 turn_data["coord_y"] = 1 turn_data["defensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Sólo los submarinos pueden\ sumergirse") 
turn_data["opponent_attacked"] = 1 turn_data["coord_x"] = 7 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 2 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) turn_data["defensive_action"] = 1 response = self.client.post( reverse('sucks:turn-action'), turn_data, follow=True) self.assertContains( response, "Ya ha realizado los 3 sumergimientos\ que tenía disponible para esta partida")
UTF-8
Python
false
false
2,013
2,327,872,302,120
67c5f099f579585ebde021521647d631002d26fd
67d450f8b14e278c48ca160cce8c1d92b4d20196
/cob_calibration_executive/src/cob_calibration_executive/generate_positions.py
b15097b4964cd24c52b6781c516fa45fb802358f
[]
no_license
ipa-jsf-jy/cob_calibration
https://github.com/ipa-jsf-jy/cob_calibration
b52cdbc8fef49931c27a68d5991c983a0ee26105
d0b93a4434fd82bb92d07b446022fc534c3c05b5
refs/heads/master
2020-12-25T00:55:33.597254
2013-07-05T08:50:25
2013-07-05T08:50:25
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python ################################################################# ##\file # # \note # Copyright (c) 2011-2012 \n # Fraunhofer Institute for Manufacturing Engineering # and Automation (IPA) \n\n # ################################################################# # # \note # Project name: care-o-bot # \note # ROS stack name: cob_calibration # \note # ROS package name: cob_calibration_executive # # \author # Author: Sebastian Haug, email:[email protected] # \author # Supervised by: Florian Weisshardt, email:[email protected] # # \date Date of creation: January 2012 # ################################################################# # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. \n # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. \n # - Neither the name of the Fraunhofer Institute for Manufacturing # Engineering and Automation (IPA) nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. \n # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License LGPL as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License LGPL for more details. 
# # You should have received a copy of the GNU Lesser General Public # License LGPL along with this program. # If not, see <http://www.gnu.org/licenses/>. # ################################################################# PKG = 'cob_calibration_executive' NODE = 'collect_robot_calibration_data_node' import roslib roslib.load_manifest(PKG) import rospy from kinematics_msgs.srv import GetPositionIK, GetPositionIKRequest from geometry_msgs.msg import PoseStamped from pr2_controllers_msgs.msg import JointTrajectoryControllerState import tf import numpy as np import yaml import os from cob_calibration_executive.torso_ik import TorsoIK from simple_script_server import simple_script_server #board = Checkerboard(self.pattern_size, self.square_size) #checkerboard_detector=CheckerboardDetector() #latest_image=Image() def getIk(arm_ik, (t, q), link, seed=None): ''' query arm_ik server for joint_position which put arm_7_link to pose @param arm_ik: arm_ik service proxy @param t: translation @param q: rotation as quaternion @param link: frame in which pose (t, q) is defined @param seed: initial joint positions for ik calculation, in None current joint pos of arm is used. @return: tuple of joint_positions or None if ik was not found ''' #print link # get joint names for arm from parameter server joint_names = None try: joint_names = rospy.get_param( "arm_controller/joint_names") # real hardware except KeyError: pass try: joint_names = rospy.get_param( "arm_controller/joints") # simulation except KeyError: pass if joint_names is None: print "Could not get arm joint_names from parameter server." 
return None msg = rospy.wait_for_message( "/arm_controller/state", JointTrajectoryControllerState) if seed is None: seed = msg.actual.positions # create and send ik request req = GetPositionIKRequest() req.timeout = rospy.Duration(1.0) req.ik_request.ik_link_name = link req.ik_request.ik_seed_state.joint_state.position = seed req.ik_request.ik_seed_state.joint_state.name = msg.joint_names req.ik_request.pose_stamped.header.frame_id = 'arm_0_link' req.ik_request.pose_stamped.pose.position.x = t[0] req.ik_request.pose_stamped.pose.position.y = t[1] req.ik_request.pose_stamped.pose.position.z = t[2] req.ik_request.pose_stamped.pose.orientation.x = q[0] req.ik_request.pose_stamped.pose.orientation.y = q[1] req.ik_request.pose_stamped.pose.orientation.z = q[2] req.ik_request.pose_stamped.pose.orientation.w = q[3] # try to get inverse kinecmatics for at least 3 times for i in range(3): resp = arm_ik(req) if resp.error_code.val == resp.error_code.SUCCESS: break # report sucess or return None on error if resp.error_code.val == resp.error_code.SUCCESS: result = list(resp.solution.joint_state.position) return result def calculate_ik(pose, arm_ik, seed=[0.7, -1.6, 4.4, 0.5, 1.2, 1.5, 3.0]): via_home = False for seed in [seed]: joint_positions = getIk(arm_ik, pose, "sdh_palm_link", seed) if joint_positions is not None: if seed is [0, 0, 0, 0, 0, 0, 0]: via_home = True #print 'Found IK @ %s' % joint_positions break else: print "--> ERROR no IK solution was found..." 
return joint_positions, via_home def get_cb_pose_head(listener, base_frame): return listener.lookupTransform(base_frame, '/chessboard_center', rospy.Time(0)) def get_cb_pose(listener, base_frame): return listener.lookupTransform(base_frame, '/chessboard_position_link', rospy.Time(0)) def main(): rospy.init_node(NODE) print "==> %s started " % NODE chessboard_pose = rospy.Publisher( '/cob_calibration/chessboard_pose', PoseStamped) print 'chessboard_pose publisher activated' listener = tf.TransformListener() rospy.sleep(1.5) arm_ik = rospy.ServiceProxy('/cob_ik_wrapper/arm/get_ik', GetPositionIK) ''' (t,r)=listener.lookupTransform('/arm_0_link','arm_7_link',rospy.Time(0)) a=calculate_ik((t,r), arm_ik) print a[0] ''' # init print "--> initializing sss" sss = simple_script_server() sss.init("base") sss.init("torso") sss.init("head") sss.recover("base") sss.recover("torso") sss.recover("head") print "--> setup care-o-bot for capture" sss.move("head", "back") calibration_seed = rospy.get_param("/script_server/arm/calibration") #sss.move("arm",[a[0]]) nextPose = PoseStamped() torso = TorsoIK() torso.set_camera_viewfield(rospy.get_param('~camera_view_angle')) # lissajous like figure for rotation cb_tip_offset = 3 cb_tip_positions = [(0, 0), (1, 0), (0, 1), (-1, 0), (0, -1)] quaternion = [] for cb_tip_p in cb_tip_positions: temp = list(cb_tip_p) temp.append(cb_tip_offset) vector_to = np.matrix(temp) vector_from = np.matrix([0, 0, 1]) a = vector_to + vector_from a = a / np.linalg.norm(a) #print a ''' #print '*'*20 print type(a) print a.T.shape print type(vector_from) print vector_from.T.shape ''' w = np.dot(vector_from, a.T) vector_from = vector_from.tolist()[0] a = a.tolist()[0] #print vector_from #print a x = vector_from[1] * a[2] - vector_from[2] * a[1] y = vector_from[2] * a[0] - vector_from[0] * a[2] z = vector_from[0] * a[1] - vector_from[1] * a[0] quaternion.append(tuple(tf.transformations.quaternion_multiply( [0, 0, 0, 1], [x, y, z, w]).reshape(1, 4).tolist()[0])) # 
define cuboid for positions # limits from base_link frame limits = {'x': (-0.5, -1.2), 'y': (-0.3, 0.3), 'z': (0.5, 1.0)} sample_density = {'x': 5, 'y': 5, 'z': 5} sample_positions = {'x': [], 'y': [], 'z': []} for key in limits.keys(): limits[key] = sorted(limits[key]) sample_positions[key].append(limits[key][0]) diff = limits[key][1] - limits[key][0] step = 1.0 * diff / (sample_density[key] - 1) # print key, ' ',diff,' ',step while sample_positions[key][-1] + step <= (limits[key][1] + 0.01): sample_positions[key].append(sample_positions[key][-1] + step) joint_states = [] for x in sample_positions['x']: for y in sample_positions['y']: for z in sample_positions['z']: for q in quaternion: nextPose.header.frame_id = '/base_link' nextPose.pose.position.x = x nextPose.pose.position.y = y nextPose.pose.position.z = z # (0,0,0,1) for cob3-6 nextPose.pose.orientation.x = q[0] nextPose.pose.orientation.y = q[1] nextPose.pose.orientation.z = q[2] nextPose.pose.orientation.w = q[3] chessboard_pose.publish(nextPose) rospy.sleep(0.2) (t, r) = get_cb_pose_head( listener, '/head_color_camera_l_link') angles = calculate_angles(t) if not torso.in_range(angles): continue #print t #(t, r) = get_cb_pose(listener, '/head_cam3d_link') #print t (t, r) = get_cb_pose(listener, '/arm_0_link') js = calculate_ik(( t, r), arm_ik, calibration_seed[0]) if js[0] is not None: print 'IK solution found' else: continue torso_state = [-a for a in torso.calculate_ik(angles)] for torso_js in [torso_state, [0] * len(torso_state)]: joint_states.append({'joint_position': js[ 0], 'torso_position': list(torso_js)}) print joint_states[-1] path = rospy.get_param('~output_path', None) directory = os.path.dirname(path) if path is not None: if not os.path.exists(directory): os.makedirs(directory) with open(path, 'w') as f: f.write('# autogenerated: Do not edit #\n') f.write(yaml.dump(joint_states)) else: print yaml.dump(joint_states) print '%s ik solutions found' % len(joint_states) def calculate_angles(t): ''' 
computes pan and tilt angles for camera like translations z-axis: optical axis y-axis: vertical x-axis: horizontal ''' angles = {} angles['p'] = np.arctan2(t[0], t[2]) angles['t'] = np.arctan2(t[1], t[2]) return angles if __name__ == '__main__': main() print "==> done exiting"
UTF-8
Python
false
false
2,013
8,967,891,753,733
598993998dd940061c58d8809b74cf5ecb1f3bf4
8fcb579e4d5f69df5fc018fc23b6b0bb7dc52244
/mytheme1/views.py
0696f200102ffbf7cb14184a47ccd9042f5dbf00
[]
no_license
toway/chuanda
https://github.com/toway/chuanda
32be35f4f127de31232cb1b6e285efc4e60e9e59
591c85ed5636986186f8bda98c11364be21cd249
refs/heads/master
2020-05-16T00:32:38.627239
2014-08-18T08:54:19
2014-08-18T08:54:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from calendar import month_name from django.http import Http404 from django.shortcuts import get_object_or_404 from mezzanine.blog.models import BlogPost, BlogCategory from mezzanine.blog.feeds import PostsRSS, PostsAtom from mezzanine.conf import settings from mezzanine.generic.models import Keyword from mezzanine.utils.views import render, paginate from mezzanine.utils.models import get_user_model from social_auth import __version__ as version User = get_user_model() '''def show_login(request): if request.user.is_authenticated(): return HttpResponseRedirect('done') else: return render_to_response('logins.html', {'version': version}, RequestContext(request))''' def blog_post_home(request, template="blog/blog_post_list.html"): settings.use_editable() templates = [] category = BlogCategory.objects.get(slug='ads') blog_posts = BlogPost.objects.published(for_user=request.user) ads_posts = None if category: blog_posts = blog_posts.exclude(categories=category) ads_posts = BlogPost.objects.published(for_user=request.user) ads_posts = ads_posts.filter(categories=category) blog_posts = paginate(blog_posts, request.GET.get("page", 1), settings.BLOG_POST_PER_PAGE, settings.MAX_PAGING_LINKS) context = {"blog_posts": blog_posts, "ads_posts":ads_posts, "year": None, "month": None, "tag": None, "category": None, "author": None} templates.append(template) return render(request, templates, context)
UTF-8
Python
false
false
2,014
1,537,598,328,129
8dfe44f049ddcec3981ac33b0c42564b0eff31f2
f11b387d32c41dc9c3cfbadee0784f59762e1e84
/Chapter 2/7_5Leaderboard_cyclopeptide.py
8523261297f381646e52caca41cc4804659e3162
[]
no_license
y12uc231/Bioinformatics-Algorithms-part-I-
https://github.com/y12uc231/Bioinformatics-Algorithms-part-I-
14934583bb5fa930f97669dc5e5171bc91b911c1
a6f2e3076aaf9109af6c2cbf35c2316692a96e64
refs/heads/master
2021-01-11T15:27:22.284413
2014-10-25T03:34:55
2014-10-25T03:34:55
80,347,591
0
1
null
true
2017-01-29T13:31:56
2017-01-29T13:31:56
2014-10-25T03:33:24
2014-10-25T03:34:55
7,220
0
0
0
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Nov 1 21:04:11 2013 ODE CHALLENGE: Implement CYCLOPEPTIDESEQUENCING (pseudocode reproduced below). Note: After the failure of the first brute-force algorithm we considered, you may be hesitant to implement this algorithm for fear that it will take a long time to run. The potential problem with CYCLOPEPTIDESEQUENCING is that it may generate incorrect k-mers at intermediate stages (i.e., k-mers that are not subpeptides of a correct solution). You may wish to wait to implement CYCLOPEPTIDESEQUENCING until after the next section, where we will analyze this algorithm. CYCLOPEPTIDESEQUENCING(Spectrum) List ← {0-peptide} while List is nonempty List ← Expand(List) for each peptide Peptide in List if Cyclospectrum(Peptide) = Spectrum output Peptide remove Peptide from List else if Peptide is not consistent with Spectrum remove Peptide from List Sample Input: 0 113 128 186 241 299 314 427 Sample Output: 186-128-113 186-113-128 128-186-113 128-113-186 113-186-128 113-128-186 """ from heapq import nlargest from mass import mass_of_AA from collections import Counter #import itertools #from product import gen_list #from masssubp import masssubpeptides temp = mass_of_AA () f = open('7_7.txt', 'r') Spectrum1 = f.read() f.close #print Spectrum1 Spectrum1 = Spectrum1.translate(None, '\n') Spectrum1 = Spectrum1.split() Spectrum = [] N=180 for num in Spectrum1: Spectrum.append(int(num)) #print Spectrum[-1], type(Spectrum[0]) #Spectrum = [0, 71, 113, 129, 147, 200, 218, 260, 313, 331, 347, 389, 460] #Spectrum = [0,71,113,115,129,131,137,147,156,163,186,227,244,244,250,257,262,284,292,317,319,381,388,390,391,397,399,407,413,430,448,501,512,519,528,528,544,554,563,567,576,634,638,641,643,657,691,705,707,710,714,772,781,785,794,804,820,820,829,836,847,900,918,935,941,949,951,957,958,960,967,1029,1031,1056,1064,1086,1091,1098,1104,1104,1121,1162,1185,1192,1201,1211,1217,1219,1233,1235,1277,1348] counter = dict(Counter(Spectrum)) seed = {} length = 
0 for element in Spectrum: for AA in temp: if element == temp[AA]: seed[AA] = element length+=1 print seed, length def submass(peptide): intpeptide = [] #print peptide for AA in peptide: intpeptide.append(temp[AA]) l=1 #length of subpeptide #subpeptide={} subpeptide1 = [0] while l<len(intpeptide): i=0 #Sum=0 while i<=len(intpeptide)-l: #if str(intpeptide[i:i+l]) not in subpeptide: #subpeptide[str(intpeptide[i:i+l])] = sum(intpeptide[i:i+l]) #print subpeptide subpeptide1.append(sum(intpeptide[i:i+l]))#subpeptide[str(intpeptide[i:i+l])]) i+=1 while len(intpeptide)-l<i<len(intpeptide): #if str(intpeptide[i:]+intpeptide[:(l+i-len(peptide))]) not in subpeptide: #subpeptide[str(intpeptide[i:]+intpeptide[:(l+i-len(peptide))])] = sum(intpeptide[i:]+intpeptide[:(l+i-len(peptide))]) subpeptide1.append(sum(intpeptide[i:]+intpeptide[:(l+i-len(peptide))]))#subpeptide[str(intpeptide[i:]+intpeptide[:(l+i-len(peptide))])]) i+=1 l+=1 #if str(intpeptide) not in subpeptide: # subpeptide[str(intpeptide)] = sum(intpeptide) subpeptide1.append(sum(intpeptide))#subpeptide[str(intpeptide)]) #print subpeptide[str([156, 71])] return subpeptide1 def is_circle(s1, List): for item in List: if len(s1) == len(item) and s1 in item*2: return True return False def gen_list(a, b): # function to grow list list1 = {} for i in a: for j in b: #if not is_circle(i+j, list1): list1[i+j] = submass(i+j) #list1[j+i] = submass(i+j) #list1[j+i]=submass(j+i) return list1 def score(peptidemass): score1 = 0 counter1= dict(Counter(peptidemass)) for el in counter1: if el in counter: if counter[el] > counter1[el]: score1+=counter1[el] else: score1+=counter[el] return score1 def CYCLOPEPTIDESEQUENCING(Spectrum): List = seed #seed #i = 0 list1 = {} #print length, seed,len(seed), type(list1) #seed = str(seed) #print a, sum(a), len(a) while List: List = gen_list(List, seed) #score3 = [] score3 = [score(List[peptide]) for peptide in List ] #print score3#, submass(peptide) score4 = list(score3) score6 = nlargest(N, score3) 
#score3 = list (score3) j = 0 print len(score4), max(score4), min(score6)#, len(List), score6 for peptide in List.keys(): if score4[j] not in score6: del List[peptide] else: #print AA_to_mass(peptide) Mass = List[peptide] if Mass[-1] > Spectrum[-1]: del List[peptide] if Mass[-1] == Spectrum[-1]: list1[peptide] = List[peptide] del List[peptide] j+=1 #i+=1 #len(List), j #print List score5 = [score(list1[peptide]) for peptide in list1] for item in list1: if score(list1[item]) == 736: print AA_to_mass(item) print '\n' if score(list1[item]) == max(score5): print AA_to_mass(item),max(score5) def AA_to_mass(peptide): mass_p = '' for AA in peptide: mass_p+=str(temp[AA]) + '-' return mass_p CYCLOPEPTIDESEQUENCING(Spectrum) #print score(submass('156 71 113 114 131 156 113 101 129 128 128 114 128 103 97 131 131 113 131 113 128 115 128 113'))
UTF-8
Python
false
false
2,014
15,522,011,822,464
34697150d381cdf1204cd5a2f4b9f6a5239d13df
62af9e0f6447f900d2f42c5ab3678d436ff36da1
/graficador.py
5cbe3ed266e68e7a10dd6511fa3a8215d0ea775d
[]
no_license
lrios954/Tarea8
https://github.com/lrios954/Tarea8
8630027eea78bfd74642e1a11df705814346d849
e59640e3f4b2c614a2c2e6dc42461019781bcb6f
refs/heads/master
2021-01-15T18:08:47.280087
2013-04-30T01:33:59
2013-04-30T01:33:59
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import pylab
import numpy

# Time step between successive data files; currently unused by the plotting
# code but kept for reference alongside the simulation that produced the data.
time_interval = 3.0 / 10.0
# Number of spatial sample points in each <i>.dat file.
n_points = 1000

# Files 0.dat .. 10.dat belong to the first problem (domain [0, 1]);
# files 11.dat .. 19.dat belong to the second problem (domain [-5, 5]).
# Each iteration adds one curve to the shared figure and re-saves the PNG,
# so the final image contains all curves of its group.
for i in range(20):
    if i < 11:
        data = numpy.loadtxt(open(str(i) + '.dat', 'r'))
        u = data[:, 0]
        # BUG FIX: 'linspace' was called unqualified, but only 'numpy' is
        # imported (no star import), which raised NameError at runtime.
        x = numpy.linspace(0.0, 1.0, n_points)
        pylab.plot(x, u)
        pylab.xlabel('x')
        pylab.ylabel('u(x)')
        pylab.title('x-u_1')
        pylab.savefig('x-u_1.png')
    else:
        data = numpy.loadtxt(open(str(i) + '.dat', 'r'))
        u = data[:, 0]
        # Same fix as above: qualify linspace with the numpy module.
        x = numpy.linspace(-5.0, 5.0, n_points)
        pylab.plot(x, u)
        pylab.xlabel('x')
        pylab.ylabel('u(x)')
        pylab.title('x-u_2')
        pylab.savefig('x-u_2.png')
UTF-8
Python
false
false
2,013
19,335,942,791,922
add4925f8d842af51241d60e3e24e56fcdb9684f
2d560970a5d14b59ab8e1ec0de3a953a56a4f6f2
/python_db/db_save.py
8a9f08dad8b5d3243afffdbe91a929f793e5cbd6
[]
no_license
the0s/DriverSim
https://github.com/the0s/DriverSim
c4d6715181c930f36dd3fd9bec00364c22b1c5f1
a1ff748ba21a7ac15c08501d980207b8b76ba02e
refs/heads/master
2016-09-06T16:58:34.884676
2012-06-19T10:19:33
2012-06-19T10:19:33
3,593,689
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from db_connect import *
from mongoengine import *
from db_model import *
from tools import *


class SaveDB:
    """Persistence helpers: store parsed telemetry records and create users."""

    def __init__(self):
        # Shared helper object used to classify records (track membership).
        self.tools = Tools()

    def _saveModel(self, data, model):
        """Split a semicolon-separated record and append it to *model*.

        The record is first checked against the track via the Tools helper,
        and the result of that check is stored alongside the data.
        """
        fields = data.split(';')
        on_track = self.tools._inTrack(fields)
        model.add_data(fields, on_track)

    def _addUser(self, email):
        """Interactively create, persist and return a new User for *email*."""
        new_user = User()
        new_user.email = email
        new_user.first_name = raw_input("Enter Firstname: ")
        new_user.last_name = raw_input("Enter Surname: ")
        new_user.gender = raw_input("Enter Gender (m/f): ")
        new_user.save()
        return new_user
UTF-8
Python
false
false
2,012
197,568,537,582
f464bf06d04eb51cafe8aef01f1e823923af6ce4
8a7aec7253dd1a0c3560cb71d354c1bcb1102c66
/tools/telemetry/telemetry/core/chrome/android_browser_backend.py
e1ea36cd9a959a09264ed6696e643f92bfe9fd4a
[ "BSD-3-Clause", "LGPL-2.1-only", "MPL-1.1", "MIT", "LicenseRef-scancode-unknown-license-reference", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-unknown" ]
non_permissive
loopCM/chromium
https://github.com/loopCM/chromium
78e60fe2945f40d5ab3a77a6fd36c667ca323b0f
8db1d931e4e1609d7d8f021ecb4fd2db0b92cb87
HEAD
2019-07-18T09:18:52.643862
2013-05-21T00:44:40
2013-05-21T00:44:40
10,188,303
7
5
null
null
null
null
null
null
null
null
null
null
null
null
null
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import subprocess
import sys
import tempfile
import time

from telemetry.core import exceptions
from telemetry.core.chrome import adb_commands
from telemetry.core.chrome import browser_backend


class AndroidBrowserBackendSettings(object):
  """Base class describing one Android browser flavor (package, activity,
  command-line file location) for the telemetry backend."""

  def __init__(self, adb, activity, cmdline_file, package, pseudo_exec_name):
    self.adb = adb
    self.activity = activity
    self.cmdline_file = cmdline_file
    self.package = package
    self.pseudo_exec_name = pseudo_exec_name

  def GetDevtoolsRemotePort(self):
    # Subclasses must name the abstract devtools socket for their flavor.
    raise NotImplementedError()

  def RemoveProfile(self):
    # Profile removal needs root ('su -c') because the profile lives under
    # the app's private data directory.
    self.adb.RunShellCommand(
        'su -c rm -r "%s"' % self._profile_dir)

  def PushProfile(self, _):
    # Default: pushing a profile is unsupported; subclasses may override.
    logging.critical('Profiles cannot be overriden with current configuration')
    sys.exit(1)

  def SetDevToolsPreferences(self):
    # No-op by default; ChromeBackendSettings overrides to enable devtools.
    pass

  @property
  def is_content_shell(self):
    return False

  @property
  def _profile_dir(self):
    raise NotImplementedError()


class ChromeBackendSettings(AndroidBrowserBackendSettings):
  """Settings for stock Chrome on Android."""

  # Stores a default Preferences file, re-used to speed up "--page-repeat".
  _default_preferences_file = None

  def __init__(self, adb, package):
    super(ChromeBackendSettings, self).__init__(
        adb=adb,
        activity='com.google.android.apps.chrome.Main',
        cmdline_file='/data/local/chrome-command-line',
        package=package,
        pseudo_exec_name='chrome')

  def GetDevtoolsRemotePort(self):
    return 'localabstract:chrome_devtools_remote'

  def PushProfile(self, new_profile_dir):
    self.adb.Push(new_profile_dir, self._profile_dir)

  def SetDevToolsPreferences(self):
    """Enable the devtools remote protocol by editing Chrome's Preferences
    file on the device (launching Chrome once to create it if needed)."""
    # TODO(bulach): Once --enable-remote-debugging flag makes its way to the
    # oldest version under test (m27 goes to stable), remove this function.
    if (not self.adb.Adb().CanAccessProtectedFileContents()):
      return

    prefs_file = self._profile_dir + 'Default/Preferences'

    # Reuse the previous preferences if available, otherwise take the slow path
    # (launch chrome and wait for it to be created).
    if ChromeBackendSettings._default_preferences_file:
      self.adb.Adb().SetProtectedFileContents(
          prefs_file,
          ChromeBackendSettings._default_preferences_file)
      return

    # Make sure we can find the apps' prefs file
    if not self.adb.FileExistsOnDevice(prefs_file):
      # Start it up the first time so we can tweak the prefs.
      self.adb.StartActivity(self.package, self.activity, True, None, None)
      retries = 0
      timeout = 3
      time.sleep(timeout)
      # Poll with exponential backoff until the Preferences file appears.
      while not self.adb.Adb().GetProtectedFileContents(prefs_file):
        time.sleep(timeout)
        retries += 1
        timeout *= 2
        if retries == 3:
          logging.critical('android_browser_backend: Could not find '
                           'preferences file %s for %s',
                           prefs_file, self.package)
          raise exceptions.BrowserGoneException('Missing preferences file.')
      self.adb.CloseApplication(self.package)

    preferences = json.loads(''.join(
        self.adb.Adb().GetProtectedFileContents(prefs_file)))
    changed = False
    if 'devtools' not in preferences:
      preferences['devtools'] = {}
      changed = True
    if not preferences['devtools'].get('remote_enabled'):
      preferences['devtools']['remote_enabled'] = True
      changed = True
    # Cache the (possibly updated) preferences for subsequent runs.
    ChromeBackendSettings._default_preferences_file = json.dumps(
        preferences, indent=2)
    if changed:
      logging.warning('Manually enabled devtools protocol on %s' % self.package)
      self.adb.Adb().SetProtectedFileContents(
          prefs_file,
          ChromeBackendSettings._default_preferences_file)

  @property
  def _profile_dir(self):
    return '/data/data/%s/app_chrome/' % self.package


class ContentShellBackendSettings(AndroidBrowserBackendSettings):
  """Settings for the Chromium content_shell test browser."""

  def __init__(self, adb, package):
    super(ContentShellBackendSettings, self).__init__(
        adb=adb,
        activity='org.chromium.content_shell_apk.ContentShellActivity',
        cmdline_file='/data/local/tmp/content-shell-command-line',
        package=package,
        pseudo_exec_name='content_shell')

  def GetDevtoolsRemotePort(self):
    return 'localabstract:content_shell_devtools_remote'

  @property
  def is_content_shell(self):
    return True

  @property
  def _profile_dir(self):
    return '/data/data/%s/app_content_shell/' % self.package


class ChromiumTestShellBackendSettings(AndroidBrowserBackendSettings):
  """Settings for the Chromium test shell browser."""

  def __init__(self, adb, package):
    super(ChromiumTestShellBackendSettings, self).__init__(
        adb=adb,
        activity='org.chromium.chrome.testshell.ChromiumTestShellActivity',
        cmdline_file='/data/local/tmp/chromium-testshell-command-line',
        package=package,
        pseudo_exec_name='chromium_testshell')

  def GetDevtoolsRemotePort(self):
    return 'localabstract:chromium_testshell_devtools_remote'

  @property
  def is_content_shell(self):
    return True

  @property
  def _profile_dir(self):
    return '/data/data/%s/app_chromiumtestshell/' % self.package


class WebviewBackendSettings(AndroidBrowserBackendSettings):
  """Settings for the Android WebView telemetry shell."""

  def __init__(self, adb, package):
    super(WebviewBackendSettings, self).__init__(
        adb=adb,
        activity='com.android.webview.chromium.shell.TelemetryActivity',
        cmdline_file='/data/local/tmp/webview-command-line',
        package=package,
        pseudo_exec_name='webview')

  def GetDevtoolsRemotePort(self):
    # The DevTools socket name for WebView depends on the activity PID's.
    retries = 0
    timeout = 1
    pid = None
    # Poll with exponential backoff until the activity's PID is visible.
    while True:
      pids = self.adb.ExtractPid(self.package)
      if (len(pids) > 0):
        pid = pids[-1]
        break
      time.sleep(timeout)
      retries += 1
      timeout *= 2
      if retries == 4:
        logging.critical('android_browser_backend: Timeout while waiting for '
                         'activity %s:%s to come up',
                         self.package,
                         self.activity)
        raise exceptions.BrowserGoneException('Timeout waiting for PID.')
    return 'localabstract:webview_devtools_remote_%s' % str(pid)

  @property
  def _profile_dir(self):
    return '/data/data/%s/app_webview/' % self.package


class AndroidBrowserBackend(browser_backend.BrowserBackend):
  """The backend for controlling a browser instance running on Android.
  """
  def __init__(self, options, backend_settings):
    super(AndroidBrowserBackend, self).__init__(
        is_content_shell=backend_settings.is_content_shell,
        supports_extensions=False, options=options)
    if len(options.extensions_to_load) > 0:
      raise browser_backend.ExtensionsNotSupportedException(
          'Android browser does not support extensions.')
    # Initialize fields so that an explosion during init doesn't break in Close.
    self._options = options
    self._adb = backend_settings.adb
    self._backend_settings = backend_settings
    self._saved_cmdline = None
    if not options.keep_test_server_ports:
      adb_commands.ResetTestServerPortAllocation()
    self._port = adb_commands.AllocateTestServerPort()

    # Kill old browser.
    self._adb.CloseApplication(self._backend_settings.package)
    self._adb.KillAll('device_forwarder')

    if self._adb.Adb().CanAccessProtectedFileContents():
      if not options.dont_override_profile:
        self._backend_settings.RemoveProfile()
      if options.profile_dir:
        self._backend_settings.PushProfile(options.profile_dir)

    # Set up the command line. The previous contents are saved so Close()
    # can restore them.
    self._saved_cmdline = ''.join(self._adb.Adb().GetProtectedFileContents(
        self._backend_settings.cmdline_file) or [])
    args = [backend_settings.pseudo_exec_name]
    args.extend(self.GetBrowserStartupArgs())

    def QuoteIfNeeded(arg):
      # Escape 'key=valueA valueB' to 'key="valueA valueB"'
      # Already quoted values, or values without space are left untouched.
      # This is required so CommandLine.java can parse valueB correctly rather
      # than as a separate switch.
      params = arg.split('=')
      if len(params) != 2:
        return arg
      key, values = params
      if ' ' not in values:
        return arg
      if values[0] in '"\'' and values[-1] == values[0]:
        return arg
      return '%s="%s"' % (key, values)
    args = map(QuoteIfNeeded, args)

    self._adb.Adb().SetProtectedFileContents(
        self._backend_settings.cmdline_file, ' '.join(args))

    backend_settings.SetDevToolsPreferences()

    # Start it up with a fresh log.
    self._adb.RunShellCommand('logcat -c')
    self._adb.StartActivity(self._backend_settings.package,
                            self._backend_settings.activity,
                            True,
                            None,
                            'chrome://newtab/')
    # Forward a local TCP port to the browser's devtools socket.
    self._adb.Forward('tcp:%d' % self._port,
                      backend_settings.GetDevtoolsRemotePort())
    try:
      self._WaitForBrowserToComeUp()
      self._PostBrowserStartupInitialization()
    except exceptions.BrowserGoneException:
      logging.critical('Failed to connect to browser.')
      if not self._adb.IsRootEnabled():
        logging.critical(
            'Ensure web debugging is enabled in Chrome at '
            '"Settings > Developer tools > Enable USB Web debugging".')
      sys.exit(1)
    except:
      import traceback
      traceback.print_exc()
      self.Close()
      raise

  def GetBrowserStartupArgs(self):
    args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
    args.append('--enable-remote-debugging')
    args.append('--disable-fre')
    return args

  @property
  def pid(self):
    # First PID reported for the package is taken as the browser's.
    return int(self._adb.ExtractPid(self._backend_settings.package)[0])

  def __del__(self):
    self.Close()

  def Close(self):
    """Restore the saved command line (or remove the file) and close the app."""
    super(AndroidBrowserBackend, self).Close()
    if self._saved_cmdline:
      self._adb.Adb().SetProtectedFileContents(
          self._backend_settings.cmdline_file,
          self._saved_cmdline)
    else:
      self._adb.RunShellCommand('rm %s' % self._backend_settings.cmdline_file)
    self._adb.CloseApplication(self._backend_settings.package)

  def IsBrowserRunning(self):
    pids = self._adb.ExtractPid(self._backend_settings.package)
    return len(pids) != 0

  def GetRemotePort(self, local_port):
    return local_port

  def GetStandardOutput(self):
    # If we can find symbols and there is a stack, output the symbolized stack.
    symbol_paths = [
        os.path.join(adb_commands.GetOutDirectory(), 'Release', 'lib.target'),
        os.path.join(adb_commands.GetOutDirectory(), 'Debug', 'lib.target')]
    for symbol_path in symbol_paths:
      if not os.path.isdir(symbol_path):
        continue
      with tempfile.NamedTemporaryFile() as f:
        lines = self._adb.RunShellCommand('logcat -d')
        for line in lines:
          f.write(line + '\n')
        symbolized_stack = None
        try:
          logging.info('Symbolizing stack...')
          symbolized_stack = subprocess.Popen([
              'ndk-stack', '-sym', symbol_path,
              '-dump', f.name], stdout=subprocess.PIPE).communicate()[0]
        except Exception:
          # Best-effort: fall through to raw logcat if ndk-stack is missing.
          pass
        if symbolized_stack:
          return symbolized_stack
    # Otherwise, just return the last 100 lines of logcat.
    return '\n'.join(self._adb.RunShellCommand('logcat -d -t 100'))

  def CreateForwarder(self, *port_pairs):
    return adb_commands.Forwarder(self._adb, *port_pairs)
UTF-8
Python
false
false
2,013
2,972,117,379,126
2d9cc6a410fb758b38293a7e87f7d8a91711531a
5405d5435f0d25b3041428d84136e16661ea4281
/AppliGreenWave/runner.py
a9e155b3077ed8c3d4733481cbe8d625c801f8bd
[]
no_license
KiBorg0/Social-Vehicular-Network
https://github.com/KiBorg0/Social-Vehicular-Network
3e84130060ec313842ee1d3196489a50b90a868d
1d98912d4594db94c2a0db507add903c7bf821dd
refs/heads/master
2017-03-29T01:13:12.227602
2013-10-21T09:44:10
2013-10-21T09:44:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# Green-wave runner: drives a SUMO simulation through TraCI and adapts
# traffic lights so an emergency vehicle gets priority at junctions.
import os,subprocess,sys,shutil
from sumolib import checkBinary
import traci, libxml2, random
from libxml2 import xmlAttr
from CreateListe import number, createListe, coordinateJunction, edgeInJunction, edgeOutJunction
from ProgramTrafficLights import initialisation, presenceVehEmergency, vehInJunction, changeTrafficLightState, vehNearVehEmergency1, vehNearVehEmergency2, updateTrafficlights

PORT = 8813
sumoBinary = checkBinary('sumo-gui')
sumoConfig = "data/map.sumocfg"

# NOTE(review): nesting reconstructed from a collapsed source — confirm that
# only the launch/connect step is guarded by the isEmbedded() check.
if not traci.isEmbedded():
    # run simulation
    sumoProcess = subprocess.Popen(" %s -c %s" % (sumoBinary,sumoConfig),shell=True, stdout=sys.stdout)
    traci.init(PORT)

# Build the lists of useful map data (parsed from the SUMO network file).
CoordXJunction=createListe("data/map.net.xml")[0]
CoordYJunction=createListe("data/map.net.xml")[1]
TypeJunction=createListe("data/map.net.xml")[2]
IdJunction=createListe("data/map.net.xml")[3]
IdEdge=createListe("data/map.net.xml")[4]
EdgeInJunction=createListe("data/map.net.xml")[5]
EdgeFromJunction=createListe("data/map.net.xml")[6]

# Number of vehicles in the simulation.
print 'le nombre de vehicule est de : ' + str(number("data/map.rou.xml"))

# Coordinates of each junction.
CoordJunctionList=coordinateJunction(CoordXJunction,CoordYJunction)
# Incoming edges for each junction.
EdgeInJunction=edgeInJunction(EdgeInJunction)
# Outgoing edges for each junction.
EdgeOutJunction=edgeOutJunction(IdJunction,EdgeFromJunction,IdEdge)

# Simulation time step counter.
step=0
# Set to 'ok' once the emergency vehicle first appears on the network.
check=''
# Set to 'ok' once the emergency vehicle has left the network.
finish=''
# Per-junction state lists.
ProgramTrafficlights=initialisation(CoordJunctionList)
ListeVehIDJunc=initialisation(CoordJunctionList)
# Tracks whether a light was changed after the emergency vehicle passed.
Initialize=initialisation(CoordJunctionList)
# Keeps the modified light state while the emergency vehicle passes.
Done=initialisation(CoordJunctionList)

# Main simulation loop.
while step == 0 or traci.simulation.getMinExpectedNumber() > 0:
    traci.simulationStep()
    k=0
    if finish=='ok':
        step=step+1
    if finish!='ok':
        # Read the emergency vehicle's current parameters.
        ParameterVehEmergency=presenceVehEmergency(check)
        coordxVehEmergency=ParameterVehEmergency[1]
        coordyVehEmergency=ParameterVehEmergency[2]
        RoutevehEmergency=ParameterVehEmergency[4]
        LastRoadVehEmergency=ParameterVehEmergency[5]
        Last2RoadVehEmergency=ParameterVehEmergency[6]
        vehIdEmergency=ParameterVehEmergency[3]
        check=ParameterVehEmergency[0]
        # End of the emergency vehicle's route.
        if check=='ok' and (traci.vehicle.getRoadID(vehIdEmergency)==LastRoadVehEmergency or traci.vehicle.getRoadID(vehIdEmergency)==Last2RoadVehEmergency):
            finish='ok'
        # For each junction:
        for CoordJunction in CoordJunctionList:
            index=CoordJunctionList.index(CoordJunction)
            coordXJunction=float(CoordJunction[0])
            coordYJunction=float(CoordJunction[1])
            # The emergency vehicle is close to the junction (30 m box)
            # and approaching on one of its incoming edges.
            if (coordxVehEmergency>=coordXJunction-30) and (coordxVehEmergency<=coordXJunction+30) and (coordyVehEmergency>=coordYJunction-30) and (coordyVehEmergency<=coordYJunction+30) and (traci.vehicle.getRoadID(vehIdEmergency) in EdgeInJunction[index]):
                ListeVehIDJunc=vehInJunction(index,ListeVehIDJunc,coordXJunction,coordYJunction,EdgeInJunction,IdJunction)
                # Switch the light once for this junction.
                if TypeJunction[index]=='traffic_light' and Done[index]!='1':
                    ChangeTrafficLightState=changeTrafficLightState(vehIdEmergency,IdJunction,index,Done,Initialize,ProgramTrafficlights)
                    Done=ChangeTrafficLightState[0]
                    Initialize=ChangeTrafficLightState[1]
                    IdTrafficLights=ChangeTrafficLightState[2]
                    ProgramTrafficlights=ChangeTrafficLightState[3]
                    # Adjust the behaviour of nearby vehicles.
                    vehNearVehEmergency1(ListeVehIDJunc,index,vehIdEmergency)
                else:
                    # Slow down vehicles at junctions without traffic lights.
                    vehNearVehEmergency2(ListeVehIDJunc,index,vehIdEmergency)
            # Restore the lights after the emergency vehicle has passed.
            # NOTE(review): IdTrafficLights is only bound after the
            # changeTrafficLightState branch above ran at least once —
            # confirm Initialize[index]=='1' implies that branch executed.
            if TypeJunction[index]=='traffic_light'and Initialize[index]=='1':
                UpdateTrafficlights=updateTrafficlights(vehIdEmergency, EdgeInJunction,EdgeOutJunction,IdJunction,index,IdTrafficLights,ProgramTrafficlights,Initialize)
                ProgramTrafficlights=UpdateTrafficlights[0]
                Initialize=UpdateTrafficlights[1]
        step=step+1

traci.close()
sys.stdout.flush()
UTF-8
Python
false
false
2,013
7,997,229,131,706
a6a2b6a637f295b57850ae7ca295a8f347cb7eef
06e44aff33cc178653c1b5fd08e4b50a286e70f1
/loraefesta/loraweb/admin.py
eab3b4700a9177f65605b2d05f6628a4dd5dbde7
[]
no_license
maurodoglio/loraefesta
https://github.com/maurodoglio/loraefesta
5bbce14582bdb2c5174527d33ef63505d744d464
143d70fb51a9bc3802623cc715777d056b410fb1
refs/heads/master
2020-04-14T09:34:25.799156
2013-11-02T13:04:14
2013-11-02T13:04:14
14,065,703
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin
from models import Collection, Yarn, News
from modeltranslation.admin import TranslationAdmin


class NewsAdmin(TranslationAdmin):
    """Translation-aware admin for News entries."""
    pass


class CollectionAdmin(TranslationAdmin):
    """Translation-aware admin for Collections."""
    pass


class YarnAdmin(TranslationAdmin):
    """Translation-aware admin for Yarns."""
    pass


# Register every model with its translation-aware admin class,
# in the same order as the original interleaved registrations.
admin.site.register(News, NewsAdmin)
admin.site.register(Collection, CollectionAdmin)
admin.site.register(Yarn, YarnAdmin)
UTF-8
Python
false
false
2,013
3,788,161,201,795
c39468f43d753beb8c70ef18d5a8b9b91ad8822b
7c1a46a457dfc5b16bc31f04b2033a44aa5686f6
/SAMethods/NaiveBayes.py
53de1e29514c87148dde59b82fc41ff66204ac22
[]
no_license
muratovv/Sentiment_Analysis
https://github.com/muratovv/Sentiment_Analysis
a8bd8ad1d7a1bccd828db9059e535f8919a9a18e
53944f582ac26e5c8b1c12c916887bae7a125547
refs/heads/master
2016-09-15T20:31:05.809133
2014-12-22T09:13:55
2014-12-22T09:13:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
__author__ = 'muratov'

from collections import defaultdict

from PrepareData.Normalize import normalizeText
from SAMethods.Abstract import AbstractMethod


class NaiveBayes(AbstractMethod):
    """Two-class multinomial naive Bayes with add-one (Laplace) smoothing."""

    def __init__(self):
        # Per-class word occurrence counts.
        self.sampleDict = {1: defaultdict(int), 2: defaultdict(int)}
        # Number of distinct words seen across both classes.
        self.vocabulary = 0
        # Total word count per class.
        self.classWords = {1: 0, 2: 0}
        # Prior probability of each class.
        self.aprioriProbability = {1: 0, 2: 0}

    def setTrainingSamples(self, samples):
        """Fit word counts and class priors from (text, label) pairs,
        where label is 1 or 2."""
        total_samples = 0
        for text, label in samples:
            for token in normalizeText(text).split():
                self.sampleDict[label][token] += 1
                self.classWords[label] += 1
            total_samples += 1
            self.aprioriProbability[label] += 1
        self.vocabulary = len(set(self.sampleDict[1]) | set(self.sampleDict[2]))
        self.aprioriProbability[1] /= total_samples
        self.aprioriProbability[2] /= total_samples

    def predict(self, text):
        """Return the more probable class (1 or 2) for *text*."""
        # Posterior accumulators, seeded with the class priors.
        posterior = {1: self.aprioriProbability[1], 2: self.aprioriProbability[2]}
        # Bag-of-words counts for the input text.
        bag = defaultdict(int)
        for token in normalizeText(text).split():
            bag[token] += 1
        for token, count in bag.items():
            posterior[1] *= self.evaluateProbabilityInClass(token, 1) ** count
            posterior[2] *= self.evaluateProbabilityInClass(token, 2) ** count
        return 1 if posterior[1] > posterior[2] else 2

    def evaluateProbabilityInClass(self, word, cls):
        """Smoothed P(word | cls) = (count + 1) / (class word total + |V|)."""
        return (1 + self.sampleDict[cls][word]) / (self.classWords[cls] + self.vocabulary)


if __name__ == '__main__':
    nb = NaiveBayes()
    nb.setTrainingSamples([("китай пекин китай", 1),
                           ("китай китай шанхай", 1),
                           ("китай макао", 1),
                           ("токио япония китай", 2),
                           ])
    print(nb.predict("китай китай китай токио япония"))
UTF-8
Python
false
false
2,014
10,290,741,647,358
986f482d71fda9ed1c251578391b0f94bd989c9f
f1596535f2e9f0f9e710d79d93830a03c381592d
/computer-security/elgamal-attack/adversary.py
d1ea8f65e8fa2d1d578cc59b537e00760d95c86d
[ "MPL-2.0" ]
non_permissive
mmgmoh/homework
https://github.com/mmgmoh/homework
34802f6da5049fdcf7fa47fcc69e3f7332081ba4
cbaa0a35ed41c97ffe8ba4a1cb133546a6522a96
refs/heads/master
2021-01-22T15:35:22.234814
2013-09-26T13:35:18
2013-09-26T13:36:37
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python # IDEA: start a new process (which will play the role of the challenger # for the CPA game), and communicate with it via stdin and stdout. import sys from subprocess import * from Crypto import Random from Crypto.Random import random from Crypto.PublicKey import ElGamal import cPickle as pcl import numbthy import random p1 = Popen(["./chall.py"], stdin=PIPE, stdout=PIPE, close_fds=True) # start the conversation with a "hello" message hellomesg = "hello" pcl.dump(hellomesg, p1.stdin) p1.stdin.flush() # now read the key. key = pcl.load(p1.stdout) # NOTE: the public key consists of 3 parts: # g is the generator # y is g^x (x is secret) # p is the prime. # TODO: you need to find two messages that you can distinguish via # their ciphertext. They have to be of equal length. Note that # you can just use long integers instead of strings (recommended). m1 = '' # a message with 1 m2 = '' # a message with -1 print key.g start = random.randint(1, key.p) p_minus = (key.p-1)/2 # ensure I am not cheating while not m1: ans = numbthy.powmod(start, p_minus, key.p) if ans == 1: m1 = start print 'm1 got something ', m1 start = random.randint(1, key.p) # Thanks to Linda reminding wes wrote this in his note! m2 = key.g # send the pair of messages: #mesgList = ["message0", "message1"] mesgList = [m1, m2] print 'm1 is: ', m1 print 'm2 is: ', m2 pcl.dump(mesgList, p1.stdin) p1.stdin.flush() # now get the challenge ciphertext. ct = pcl.load(p1.stdout) # TODO: you should be able to guess the right message with probability 1 lv_gr = numbthy.powmod(ct[0], (key.p-1)/2, key.p) lv_mgak = numbthy.powmod(ct[1], (key.p-1)/2, key.p) if lv_gr/lv_mgak == 1: guess = 0 else: guess = 1 #guess = 1 # now report our guess pcl.dump(guess, p1.stdin) p1.stdin.flush() p1.stdin.close() sys.exit()
UTF-8
Python
false
false
2,013
14,302,241,101,880
bad2c66e35ed777dc38590f7f45ac3d6484e9f39
b8f0f7404e65ae3bdc2f783d9d4594a0eeeb469a
/summarizer.py
88839e71278bc0034af66148dfe8a8311a7ab7b2
[]
no_license
screename/web-page-summarizer
https://github.com/screename/web-page-summarizer
5ba25aa93995d864a871245c25a437b2f335a69d
e5d20c070f94f7b0df4413449eb856705bf5f9f3
refs/heads/master
2018-09-21T20:51:49.911572
2013-09-11T03:39:14
2013-09-11T03:39:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import division import re from goose import Goose import nltk.data from nltk.corpus import stopwords from nltk.stem.porter import * from nltk.tokenize import word_tokenize import sys # summarizer class - adopted from - http://thetokenizer.com/, https://gist.github.com/shlomibabluki/5473521 # the idea behind the class is as following # 1. split the text into sentences # 2. create a clean version of the text - remove stop words, and stem the words # so when we compare the words we compear between similar words # even if originally they were written in different forms # 3. create sentence dictionary where each word in each sentence get a rank # based on the number of times it appear in the whole text, the idea is if a word # appear many times in the text it must be an important word (thats why we # remove the stop words at step 2 so words like 'The', 'a', 'and', 'or' would not affect the result # 4. run a loop on each paragraph and see if it has sentences with high rank, # if so - consider them apart of the summarized text class summarizer( object ): # split a paragraph into sentences. # you can use the following replace and split functions or the nltk sentence tokenizer # content = content.replace("\n", ". ") # return content.split(". 
") def splitToSentences(self, content): tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') return tokenizer.tokenize(content) # split text into paragraphs # need to find out if there is a better way to do this with nltk def splitToParagraphs(self, content): return content.split("\n\n") # get the intersection between two sentences # using python native intersection function # to find if an item of a set exist in another set def getIntersection(self, sent1, sent2): # if you are passing an unsplited sentece this could be done in this form # s1 = set( sent1.split(" ") ) # s2 = set( sent2.split(" ") ) s1 = set( sent1 ) s2 = set( sent2 ) # if the senteces are empty the rank of this will be 0 if ( len(s1) + len(s2) ) == 0: return 0 # return the length of the intersection divided by half the length of the two original sentences return len( s1.intersection(s2) ) / ( (len(s1) + len(s2) ) / 2 ) # purify a sentence, we use this to create a key for an object form a sentence, # therefor it could not contain spaces or panctuation def purifySentence(self, sentence): sentence = re.sub( r'\W+', '', sentence) return sentence # stem a sentence # loop through each word and find the stem of it using the # nltk PorterStemmer. 
you can use a different stemmer if you like # like the LancasterStemmer or RegexpStemmer('ing') to remove specific # attributes from a word def stemSentence(self, sentence, stemmer): words = [] for word in sentence: w = stemmer.stem(word) words.append( w ) return words # steam and remove any stop words form a sentence # this will remove words such as 'The', 'and', 'or' that has # no value in regard to the value of the sentence def steamAndRemoveStopWords(self, sentence, stemmer ): s = word_tokenize(sentence) s1 = [w for w in s if not w in stopwords.words('english')] s2 = self.stemSentence( s1, stemmer ) return s2 # this function is the heart of the summerize # here we'll give each sentence rank based on # how many words it has in similarity to other sentences def rankSentences(self, content ): # create a list of all sentences paragraphs = self.splitToParagraphs(content) sentences = [] for p in paragraphs: s = self.splitToSentences(p) for x in s: sentences.append(x) n = len( sentences ) # stem and remove stopwords stemmer = PorterStemmer() clean_sentences = [ self.steamAndRemoveStopWords(x, stemmer) for x in sentences ] # create an empty values set and fill it with the # intersection value with all the other sentences values = [ [0 for x in xrange(n)] for x in xrange(n) ] for i in range(0, n): for j in range(0, n): values[i][j] = self.getIntersection( clean_sentences[i], clean_sentences[j] ) # create sentece dictionary set and fill it with the accumulated value of each sentence sentence_dictionary = {} for i in range(0, n): score = 0 for j in range(0, n): if j == i: continue score += values[i][j] sentence_dictionary[ self.purifySentence( sentences[i]) ] = score return sentence_dictionary # get the best sentence from each paragraph def getBestSentence(self, paragraph, sentence_dictionary): sentences = self.splitToSentences( paragraph ) # ignore sentences that are too short if len( sentences ) < 2: return "" best_sentence = "" max_score = 0 # loop through each 
sentence and find it its value # in the sentence dictionary is the highest in the paragraph for s in sentences: striped = self.purifySentence(s) if striped: if sentence_dictionary[striped] > max_score: max_score = sentence_dictionary[striped] best_sentence = s return best_sentence # summarize the text def summarize(self, content, sentence_dictionary, title): paragraphs = self.splitToParagraphs( content ) summary = [] # this is actually not recommanded as the title many times is not relevant for the topic # or written in a provocative way, many times a title will be the opposite of the subject if title: summary.append( title.strip() ) summary.append("") for p in paragraphs: sentence = self.getBestSentence(p, sentence_dictionary) if sentence: summary.append( sentence ) return ("\n").join(summary) #---------------------------------end of summarizer class ---------------------------------# # using the Goose library to extract the content of a url # returing the title of it and the content of the page. 
# The great thing about using Goose it that it already take # care of everything related to striping tags and such def get_content( url ): g = Goose() article = g.extract( url=url ) title = article.title content = article.cleaned_text return title, content # Summarize the content def summarize( content, title, summarizer, max_len ): sentence_dictionary = summarizer.rankSentences( content ) summary = summarizer.summarize(content, sentence_dictionary, title) # if the content is still too long, lets summarize it again if len( summary ) > max_len: return summarize( summary, False, summarizer, max_len ) else: return summary # grab the content of a url and return a summarized version of it def URLSummarizer( url, max_len ): title, content = get_content( url ) sm = summarizer() summary = summarize( content, title, sm, max_len ) # # Print the ratio between the summary length and the original length # print "" # print "Original Length %s" % (len(title) + len(content)) # print "Summary Length %s" % len(summary) # print "Summary Ratio: %s" % (100 - (100 * (len(summary) / (len(title) + len(content))))) return summary if __name__ == '__main__': if len( sys.argv ) > 1: url = sys.argv[1] max_len = sys.argv[2] else: url = "http://en.wikipedia.org/wiki/Flatiron_Building" max_len = 160 summary = URLSummarizer( url, max_len ) print summary
UTF-8
Python
false
false
2,013
10,505,490,021,816
4f4e6af76ffeb762fb2fbbc4303a1c7fe132da7b
06ea837b960f93db39c4a325a437365a308b643e
/wiki-new/views.py
852aabc6d1cf5007b84f59917dc037ac31e52c0a
[]
no_license
vladigoal/wiki-new
https://github.com/vladigoal/wiki-new
abfccb7a6c6e144ee47f620b0d3db497bf994ce9
52b1cae9d67efbd0392db31575871502fe8957fe
refs/heads/master
2020-12-24T16:06:16.117777
2013-10-19T10:31:57
2013-10-19T10:31:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# -*- coding: utf-8 -*- from django.contrib.auth.forms import AuthenticationForm from django.contrib.auth import authenticate, login, logout from django.shortcuts import render_to_response from django.template import RequestContext from django.http import HttpResponseRedirect from django.shortcuts import redirect def auth_check(request): msg = "" try: username = request.POST['username'] except: username = None try: password = request.POST['password'] except: password = None user = authenticate(username=username, password=password) if user is not None: if user.is_active: login(request, user) response = render_to_response('login.html', context_instance=RequestContext(request)) response = redirect('/index.html') response.set_cookie("username", username) return response #else: # Return a 'disabled account' error message elif request.POST: msg = "Не верный логин/пароль" response = render_to_response('login.html', {'form': AuthenticationForm, "msg": msg}, context_instance=RequestContext(request)) if request.COOKIES.get('sessionid'): response = redirect('/index.html') return response
UTF-8
Python
false
false
2,013
4,818,953,343,397
fc2d5ff46a1260d23c48d6d63701c27ea84652d1
1ed88f39ee47240c54f3a17fbbb1c8d790c2b180
/crowdcomputer/init_db.py
e4bab6c11b0520100f3322bb11ac28abb5ebd0bb
[ "Apache-2.0" ]
permissive
Crowdcomputer/CC
https://github.com/Crowdcomputer/CC
fa38e8a6de07eb2a94a6d44ee23deaac7c6314be
957d1dc7f7e663ca3018ca98539e6066025fb8e4
refs/heads/master
2020-05-02T20:56:16.091773
2014-12-16T16:35:02
2014-12-16T16:35:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on Nov 26, 2012 @author: stefanotranquillini ''' from django.contrib.auth.models import User, Group from rest_framework.authtoken.models import Token from general.models import Application from uuid import uuid4 def init(): initAppsAndCC() def initAppsAndCC(): try: user, c = User.objects.get_or_create(username='crowdcomputer',email="[email protected]",password="this.is.spam") user.save() print "%s %s"%(user.username,c) app, c = Application.objects.get_or_create(name="crowdcomputer",url="http://www.crowdcomputer.org",user=user) if c: app.token=str(uuid4()).replace('-','') app.save() print "%s %s" %(app.name, app.token) app, c = Application.objects.get_or_create(name="bpmn",url="http://www.crowdcomputer.org",user=user) if c: app.token=str(uuid4()).replace('-','') print "%s %s" %(app.name, app.token) app.save() bpmn, c = Group.objects.get_or_create(name='bpmn') bpmn.save() except Exception, e: print e print 'exception' def createAdmin(username,password,email): try: admin, c = User.objects.get_or_create(email=email) if c: admin.set_password(password) admin.username=username admin.is_superuser = True admin.is_staff = True admin.save() print 'creato' else: admin.set_password(password) admin.save() print 'aggiornato' except Exception: print 'exception'
UTF-8
Python
false
false
2,014
4,458,176,066,234
b338b426b0332c5ca2661c1cecf9e424153ecbfa
e0407acfd27586a797758364eeab6172cb6c0524
/openstates/ky/__init__.py
e7a1d01364b324fb3f8484276057e014241b4f1d
[ "GPL-3.0-only" ]
non_permissive
tomschlick/openstates
https://github.com/tomschlick/openstates
bf619784ee0bc70b231e7e774e2adb0af7e0ce94
aecfe315ba3c0cefd042752f702d9aa73153ad81
refs/heads/master
2021-01-16T00:56:54.631046
2011-12-27T16:36:22
2011-12-27T16:36:22
3,058,625
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
metadata = dict( name='Kentucky', abbreviation='ky', legislature_name='Kentucky General Assembly', upper_chamber_name='Senate', lower_chamber_name='House of Representatives', upper_chamber_title='Senator', lower_chamber_title='Representative', upper_chamber_term=4, lower_chamber_term=2, terms=[ dict(name='2009', start_year=2009, end_year=2009, sessions=[ '2009 Regular Session', '2009 Special Session' ]), dict(name='2010', start_year=2010, end_year=2010, sessions=[ '2010 Regular Session', '2010 Extraordinary Session' ]), dict(name='2011', start_year=2011, end_year=2011, sessions=[ '2011 Regular Session' ]) ], session_details={ '2009 Regular Session': {'type': 'primary', 'display_name': '2009 Regular Session'}, '2009 Special Session': {'type': 'special', 'display_name': '2009 Special Session'}, '2010 Regular Session': {'type': 'primary', 'display_name': '2010 Regular Session'}, '2010 Extraordinary Session': { 'type': 'special', 'display_name': '2010 Extraordinary Session', }, '2011 Regular Session': {'type': 'primary', 'display_name': '2011 Regular Session'} }, feature_flags=['subjects', 'events'], )
UTF-8
Python
false
false
2,011
10,316,511,458,317
7c1fb7cddb5485111fd5e75c3ef375cef27099de
1404c83fa96d41cc9fc1edb091f3ac5e14bf9bc5
/src/tools/SquareFinder.py
933152ac2ad0e707bdb2e9e19603a3a5f07091ed
[]
no_license
CyberNAO/naovita
https://github.com/CyberNAO/naovita
75fc4b2075476b53232436fc5f3c16c03aa4ba94
3a84c1fb860eee449932a5b5f26716a1385059fd
refs/heads/master
2021-01-22T23:15:49.504892
2011-06-17T22:29:22
2011-06-17T22:29:22
1,860,441
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on 9 juin 2011 @author: Bruno Barbieri ''' from tools import Imagerie, Deplacements import Image import ImageFilter import ImageChops import operator class SquareFinder: def __init__(self, connection): ''' Constructor ''' #self.__camProxy = connection.getProxy('ALVideoDevice') self.__imagerie = Imagerie.Imagerie(connection) self.__walk = Deplacements.Deplacements(connection) def recon_old(self, firstTime): self.__walk.poseInit() if firstTime: angle = self.orient() if angle != None : return angle self.__walk.bendHead() angle = self.orient() if angle != None : self.__walk.raiseHead() return angle self.__walk.raiseHead() self.__walk.kneel() angle = self.orient() self.__walk.standUp() return angle #penser a tourner brusquement si y'a rien devant. def MYrecon(self, color): im = self.__imagerie.getImage() width, height = im.size squares, dists = self.findSquares(im, color) dir = None limit = width/8 if len(dists) != 0: if abs(dists[0]) < limit: #print("IT'S JUST IN FRONT OF US !!!") dir = 0 elif dists[0] < 0: #print("TO THE LEFT !!!!") dir = 1 else: #print("TO THE RIGHT !!!!") dir = -1 return {color : (dir, len(squares)) } #self.cropSquares(squares, im) def recon(self, color): im = self.__imagerie.getImage() width, height = im.size squares, dists = self.findSquares(im, color) dir = None limit = width/8 if len(dists) != 0: if abs(dists[0]) < limit: #print("IT'S JUST IN FRONT OF US !!!") dir = 0 elif dists[0] < 0: #print("TO THE LEFT !!!!") dir = 1 else: #print("TO THE RIGHT !!!!") dir = -1 range = None if dir != None and color == "red" : range = self.getRange(len(squares[0])) return {color : (dir, len(squares), range) } #self.cropSquares(squares, im) def getRange(self, area): if area < 250 : return 6.0 elif area >= 250 and area < 900 : return 3.0 elif area >= 900 : return 2.0 def orient(self, color): im = self.__imagerie.getImage() width, height = im.size squares, dists = self.findSquares(im, color) dir = None limit = width/8 if len(dists) != 0: if 
abs(dists[0]) < limit: #print("IT'S JUST IN FRONT OF US !!!") dir = 0 elif dists[0] < 0: #print("TO THE LEFT !!!!") dir = 1 else: #print("TO THE RIGHT !!!!") dir = -1 return {color : dir} #self.cropSquares(squares, im) def cropSquares(self, squares, im): #crop the squares num = 0 print("--------------------------------cropping " + str(len(squares)) + " square regions") for square in squares: imgSquare = self.regionCropper(square, im) imgSquare.save("squareTest" + str(num) + ".png") num += 1 #takes a binary image plan and returns the regions, i.e. the groups of connected points def regionFinder(self, image): width, height = image.size regions = [] points = [] for x in range(width): pointrow = [] for y in range(height): pointrow.append(0 != image.getpixel((x,y))) points.append(pointrow) imageSize = width*height for x in range(width): for y in range(height): if (points[x][y]): neighbours = [] neighbours.append([x,y]) region = [] while len(neighbours) > 0: point = neighbours.pop() region.append(point) for neighbour in self.getNeighbours(point, width, height, points): neighbours.append(neighbour) points[neighbour[0]][neighbour[1]] = False #tests on the region size if len(region)>imageSize/1000 and len(region)<imageSize/10: regions.append(region) return regions #gets the neighbours of a point def getNeighbours(self, point, width, height, points): neighbours = [] if (point[0] < width - 1) and points[point[0]+1][point[1]]: neighbours.append([point[0]+1, point[1]]) if (point[1] < height - 1) and points[point[0]][point[1]+1]: neighbours.append([point[0], point[1]+1]) if (point[0] > 0) and points[point[0]-1][point[1]]: neighbours.append([point[0]-1, point[1]]) if (point[1] > 0) and points[point[0]][point[1]-1]: neighbours.append([point[0], point[1]-1]) return neighbours #find the convex hull of a region def findConvexHull(self, region, height): hull = [] min = height for point in region: if point[1] < min: min = point[1] minPoint = point curPoint = minPoint 
hull.append(curPoint) curPoint = self.findPoint(region, curPoint) hull.append(curPoint) while curPoint != minPoint: curPoint = self.findPoint(region, curPoint) hull.append(curPoint) return hull def signum(self, int): if(int < 0): return -1; elif(int > 0): return 1; else: return int; def findPoint(self, points, start): if points[0] == start: end = points[1] else: end = points[0] for point in points: if (point != start) and (point != end): position = self.signum((end[0] - start[0]) * (point[1] - start[1]) - (end[1] - start[1]) * (point[0] - start[0])) if position > 0: end = point return end #square distance def dist(self, point1, point2): dx = abs(point1[0] - point2[0]) dy = abs(point1[1] - point2[1]) return dx*dx + dy*dy #finds the biggest diagonal (the biggest distance between two points in the polygon) and returns the two points def findBiggestDiagonal(self, polygon): maxDiag1 = (polygon[0], polygon[1]) max1 = self.dist(polygon[0], polygon[1]) for i in range(len(polygon)-1): for j in range(i+1,len(polygon)): point1 = polygon[i] point2 = polygon[j] distance = self.dist(point1, point2) if distance >= max1: max1 = distance maxDiag1 = (point1, point2) return (maxDiag1, max1) #gets the mean of a series of coordinates def getMassCenter(self, region): mean_x = 0 mean_y = 0 for point in region: mean_x += point[0] mean_y += point[1] mean_x = mean_x/len(region) mean_y = mean_y/len(region) return (mean_x, mean_y) #crops the image so that the points in the given region are all shown def regionCropper(self, region, image): region.sort(key=lambda point : point[1]) y_min = region[0][1] y_max = region[len(region)-1][1] region.sort(key=lambda point : point[0]) x_min = region[0][0] x_max = region[len(region)-1][0] croppedImage = Image.new("RGB", (x_max-x_min + 1, y_max-y_min + 1)) for x in range(x_min, x_max+1): for y in range(y_min, y_max+1): croppedImage.putpixel((x-x_min,y-y_min), image.getpixel((x,y))) return croppedImage #find if the object is a square def isSquare(self, 
region, hull): diag, dist = self.findBiggestDiagonal(hull) #find the ratio between diagonal and area : for a square, diag^2 should be equal to area*2 diagAreaRatio = float(len(region)*2) / float(dist) if diagAreaRatio > 0.8 and diagAreaRatio < 1.4: return True else: return False #finds blue squares on an image and returns the squares as lists of points (their convex hull) and #their distance from the center def findSquares(self,image, color): print("--------------------------------binarizing the image") result = self.ColorBinarizator(image, color) width, height = result.size #find the regions #print("finding regions") regions = self.regionFinder(result) if len(regions) > 0: print("size of the region : " + str(len(regions[0]))) #try to find squares ''' squares = [] dists = [] print("classifying " + str(len(regions)) + " regions") for region in regions: #get the mass Center of the region massCenter = self.getMassCenter(region) #find the convex hull of the region hull = self.findConvexHull(region, height+1) #testing if the region is a square isThisSquare = self.isSquare(region, hull) #drawing squares if isThisSquare: squares.append(region) dists.append(massCenter[0] - (width/2)) return (squares, dists) ''' dists = [] for region in regions: massCenter = self.getMassCenter(region) dists.append(massCenter[0] - width/2) return (regions, dists) #bynarizes the image according to fix threshold values on the colors def ColorBinarizator(self, image, color): image = image.filter(ImageFilter.BLUR) red, green, blue = image.split() width, height = image.size if color == "blue": impColor = blue elif color == "red": impColor = red else: impColor = green max = 0 img = Image.new("L", (width, height)) for x in range(width): for y in range(height): sum = blue.getpixel((x,y)) + green.getpixel((x,y)) + red.getpixel((x,y))+1 value = round((float(impColor.getpixel((x,y))) / float(sum)) * 255) img.putpixel((x, y), value) if value > max: max = value img.save("beforeT.png") print "max " + 
color + " = " + str(max) if max > 120: threshold = round(0.85*float(max)) else: threshold = 256 table = [] for i in range(256): if i < threshold: table.append(0) else: table.append(255) img = img.point(table) img = self.erode(img) img = self.dilate(img) img.save("afterT.png") return img def erode(self, image): paddedImage = self.createPaddedImage(image, 1) thresholdImg = paddedImage.point(lambda i, v=128: i > v and 255) filteredImg = thresholdImg.filter(ImageFilter.FIND_EDGES) thresholdImg = filteredImg.point(lambda i, v=128: i > v and 255) arithImg = ImageChops.subtract(paddedImage, thresholdImg) box = (1, 1, arithImg.size[0]-1, arithImg.size[1]-1) outImage = arithImg.crop(box) return outImage def dilate(self, image): paddedImage = self.createPaddedImage(image, 1) thresholdImg = paddedImage.point(lambda i, v=128: i > v and 255) thresholdImg = ImageChops.invert(thresholdImg) filteredImg = thresholdImg.filter(ImageFilter.FIND_EDGES) thresholdImg = filteredImg.point(lambda i, v=128: i > v and 255) arithImg = ImageChops.add(paddedImage, thresholdImg) box = (1, 1, arithImg.size[0]-1, arithImg.size[1]-1) outImage = arithImg.crop(box) return outImage def createPaddedImage(self, img, pad): '''Create an padded image - since it is created with resize() the bor= der =20 pixels will be same (almost) as the original edge pixels.''' sizeX, sizeY = img.size paddedImage = img.resize((sizeX+2*pad, sizeY+2*pad)) # paste original image into the new big image with an offset paddedImage.paste(img, (pad, pad)) return paddedImage def equalize(self, h): lut = [] for b in range(0, len(h), 256): # step size step = reduce(operator.add, h[b:b+256]) / 255 # create equalization lookup table n = 0 for i in range(256): lut.append(n / step) n = n + h[i+b] return lut
UTF-8
Python
false
false
2,011
4,638,564,702,389
7524e456efb08a1d36dfb5d2be473f3ca1c84c80
62b2af05ac7ae88b1201584c1b46e1b985020b05
/composcan/cps/views.py
3eb5f7071f5ba6039566fc09d2626a55bea99db0
[]
no_license
SevenStones/musang
https://github.com/SevenStones/musang
a9feb2369b5a52dd3c57b0a57c7ed8edf8bee8c4
45af082ba6a313fd09cae0ad2a93e63df2b50bce
refs/heads/master
2016-09-05T19:25:53.770592
2014-08-31T09:49:51
2014-08-31T09:49:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib import auth from django.http import HttpResponse from django.template import loader from cps.models import Targets,ScanConfigForm,Scans from django.shortcuts import render_to_response from django.template import RequestContext from cps.data import group_expand import logging def login(request): t = loader.get_template('admin/login.html') c = RequestContext(request) return HttpResponse(t.render(c)) @login_required def scan_config(request): #altered get_query_set and model manager (referenced by recent() method of model manager #RecentScans()) ...trickery to remove code away from View and into Model...MVC bla bla. #ReportForm = ReportGenForm() ip_addresses=list() if request.method == 'POST': # If the form has been submitted... form = ScanConfigForm(request.POST) # A form bound to the POST data if form.is_valid(): u=form.cleaned_data ip_addresses=group_expand(u['target_group']) return render_to_response('admin/scan_run.html', { 'group': u['target_group'], 'target_ips': ip_addresses }, context_instance=RequestContext(request)) else: form = ScanConfigForm() # An unbound form return render_to_response('admin/scans.html', { 'form': form }, context_instance=RequestContext(request)) def logout_page(request): auth.logout(request) t = loader.get_template('admin/logout.html') c = RequestContext(request) return HttpResponse(t.render(c))
UTF-8
Python
false
false
2,014
9,174,050,155,652
b87d896766e1efda74d8755c24b3e7522e9bf72d
e3b6f36f2b1cc51955a3b3ab327da76731edb7b6
/textrank.py
f7ce613326244b79c28399023c759ae3fcbb6cf3
[]
no_license
Wannawaiting/textRankPython
https://github.com/Wannawaiting/textRankPython
f7b6303a4a294e2a36738100f34ec689dd030381
98954ed13ccfca2db024697d908e13860276222a
refs/heads/master
2021-01-13T14:38:48.650411
2014-10-18T02:55:05
2014-10-18T02:55:05
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" From this paper: http://acl.ldc.upenn.edu/acl2004/emnlp/pdf/Mihalcea.pdf I used python with nltk, and pygraph to do an implmentation of of textrank. for questions: http://twitter.com/voidfiles """ import nltk import itertools import string import re from operator import itemgetter from pygraph.classes.graph import graph from pygraph.classes.digraph import digraph from pygraph.algorithms.pagerank import pagerank from pygraph.classes.exceptions import AdditionError import glob from os.path import expanduser home = expanduser("~") text = u"""In the Valley, we have lots of euphemisms for changing your business until you find a way to make money: You might throw things at the wall and see what sticks, or go where the money is, or pivot. Whatever you call it, it boils down to a basic animal instinct-the killer instinct. You need to look at the whole picture and attack an area that is vulnerable, and then keep attacking until you have won, or until you find an even more vulnerable spot. Rinse, then repeat. I have yet to run my own company, but that doesn't stop me from evaluating the ability of a business to harness its killer instinct and fuel its own expansion. I have worked for companies with and without this instinct. I like working for companies with a keen killer instinct. This killer instinct directly relates to last month's Google Reader debacle. I would often deride Google for changing Reader, but at the same time, I knew from the beginning that it was the right move on the part of Google. Google has amassed their resources to support Google+. They have gone so far as to tie employees' salaries and bonuses to how well Google+ does. They then rolled out integrations across the company. The company uses anything that could possibly prop up Google+ to drive the success of the project. This is the killer instinct in action. Google knows that if they don't combat Facebook, they are going to forfeit a significant market in the future. 
They aren't going to lose this battle without a fight. As an outsider, and as a former Yahoo employee, I applaud Google's determination. Yahoo had been trying to start a social networking service for as long as I worked there. The problem with the Yahoo social networking plan is that they have tried five5 different things in five5 years. Apparently Google+ wasn't all that welcome at Google in it's internal beta, and there have even been some very public rants from Googlers about the faults of Google+,the project- but Google is still pushing it hard. If Yahoo ran had run into this much resistance, they would have shut it down. Now that I work for a small company, I have had the chance to see killer instinct in the flesh. I know how much focus it gives a company, and that it drives the development of a strong plan. It gives you a roadmap, even when you don't always know what the future looks like. I can only hope that when I run my own company, I'll have that same killer instinct.""" def list_files_in_foler(folderPath = "/Users/soheildanesh/projects/cam/data/datasets/Hulth2003/Test/" , fileExtension = "*.abstr"): #from http://stackoverflow.com/questions/3207219/how-to-list-all-files-of-a-directory-in-python files = glob.glob(folderPath+"*.abstr") return files #DEBUG #print("files = %s" % files) #DEBUG #folderPath = "/Users/soheildanesh/GitHub/cam" #f = [] #from http://stackoverflow.com/questions/3207219/how-to-list-all-files-of-a-directory-in-python #for (dirpath, dirnames, filenames) in walk(folderPath): # print("dirpath, dirnames, filenames = %s, %s, %s" % (dirpath, dirnames, filenames) ) # f.extend(filenames) # break #return f def filter_for_tags(tagged, tags=['NN', 'JJ', 'NNP']): return [item for item in tagged if item[1] in tags] def filter_puncs(text): puncs = '!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~' #set(string.punctuation) replace_punctuation = string.maketrans(puncs, ' '*len(puncs)) text = text.translate(replace_punctuation) return text def normalize(tagged): 
return [(item[0].replace('.', ''), item[1]) for item in tagged] def unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen = set() seen_add = seen.add if key is None: for element in itertools.ifilterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element #to run on macmini and hulth: #macmini: textrank.textrankFilesInFolder("/Users/soheild/Documents/projects/cam/data/datasets/Hulth2003/Test/") #macbookPro: textrank.textrankFilesInFolder("/Users/soheildanesh/projects/cam/data/datasets/Hulth2003/Test/") def textrankFilesInFolder(folderPath): files = list_files_in_foler(folderPath) outputFile = open('textrankOutput.txt', 'w') for file in files: fileName = file.split("/").pop() text = open(file).read() print("fileName = %s" % fileName) print("text = %s" % text) phrasesAndWords = runtextrank(text) commaSeparatedListTopCandids = "" for term in phrasesAndWords: commaSeparatedListTopCandids = commaSeparatedListTopCandids + term + "," outputFile.write("%s : %s \n" % (fileName, commaSeparatedListTopCandids)) print("phrasesAndWords = %s" % phrasesAndWords) #given a list of words (topWordList) and toneized text (textWordList) (ie list of words in order as they appear in the original text), combine the words that occur adjacent to each other in the text into multi-word phrases, checks for inclusion in text before including candidates in the returned ones to avoid those candidates saparated by punctuation marks. 
#given a list of words (topWordList) and tokenized text (wordsInOrder, i.e. the
#words in the order they appear in the original text), combine top words that
#occur adjacent to each other into multi-word phrases; checks for inclusion in
#the raw text before emitting a candidate so words separated by punctuation are
#not fused into one phrase.
def combineAdjacentWords(topWordList, wordsInOrder, text):
    # Accumulator for the phrase currently being grown word-by-word.
    phraseOrWord = ""
    ouputPhrasesAndWords = []
    print("wordsInOrder = %s" % wordsInOrder)
    for word in wordsInOrder:
        word = word.lower()
        print("word = %s" % (word))
        if word in topWordList:
            print("word in topWordList = %s" % (word))
            if not phraseOrWord: #if phraseOrWord is not nil
                phraseOrWord = word
            else:
                # Adjacent top-ranked word: extend the current phrase.
                phraseOrWord = phraseOrWord + " " + word
                #strip phraseOrWord: looks like python already strips, not seeing any leading/trailing spaces
        else:
            #DEBUG
            if phraseOrWord:
                print("phraseOrWord geting potentially added to outputPhrasesAndWords = %s" % phraseOrWord)
                if not phraseOrWord in text:
                    print("not adding %s because it is not seen in text" % phraseOrWord)
            #DEBUG
            # Emit the phrase only if non-empty, not a duplicate, and actually
            # a contiguous substring of the original (punctuation-preserving) text.
            if phraseOrWord and phraseOrWord not in ouputPhrasesAndWords and phraseOrWord in text: #if wordOrPhrase is not already in outputPhrasesAndWords list
                ouputPhrasesAndWords.append(phraseOrWord)
                print("Appending to outputPhrasesAndWords = %s" % phraseOrWord)
            phraseOrWord = ""
    #DEBUG
    if phraseOrWord:
        print("phraseOrWord geting potentially added to outputPhrasesAndWords = %s" % phraseOrWord)
        if not phraseOrWord in text:
            print("not adding %s because it is not seen in text" % phraseOrWord)
    #DEBUG
    ## TAKE CARE OF CASE WHERE LAST WORD OR PHRASE SHOULD BE ADDED TO OUTPUT PHRASES BUT WE'VE ALREADY EXISTED THE LOOP AFTER THE IF AND DONT REACH THE ELSE SO ITS NOT APPENDED ##
    if phraseOrWord and phraseOrWord not in ouputPhrasesAndWords and phraseOrWord in text:
        ouputPhrasesAndWords.append(phraseOrWord)
        print("Appending to outputPhrasesAndWords = %s" % phraseOrWord)
    print("ouputPhrasesAndWords = %s" % ouputPhrasesAndWords)
    return ouputPhrasesAndWords


# Earlier/quieter variant of combineAdjacentWords.  NOTE(review): unlike the
# function above, a phrase still open when the loop ends is silently dropped
# here (no post-loop flush) — confirm whether that is intentional.
def _combineAdjacentWords(topWordList, textWordList, text):
    phraseOrWord = ""
    ouputPhrasesAndWords = []
    for word in textWordList:
        #word = wordWeight[0]
        word = word.lower()
        #print("word = %s" % (word))
        if word in topWordList:
            #print("word in topWordList = %s" % (word))
            if not phraseOrWord: #if phraseOrWord is not empty
                phraseOrWord = word
            else:
                phraseOrWord = phraseOrWord + " " + word
                #strip phraseOrWord: looks like python already strips, not seeing any leading/trailing spaces
        else:
            #print("phraseOrWord geting added to outputPhrasesAndWords %s" % (phraseOrWord))
            if phraseOrWord and phraseOrWord not in ouputPhrasesAndWords and phraseOrWord in text: #if wordOrPhrase is not already in outputPhrasesAndWords list
                ouputPhrasesAndWords.append(phraseOrWord)
                #print("ouputPhrasesAndWords = %s" % (ouputPhrasesAndWords))
            phraseOrWord = ""
    return ouputPhrasesAndWords


def containsPunctuation(s):
    # True if any character of `s` is in the hard-coded punctuation set
    # (note: '-' is deliberately absent from the set).
    puncs = '!"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~' #set(string.punctuation)
    for punc in puncs:
        if punc in s:
            return True
    return False


def runtextrankOnFilesInFoler(folderPath = "/Users/soheildanesh/projects/cam/data/datasets/Hulth2003/Test/" , fileExtension = "*.abstr"):
    # Reads every matching file in the folder; currently only prints each
    # file's text (the textrank call described in the comment below is not
    # yet wired up).
    textFiles = list_files_in_foler(folderPath, fileExtension)
    for fileName in textFiles:
        print("filefile = %s" % fileName)
        #<< now read each file and call textrank(text) then take the result and put it in a file like url : keyphrase, keyphrase, ... then load this to semeval and measure precision recall, with textrank style
        file = open(fileName, 'r')
        text = file.read()
        print("text = %s" % text)


# Core TextRank pipeline: normalise whitespace, strip punctuation, tokenise,
# POS-filter, build a word co-occurrence graph (window of 2), run PageRank,
# keep the top third of words and merge adjacent ones into phrases.
# NOTE(review): Python 2 only — uses `except AdditionError, e`, a bare
# `print` statement, and dict.iteritems().
def runtextrank(text):
    print("text = %s" % text)
    ### REPLACE TAGS SUCH AS \n, \r and \t WITH SPACE ###
    p = re.compile('\t|\n|\r')
    text = p.sub(" ", text)
    multipSpaces = re.compile('\s+')
    text = multipSpaces.sub(" ",text)
    #note ^: above line might create double or multiple white spaces, replace these with singles so no trouble later on when checking if a multi-word is in text before including it as a multi-word candidate in combineAdjacentWords
    print("text after replacing tabs returns and new lines with space = %s" % text)
    ### REPLACE TAGS SUCH AS \n, \r and \t WITH SPACE ###
    #REMOVE PUNCTUATIONS FROM TEXT
    #use punctLess text to rank words but use the virgin text to combine words in combineAdjacentWords so words separated by puncs aren't combined into candidates
    textWithNoPuncs = filter_puncs(text)
    print("textWithNoPuncs = %s" % textWithNoPuncs)
    #REMOVE PUNCTUATIONS FROM TEXT
    textWordList = nltk.word_tokenize(textWithNoPuncs)
    print("textWordList = %s" % textWordList)
    #ELIMINATE SINGLE LETTERS
    #print("textWordList before eliminating single chars = %s" % textWordList)
    tempWordList = []
    for word in textWordList:
        if len(word) > 1:
            tempWordList.append(word)
    textWordList = tempWordList
    #print("textWordList AFTER eliminating single chars = %s" % textWordList)
    #ELIMINATE SINGLE LETTERS
    tagged = nltk.pos_tag(textWordList)
    tagged = filter_for_tags(tagged)
    tagged = normalize(tagged)
    unique_word_set = unique_everseen([x[0] for x in tagged])
    # Build the co-occurrence graph: one node per unique filtered word,
    # one edge per adjacent pair within a sliding window of size 2.
    gr = digraph()
    gr.add_nodes(list(unique_word_set))
    window_start = 0
    window_end = 2
    while 1:
        window_words = tagged[window_start:window_end]
        if len(window_words) == 2:
            #print window_words
            try:
                gr.add_edge((window_words[0][0], window_words[1][0]))
            except AdditionError, e:
                # Duplicate edge; the graph library refuses re-insertion.
                print 'already added %s, %s' % ((window_words[0][0], window_words[1][0]))
        else:
            break
        window_start += 1
        window_end += 1
    calculated_page_rank = pagerank(gr)
    di = sorted(calculated_page_rank.iteritems(), key=itemgetter(1), reverse=True)
    ### TAKE TOP 3rd WORDS AND COMBINE THEM ###
    # NOTE(review): Python 2 integer division — top third rounded down.
    thirdOfVertices = len(di) / 3
    topWrodWeights = di[:thirdOfVertices]
    topWords = []
    for wordWeight in topWrodWeights:
        topWords.append(wordWeight[0])
    phrasesAndWords = combineAdjacentWords(topWords, textWordList, text)
    #phrasesAndWords = combineAdjacentWords(topWords, textWordList, text)
    #print("phrasesAndWords = %s" % phrasesAndWords)
    ### TAKE TOP 3rd WORDS AND COMBINE THEM ###
    #print 'di = %s' % (di)
    # NOTE(review): the loop body below is only this return, so the function
    # returns on the first iteration of groupby; if `di` is empty the loop
    # never runs and the function implicitly returns None — confirm intended.
    for k, g in itertools.groupby(di, key=itemgetter(1)):
        #print k, map(itemgetter(0), g)
        #return di
        return phrasesAndWords
UTF-8
Python
false
false
2,014
5,677,946,793,334
79bbc07a2abf5adac22fcee80a2c60468d97a28e
391c0df5fc7cd30a88f130c2d277cbc647fe00cf
/gateway/hueConfig.py
116c52bed7fff5569dfe49542fe2c83695881d62
[]
no_license
stege/HomeControl
https://github.com/stege/HomeControl
779b7503caea1fe6ca00176aec7ecfe11149181c
335f23ef9dcad3c06125daf6c43168c52e781853
refs/heads/master
2016-09-06T14:22:26.798659
2013-04-09T19:43:30
2013-04-09T19:43:30
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Configuration constants for an emulated Philips Hue bridge: network/SSDP
# settings, the default bridge state returned by the REST API, and the UPnP
# discovery description document.
import logging
from hueUtils import *
from settings import WEB_SERVER_PORT

USN_UUID = '08733a4a-5483-4ae9-8de8-185ab3e3c88b' # random uuid (uuid.uuid4())
PORT = WEB_SERVER_PORT
SSDP_ADDR = '239.255.255.250'  # standard SSDP multicast address
SSDP_PORT = 1900               # standard SSDP port
LOGGER = 'HueBridge_Logger'
LOGGING_LEVEL = logging.INFO

# Default Status of Hue bridge
# NOTE(review): boolean-like values are deliberately the *strings* 'true'/
# 'false' (presumably emitted verbatim into JSON responses) — confirm the
# serializer expects this before changing them to real booleans.
DEFAULT_CONFIG = {
    "config": {
        "UTC": getUTC(),
        "dhcp": 'true',
        "gateway": "10.0.1.1",
        "ipaddress": "10.0.1.31",
        "linkbutton": 'false',
        "mac": "08:00:27:54:5f:85",  # TODO
        "name": "Philips hue",
        "netmask": "255.255.255.0",  # TODO
        "portalservices": 'true',
        "proxyaddress": "",
        "proxyport": 0,
        "swupdate": {
            "notify": 'false',
            "text": "",
            "updatestate": 0,
            "url": ""
        },
        "swversion": "01003542",
        # No apps whitelisted by default; example entry kept for reference.
        "whitelist": {
            #"22a828f1898a4257c3f181e75324f557": {
            #    "create date": "2012-11-10T19:23:15",
            #    "last use date": "2012-11-10T22:31:57",
            #    "name": "python-hue"
            #},
        }
    },
    "groups": {},
    "lights": {
        "1": {
            "modelid": "LCT001",
            "name": "Hue Lamp 1",
            "pointsymbol": {
                "1": "none",
                "2": "none",
                "3": "none",
                "4": "none",
                "5": "none",
                "6": "none",
                "7": "none",
                "8": "none"
            },
            "state": {
                "alert": "none",
                "bri": 20,
                "colormode": "xy",
                "ct": 369,
                "effect": "none",
                "hue": 14922,
                "on": 'true',
                "reachable": 'true',
                "sat": 144,
                "xy": [
                    0.4595,
                    0.4105
                ]
            },
            "swversion": "65003148",
            "type": "Extended color light"
        },
    },
    "schedules": {}
}

# Discovery description XML, parameters: (bridge IP, bridge port, UDN/USN)
DESCRIPTION = """<?xml version="1.0"?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://%s:%d/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>Philips hue</friendlyName>
<manufacturer>Royal Philips Electronics</manufacturer>
<manufacturerURL>http://www.philips.com</manufacturerURL>
<modelDescription>Philips hue Personal Wireless Lighting</modelDescription>
<modelName>Philips hue bridge 2012</modelName>
<modelNumber>1000000000000</modelNumber>
<modelURL>http://www.meethue.com</modelURL>
<serialNumber>93eadbeef13</serialNumber>
<UDN>%s</UDN>
<serviceList>
<service>
<serviceType>(null)</serviceType>
<serviceId>(null)</serviceId>
<controlURL>(null)</controlURL>
<eventSubURL>(null)</eventSubURL>
<SCPDURL>(null)</SCPDURL>
</service>
</serviceList>
<presentationURL>index.html</presentationURL>
<iconList>
<icon>
<mimetype>image/png</mimetype>
<height>48</height>
<width>48</width>
<depth>24</depth>
<url>hue_logo_0.png</url>
</icon>
<icon>
<mimetype>image/png</mimetype>
<height>120</height>
<width>120</width>
<depth>24</depth>
<url>hue_logo_3.png</url>
</icon>
</iconList>
</device>
</root>
"""
UTF-8
Python
false
false
2,013
12,627,203,851,964
041282be1710e39b8ec5fd3b0235f5fa54a3af81
ef98cdad36bab1c9791b38f1e895ef9348aa5db4
/hdfs-wd/scripts/otherScripts/findDiffExecSets.py
d1fab4e26aa4d34303f130e9a12f212f9612e403
[]
no_license
pallavij/PCheck
https://github.com/pallavij/PCheck
8d3e10d1f08af1d9d2656332411be691490e810a
8a38c7459427486472ca59816534e755ac49b1f2
refs/heads/master
2021-01-25T05:35:09.712497
2012-11-14T20:45:06
2012-11-14T20:45:06
6,097,780
1
2
null
false
2023-03-20T11:50:47
2012-10-05T22:47:22
2015-05-27T08:32:52
2017-05-18T10:12:00
98,389
2
2
0
Java
false
false
#!/usr/bin/python import sys import os f1 = open("tmp1","r") line1 = f1.readline() f1.close() f2 = open("tmp2","r") line2 = f2.readline() f2.close() line1 = line1.rstrip("\n") line2 = line2.rstrip("\n") s1 = set(line1.split(" ")) s2 = set(line2.split(" ")) print "s1-s2" print (s1-s2) print "s2-s1" print (s2-s1)
UTF-8
Python
false
false
2,012
9,706,626,098,460
95d8d0edf5a3cdfc12f1a53f47d323ae67e60bb8
7fee6489175ba07aef9563160d58ee66274203c3
/remo/featuredrep/admin.py
be4e8ba13fe808f473e52919b1b14656e8bbc0f6
[]
no_license
v1ka5/remo
https://github.com/v1ka5/remo
80f79833f39911b23dbd5649201e3f367adf9d59
bad0cba50365ff2c5b7bf480d45304c4d60b4779
refs/heads/master
2020-12-24T09:53:35.529062
2014-07-03T08:35:23
2014-07-03T08:35:23
21,608,266
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.contrib import admin from import_export import fields, resources from import_export.admin import ExportMixin from remo.featuredrep.models import FeaturedRep class FeaturedRepResource(resources.ModelResource): user = fields.Field() class Meta: model = FeaturedRep def dehydrate_user(self, featuredrep): return featuredrep.user.get_full_name() class FeaturedRepAdmin(ExportMixin, admin.ModelAdmin): resource_class = FeaturedRepResource model = FeaturedRep list_display = ('user', 'created_on') search_fields = ['user__first_name', 'user__last_name', 'user__userprofile__display_name'] admin.site.register(FeaturedRep, FeaturedRepAdmin)
UTF-8
Python
false
false
2,014
8,735,963,500,793
3527693260c24b965efebbf3413a174eabcf6cf2
0539242b5af996a3b47e6c98f2fc8f7af14bc63c
/pylearn2/devtools/convert_pkl.py
2f09fd0cec4e6a555cd2d0a1c32bc42677099936
[ "BSD-3-Clause" ]
permissive
kelvinxu/pylearn2
https://github.com/kelvinxu/pylearn2
45cbd503f3d4461abea0fc89f85793a597ef5530
f3b5cb374547c3c6ee605c88c7f9d9ada8b4d37a
refs/heads/master
2020-12-31T02:14:50.105595
2014-11-27T19:12:28
2014-11-27T19:12:28
23,796,451
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" Convert a .pkl from Python 2 to Python 3 """ import pickle import sys if __name__ == "__main__": source_pkl = sys.argv[1] try: target_pkl = sys.argv[2] except IndexError: target_pkl = source_pkl with open(source_pkl, 'rb') as f: obj = pickle.load(f, encoding='latin-1') with open(target_pkl, 'wb') as f: pickle.dump(obj, f)
UTF-8
Python
false
false
2,014
16,286,515,993,832
f176c46408c93be3a241673f973f2ada12897f9e
f981f467fc4e75e3c2dbc67e7a85d6d9e3975496
/scripts/081/hamano.py
54a01190f54f699b3cdd3e88c223f275e64327d4
[]
no_license
sanosuke39/pjeuler
https://github.com/sanosuke39/pjeuler
0bc12e151ae9657090333464ad33d230471c4bee
31a93062ef078eb7bffc8ef4288e547d2227640d
refs/heads/master
2021-10-08T13:07:16.362261
2013-12-15T14:38:06
2013-12-15T14:38:06
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# Project Euler problem 81 solved by JIT-executing hand-assembled x86-64
# machine code (the `obj` bytes below, with their disassembly as comments)
# via ctypes: the buffer is made executable with mprotect, then called as
# a C function over the matrix read from matrix.txt.
from ctypes import *
from struct import pack

# Raw machine code for the p81 routine; each line's comment is the
# corresponding disassembled instruction.
obj = [
0x55,                                 # push %rbp
0x53,                                 # push %rbx
0x48,0x81,0xec,0x90,0x63,0x00,0x00,   # sub $0x6390,%rsp
0x8b,0x16,                            # mov (%rsi),%edx
0x83,0xff,0x01,                       # cmp $0x1,%edi
0x89,0x54,0x24,0x88,                  # mov %edx,-0x78(%rsp)
0x0f,0x8e,0xca,0x00,0x00,0x00,        # jle 400752 <p81+0xe2>
0x8d,0x5f,0xfe,                       # lea -0x2(%rdi),%ebx
0x48,0x8d,0x4c,0x24,0x88,             # lea -0x78(%rsp),%rcx
0x31,0xc0,                            # xor %eax,%eax
0x4c,0x8d,0x04,0x9d,0x04,0x00,0x00,   # lea 0x4(,%rbx,4),%r8
0x00,                                 #
0x48,0x83,0xc1,0x04,                  # add $0x4,%rcx
0xeb,0x04,                            # jmp 4006a4 <p81+0x34>
0x8b,0x54,0x04,0x88,                  # mov -0x78(%rsp,%rax,1),%edx
0x03,0x54,0x06,0x04,                  # add 0x4(%rsi,%rax,1),%edx
0x89,0x14,0x01,                       # mov %edx,(%rcx,%rax,1)
0x48,0x83,0xc0,0x04,                  # add $0x4,%rax
0x4c,0x39,0xc0,                       # cmp %r8,%rax
0x75,0xec,                            # jne 4006a0 <p81+0x30>
0x48,0x63,0xc7,                       # movslq %edi,%rax
0x48,0x8d,0x1c,0x9d,0x04,0x00,0x00,   # lea 0x4(,%rbx,4),%rbx
0x00,                                 #
0x41,0x89,0xf9,                       # mov %edi,%r9d
0x48,0x8d,0x2c,0x85,0x00,0x00,0x00,   # lea 0x0(,%rax,4),%rbp
0x00,                                 #
0x4c,0x8d,0x54,0x86,0x04,             # lea 0x4(%rsi,%rax,4),%r10
0x41,0xbb,0x01,0x00,0x00,0x00,        # mov $0x1,%r11d
0x0f,0x1f,0x00,                       # nopl (%rax)
0x44,0x89,0xc8,                       # mov %r9d,%eax
0x49,0x63,0xd1,                       # movslq %r9d,%rdx
0x4e,0x8d,0x04,0x13,                  # lea (%rbx,%r10,1),%r8
0x29,0xf8,                            # sub %edi,%eax
0x48,0x98,                            # cltq
0x8b,0x44,0x84,0x88,                  # mov -0x78(%rsp,%rax,4),%eax
0x41,0x03,0x42,0xfc,                  # add -0x4(%r10),%eax
0x89,0x44,0x94,0x88,                  # mov %eax,-0x78(%rsp,%rdx,4)
0x41,0x8d,0x41,0x01,                  # lea 0x1(%r9),%eax
0x4c,0x89,0xd2,                       # mov %r10,%rdx
0xeb,0x1a,                            # jmp 400715 <p81+0xa5>
0x0f,0x1f,0x44,0x00,0x00,             # nopl 0x0(%rax,%rax,1)
0x03,0x32,                            # add (%rdx),%esi
0x48,0x83,0xc2,0x04,                  # add $0x4,%rdx
0x48,0x63,0xc8,                       # movslq %eax,%rcx
0x83,0xc0,0x01,                       # add $0x1,%eax
0x4c,0x39,0xc2,                       # cmp %r8,%rdx
0x89,0x74,0x8c,0x88,                  # mov %esi,-0x78(%rsp,%rcx,4)
0x74,0x2e,                            # je 400743 <p81+0xd3>
0x89,0xc1,                            # mov %eax,%ecx
0x29,0xf9,                            # sub %edi,%ecx
0x48,0x63,0xc9,                       # movslq %ecx,%rcx
0x8b,0x74,0x8c,0x88,                  # mov -0x78(%rsp,%rcx,4),%esi
0x8d,0x48,0xff,                       # lea -0x1(%rax),%ecx
0x48,0x63,0xc9,                       # movslq %ecx,%rcx
0x8b,0x4c,0x8c,0x88,                  # mov -0x78(%rsp,%rcx,4),%ecx
0x39,0xce,                            # cmp %ecx,%esi
0x7c,0xd2,                            # jl 400700 <p81+0x90>
0x03,0x0a,                            # add (%rdx),%ecx
0x48,0x83,0xc2,0x04,                  # add $0x4,%rdx
0x48,0x63,0xf0,                       # movslq %eax,%rsi
0x83,0xc0,0x01,                       # add $0x1,%eax
0x4c,0x39,0xc2,                       # cmp %r8,%rdx
0x89,0x4c,0xb4,0x88,                  # mov %ecx,-0x78(%rsp,%rsi,4)
0x75,0xd2,                            # jne 400715 <p81+0xa5>
0x41,0x83,0xc3,0x01,                  # add $0x1,%r11d
0x41,0x01,0xf9,                       # add %edi,%r9d
0x49,0x01,0xea,                       # add %rbp,%r10
0x41,0x39,0xfb,                       # cmp %edi,%r11d
0x75,0x86,                            # jne 4006d8 <p81+0x68>
0x8b,0x84,0x24,0x84,0x63,0x00,0x00,   # mov 0x6384(%rsp),%eax
0x48,0x81,0xc4,0x90,0x63,0x00,0x00,   # add $0x6390,%rsp
0x5b,                                 # pop %rbx
0x5d,                                 # pop %rbp
0xc3,                                 # retq
0x66,0x66,0x66,0x66,0x2e,0x0f,0x1f,   # data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
0x84,0x00,0x00,0x00,0x00,0x00,        #
]
#from p81 import obj
# Materialise the byte list into a ctypes buffer (Python 2: ''.join of chr
# produces a byte string) and make its page(s) executable.
code = create_string_buffer(''.join(map(chr, obj)))
addr = addressof(code)
ps = pythonapi.getpagesize()
# NOTE(review): Python precedence makes this (ps - 1) & (addr + len(code)),
# i.e. the offset of the buffer's end within its page, not a page-rounded
# size — works only if the code fits in one page span; confirm intended.
size=(ps - 1) & addr + len(code)
# addr & -ps rounds the address down to a page boundary; 7 = PROT_READ |
# PROT_WRITE | PROT_EXEC.
if pythonapi.mprotect(addr&-ps, size, 7) < 0:
    print("mprotect error, addr=%#x" % (addr))
    exit()
# Load the n x n matrix of comma-separated ints; n counts the rows.
mat=[]
n=0
for line in open('matrix.txt'):
    mat += map(int, line.split(','))
    n +=1
# Pack the matrix into a contiguous C int array and call the JITted
# function as int p81(int n, void *matrix), printing its result.
p = create_string_buffer(''.join(map(lambda i: pack("i", i), mat)))
print(cast(code, CFUNCTYPE(c_int, c_int, c_void_p))(n, p))
UTF-8
Python
false
false
2,013
15,040,975,476,495
94935cb903f95e717ce98fd0cf3bd8d1dc8ec6e3
c36dcb254a060353f54f396086a63c1f16ab9fb0
/src/neural_network/histogram.py
e331f9719c30e696193b03aca620b7ea8ef5b641
[ "GPL-2.0-only" ]
non_permissive
vmr2117/nnet
https://github.com/vmr2117/nnet
937c43b24c85746140136ce0402dc111db70712b
084c3aa834c757b90fb78633cdb0de3df6b19006
refs/heads/master
2021-01-23T07:02:39.976628
2014-05-21T06:00:22
2014-05-21T06:00:22
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import argparse import cPickle as pickle import numpy as N import pylab as P if __name__ == '__main__': parser = argparse.ArgumentParser(description='Compare layerwise weights of \ two neural network models using hinton \ diagrams. Compares corresponding weights \ at sampled indices.') parser.add_argument('weights_1', help='pickle file containing neural network \ model 1') parser.add_argument('weights_2', help='pickle file containing neural network \ model 2') parser.add_argument('title', help='title for the comparison plot') parser.add_argument('image_path', help='path format to output images') args = parser.parse_args() model_1 = pickle.load(open(args.weights_1, 'rb')) model_2 = pickle.load(open(args.weights_2, 'rb')) fig = P.figure() for ind, (wts_1, wts_2) in enumerate(zip(model_1, model_2)): P.subplot(2, 2, ind*2 + 1) P.hist(wts_1.flatten()) P.title('Initial Weights: Layer '+ str(ind+1)) P.subplot(2, 2, ind*2 + 2) P.hist(wts_2.flatten()) P.title('Final Weights: Layer '+ str(ind+1)) P.suptitle(args.title) P.savefig(args.image_path)
UTF-8
Python
false
false
2,014
4,518,305,631,780
866cd41bb8573807fe4640b9c528ab69188ad031
525a6e12afe74e69c75c2ae7332a20800677f53f
/main/views.py
49cb1b5fb3a57a90e9716f4b53ab326aa8c6f02a
[]
no_license
krbnr/uruguay-critica
https://github.com/krbnr/uruguay-critica
b892ca58c2923d7f9a34fc582466616c98b57ae6
c6d53ff6b3128c494178784c7c138d586131e402
refs/heads/master
2016-09-10T14:34:48.329068
2013-03-02T16:03:13
2013-03-02T16:03:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.shortcuts import render_to_response from django.http import HttpResponseRedirect, HttpResponse from django.template import RequestContext from main.models import pelicula def index(request): return render_to_response('index.html', context_instance=RequestContext(request)) def peliView(request, nombrepeli = None): #raise Exception(nombrepeli) if nombrepeli: #TODO get movie or none #peli = get_pelicula()pelicula.objects.get(titulo=nombrepeli) peli = pelicula.objects.get(titulo=nombrepeli) if peli: return render_to_response('pelicula.html', { 'Pelicula' : peli, }, context_instance=RequestContext(request)) else: resp = "<html><head></head><body><h1>No se encontro la pelicula buscada %s.</h1></body></html>" % nombrepeli return HttpResponse(resp) else: return render_to_response('pelicula.html', context_instance=RequestContext(request)) def peliculasView(request): return render_to_response('peliculas.html', context_instance=RequestContext(request)) def user(request, NombrePelicula): Nombreusuario = str(NombrePelicula) if NombrePelicula: resp = "<html><head></head><body><p>Pelicula: %s</p></body></html>" % (Nombreusuario) return HttpResponse(resp) else: resp = "<html><head></head><body><p>Peliculas index%s</p></body></html>" % (Nombreusuario) return HttpResponse(resp) #TODO get the movie or return none """ def get_pelicula(arg) """
UTF-8
Python
false
false
2,013
3,015,067,071,912
8c3b7571b6aee8b98af4b981f89271e4d80c4c54
6f7fbf2c5d785c3db9c7f8949d8f5d6755de7462
/sitepaths/sitemap.py
91c89edb2307d7640a63c247dd72da0f45448ee4
[ "BSD-3-Clause" ]
permissive
StuartMacKay/django-sitepaths
https://github.com/StuartMacKay/django-sitepaths
4a9add281876da23e85b20ef8d5fc605e34a0ae7
f07fa810f0bbcc5e9f086d32e5930451e6acb64a
refs/heads/master
2016-09-05T23:52:59.074440
2012-10-14T09:01:01
2012-10-14T09:01:01
6,162,196
2
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf import settings from django.contrib.sitemaps import Sitemap from .models import Sitepath class SitepathSitemap(Sitemap): def items(self): items = {} objects = Sitepath.objects.filter(type='page', sitemap=True) if hasattr(settings, 'SITE_ID'): objects = objects.filter(site_id=settings.SITE_ID) for obj in objects: if not obj.referrer: if not obj.location in items: obj.items = [] items[obj.location] = obj else: # TODO report duplicate location pass for obj in objects: if obj.referrer: if obj.referrer in items: items[obj.referrer].items.append(obj) else: # TODO report missing landing page pass return items.values() def changefreq(self, obj): return obj.frequency def priority(self, obj): return obj.priority def lastmod(self, obj): return obj.modified def location(self, obj): return obj.location
UTF-8
Python
false
false
2,012
3,375,844,330,809
5917e3c0783bff374a59196c81c6d499a8aebdbb
364249d5c7e9af7a7fd5d6122d4489fa5250919c
/salest/core/payments.py
1180de5b629278dd0fc7407b56bc3e088c68bdcf
[ "GPL-3.0-or-later" ]
non_permissive
anvil8/salest
https://github.com/anvil8/salest
b67cbaee6edf4cdfae77bd31191f5bd05ace213b
a25b9ab5ff2fab309b5d8b85b4c46d0e60f71410
refs/heads/master
2020-05-30T14:23:47.923820
2012-08-07T11:29:27
2012-08-07T11:29:27
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
""" This module should consists of Basic Payment Management functionality """ class BasePaymentProcessorManager(object): """ This class should convert any order information to PaymentInfo instance that is a standart class for input data to any Payment processor. Then he should get the necessary processor and make payments using the selected payment processor.""" def __init__(self, order_info): """ init """ self.order_info = order_info prepared_data = self.get_data_dict(order_info) payment_info = PaymentInfo(prepared_data) self.payment_info = payment_info self.processor = self.get_processor(self.order_info) def run(self): """ run Payment process """ processor = self.processor processor.prepare_data(self.payment_info) return processor.process() def get_order_info(self, order_info): """ """ return order_info or self.order_info def get_processor(self, order_info): """ This method shound return necessary payment processor""" raise NotImplementedError('get_processor must be overrided according \ to Your data schema') def get_data_dict(self, order_info=None): """ This method should return dict that would be understandable by PaymentInfo and converted to it. Here is example of dict - { 'firts_name': 'Chris', 'last_name': 'Smith', 'phone': ''801-555-9242'', 'address': '123 Main Street', 'city': 'New York', 'state': 'NY', 'country': 'US', 'post_code': '12345', 'credit_type': 'VISA', 'credit_number': '4111111111111111', 'credit_ccv': '123', 'credit_expire_year': '2012', 'id': 12, 'order_cost': '1.00', 'order_description': 'Some stuff', } """ raise NotImplementedError('get_data_dict must be overrided according \ to Your data schema') class PaymentInfo(object): """ This class should store information that should be input to payment processor and processed there. 
""" def __init__(self, data_dict): """ This method setup basic info of this PaymentInfo and should get such dict to setup necessary attributes - { # USER INFO 'firts_name': 'Chris', 'last_name': 'Smith', 'phone': ''801-555-9242'', # LOCATION INFO 'address': '123 Main Street', 'city': 'New York', 'state': 'NY', 'country': 'US', 'post_code': '12345', # CREDIT CARD INFO 'credit_type': 'VISA', 'credit_number': '4111111111111111', 'credit_ccv': '123', 'credit_expire_month': '10', 'credit_expire_year': '2012', # ORDER INFO 'id': 12, 'order_cost': '1.00', 'order_description': 'Some stuff', } Also all values of basic dict accessible as PaymentInfo instance attribute info = PaymentInfo({'x':1}) info.data_dict.get('x') >> 1 info.x >> 1 """ self.data_dict = data_dict def __getattr__(self, name): """ This method redefined to get access to basic dict keys as PaymentInfo attributes. """ return self.data_dict[name] def get_full_name(self): """ This method returns card holder full name. """ return "%s %s" % (self.firts_name, self.last_name) class ProcessorResult(object): """ Instance of this class should be returned from payment processor. """ def __init__(self, processor, success, message, payment=None): """Initialize with: processor - the key of the processor setting the result success - boolean message - a lazy string label, such as _('OK) payment - an OrderPayment or OrderAuthorization """ self.success = success self.processor = processor self.message = message self.payment = payment def __unicode__(self): """ Unicode """ status = 'Success' if self.success else 'Failure' return u"ProcessorResult: %s [%s] %s" % (self.processor, status, self.message) def print_result(self): """ print results """ print { 'key': self.processor, "status": self.success, 'msg': self.message, 'payment': self.payment, }
UTF-8
Python
false
false
2,012
11,046,655,892,996
88b9b843f1dc06e89315e5f23d2a035b7e72c132
2f0cac2d094ecb700c7c92a638eba417eaffe4de
/mishapp_api/database.py
9b70b8a3faa92925e31f5cdce747d3e3e1c7a852
[]
no_license
gudeg-united/mishapp-api-hackathon
https://github.com/gudeg-united/mishapp-api-hackathon
decb3b69aa525335c705944f102bf9c7e1ac87c1
282c59173b41fdc24959f38a6b1699756ecf2780
refs/heads/master
2021-01-16T01:01:38.534588
2014-12-07T20:27:50
2014-12-07T20:27:50
33,819,419
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import json from datetime import datetime from Pubnub import Pubnub from flask.ext.mongoengine import MongoEngine from flask import current_app db = MongoEngine() class Disaster(db.Document): source = db.StringField(required=True) source_id = db.StringField(required=True) type = db.StringField() properties = db.DictField() geometry = db.DictField() modified_at = db.DateTimeField(default=datetime.utcnow) meta = { "indexes": [[("geometry", "2dsphere")]], } def asdict(self): return { "id": "{}".format(self["id"]), "source": self["source"], "source_id": self["source_id"], "type": self["type"], "properties": self["properties"], "geometry": self["geometry"], "modified_at": self["modified_at"].isoformat(), } @classmethod def create_unique(cls, **fields): # TODO: use upsert! if not cls.objects(source=fields["source"], source_id=fields["id"]).count(): # noqa disaster = Disaster() disaster.source = fields["source"] disaster.source_id = fields["id"] disaster.type = "Feature" disaster.properties = fields["properties"] disaster.geometry = fields["geometry"] disaster.save() return disaster @classmethod def post_save(cls, sender, document, **kwargs): cfg = current_app.config pubnub = Pubnub( publish_key=cfg["PUBNUB_PUB_KEY"], subscribe_key=cfg["PUBNUB_SUB_KEY"], ssl_on=False, ) pubnub.publish(channel="globaldisaster", message=json.dumps(document.asdict())) from mongoengine import signals signals.post_save.connect(Disaster.post_save, sender=Disaster)
UTF-8
Python
false
false
2,014
7,679,401,531,684
ca5a6006497000055ba56de949250ab44b8c39d8
362b2669b19c4f9f8817a2e13f186f66cd47c96a
/src/py/models/ideal.py
62d8dcc3ec7f3a4ba4d07c3a89e38005534d4a29
[]
no_license
andreaspetrovic/mrifit
https://github.com/andreaspetrovic/mrifit
812c029ae41119089cfeb6d2d51cd8300fc7ce6d
91f8f6579c1bf6663991cb20a7782e5fb9c39a37
refs/heads/master
2020-05-31T07:35:13.077737
2014-07-24T14:51:56
2014-07-24T14:51:56
25,912,320
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
##@package ideal
# Functionality to perform IDEAL fat water separation
#
# Implemented according to S. B. Reeder et al., Multicoil Dixon Chemical
# Species Separation With an Iterative Least-Squares Estimation Method,
# Magn Reson Med, 51:35-45, (2004)

import numpy as np
import numpy.ma as ma
import sys
import numpy.random as ra


# ----------------------------------------------------------------------------
## Model for IDEAL fat-water separation
#  Has functions to compute the system matrices A and B
#  as well as intermediate least squares result u
#  FIXXXME: remove echo time length hardcoded (matrix reshaping) --
#  the (4,6)/(5,6) reshapes below assume exactly 3 echo times.
class IDEAL:

    ## Constructor
    #@param data numpy array holding sample points of complex fat water signal
    #@param te numpy array of sampling times (echo times); assumed length 3
    def __init__(self, data, te):
        self.te = te
        self.omega = 0          # accumulated field-map estimate
        self.data = data
        self.temp = 0
        # fat offset frequency in Hz
        freq = 460
        self.freq = 460
        # fat coefficients (cos/sin of fat phase evolution at each TE)
        self.c1n = np.cos(2 * np.pi * freq * te)
        self.d1n = np.sin(2 * np.pi * freq * te)
        # water coefficients (water is on-resonance: cos=1, sin=0)
        # BUG FIX: the original used the global names `time` and `signal`
        # (only defined in the __main__ test script) instead of the
        # constructor arguments, so the class raised NameError when used
        # on its own; use te/data here.
        self.c2n = np.ones(len(te))
        self.d2n = np.zeros(len(te))
        self.fresh = 0          # set to 1 once setU() has been called
        self.deltaomega = 0     # field-map increment of last iteration
        # vector to hold results of first least squares solution
        self.u = np.zeros(4)
        # data prepared as stacked [real; imag] vector for least squares
        self.signalvec = np.concatenate((data.real, data.imag), axis=0)

    ## Returns Least Squares Matrix A
    #@return 6x4 numpy matrix for first least squares step
    #  (built as 4x6 then transposed; comment fixed from "4x6")
    def getA(self):
        A = np.concatenate((self.c1n, self.d1n, -self.d1n, self.c1n,
                            self.c2n, self.d2n, -self.d2n, self.c2n), axis=0)
        A.shape = (4, 6)
        A = A.T
        return A

    ## Returns Least Squares Matrix B
    #@return 6x5 numpy matrix for second least squares step
    #  (built as 5x6 then transposed; comment fixed from "5x6")
    def getB(self):
        # gradient rows w.r.t. the field-map term, linearized around u
        gr = 2 * np.pi * self.te * (-self.u[0] * self.d1n - self.u[1] * self.c1n
                                    - self.u[2] * self.d2n - self.u[3] * self.c2n)
        gi = 2 * np.pi * self.te * (self.u[0] * self.c1n - self.u[1] * self.d1n
                                    + self.u[2] * self.c2n - self.u[3] * self.d2n)
        C = np.concatenate((gr, gi, self.c1n, self.d1n, -self.d1n, self.c1n,
                            self.c2n, self.d2n, -self.d2n, self.c2n), axis=0)
        C.shape = (5, 6)
        C = C.T
        return C

    ## Returns delta omega of last iteration
    #@return delta omega
    def getDeltaOmega(self):
        return self.deltaomega

    ## Sets the intermediate result u, real and imaginary parts of fat and water signal
    #@param u numpy array (1x4)
    def setU(self, u):
        self.u = u
        self.fresh = 1

    ## Update field map at end of iteration
    #@param v numpy array (1x5): [delta_omega, fat_r, fat_i, water_r, water_i]
    def updateOmega(self, v):
        self.omega += v[0]
        self.deltaomega = v[0]
        # demodulate the accumulated field map from the data
        omegafact = np.exp(2 * np.pi * 1j * self.omega * self.te)
        # dummyr/dummyi are currently unused (the += line below is
        # intentionally commented out in the original)
        dummyr = v[1] * self.c1n - v[2] * self.d1n + v[3] * self.c2n - v[4] * self.d2n
        dummyi = v[1] * self.d1n + v[2] * self.c1n + v[3] * self.d2n + v[4] * self.c2n
        #self.data += dummyr + 1j*dummyi;
        self.data = self.data / omegafact
        self.signalvec = np.concatenate((self.data.real, self.data.imag), axis=0)

    ## Stacked [real; imag] measurement vector
    def getSignalVector(self):
        return self.signalvec

    ## Residual vector: measurement minus model prediction from u
    def getSignalVector2(self):
        dummyr = np.array(self.u[0] * self.c1n - self.u[1] * self.d1n
                          + self.u[2] * self.c2n - self.u[3] * self.d2n)
        dummyi = np.array(self.u[0] * self.d1n + self.u[1] * self.c1n
                          + self.u[2] * self.d2n + self.u[3] * self.c2n)
        return self.signalvec - np.concatenate((dummyr, dummyi))


# ----------------------------------------------------------------------------
## Class for least squares optimization of IDEAL data
class IDEALOptimizer:

    ## Constructor
    #@param idealmodel IDEAL model object
    def __init__(self, idealmodel):
        self.ideal = idealmodel
        self.iterationcount = 0

    ## Function to start least squares optimization.
    #  Iterates the two-step least squares until the field-map update
    #  drops to <= 0.6 rad/s.
    #  NOTE(review): the test uses a signed comparison, so a large
    #  *negative* delta omega also terminates -- possibly abs() was
    #  intended; behavior kept as-is.
    def optimize(self):
        # BUG FIX: the original loop condition referenced the global
        # `ideal` (from the __main__ script) instead of self.ideal.
        while (self.ideal.fresh == 0 or self.ideal.getDeltaOmega() > 0.6):
            A = self.ideal.getA()
            data = self.ideal.getSignalVector()
            u = np.linalg.lstsq(A, data)[0]
            self.ideal.setU(u)
            B = self.ideal.getB()
            data = self.ideal.getSignalVector2()
            v = np.linalg.lstsq(B, data)[0]
            v[1:len(v)] += u
            self.ideal.updateOmega(v)
            self.iterationcount += 1
        self.result = v[1:len(v)]
        self.omega = self.ideal.omega


# ----------------------------------------------------------------------------
if __name__ == '__main__':
    # create test signal ==========================================
    time = np.array([1, 3, 5]) * 0.001
    print(time)
    freq = 460
    omega = 2 * np.pi * 460
    fat = 23.4 * np.exp(1j * time * omega)
    water = 10.05 * np.ones(len(time))
    signal = (fat + water) * np.exp(1j * 2 * np.pi * 10 * time)
    print(signal)
    signal += (ra.random(3) + ra.random(3) * 1j) * 0.0  # noise disabled

    # reconstruction =============================================
    ideal = IDEAL(signal, time)
    while (ideal.fresh == 0 or ideal.getDeltaOmega() > 0.6):
        A = ideal.getA()
        data = ideal.getSignalVector()
        u = np.linalg.lstsq(A, data)[0]
        ideal.setU(u)
        B = ideal.getB()
        data = ideal.getSignalVector2()
        v = np.linalg.lstsq(B, data)[0]
        v[1:len(v)] += u
        ideal.updateOmega(v)

    # Print results ================================================
    np.set_printoptions(suppress=True, precision=2)
    print("RESULT:")
    print("coefficients: %s" % np.array_str(v))
    print("omega: %s" % np.array_str(ideal.omega))

    # same reconstruction via the optimizer class
    ideal = IDEAL(signal, time)
    optim = IDEALOptimizer(ideal)
    optim.optimize()
    print(optim.result)
    print(optim.omega)

    ##############################################################
    ##############################################################
    sys.exit()

    # Everything below is unreachable scratch code experimenting with
    # numpy masked arrays / nditer; kept verbatim for reference.
    b = []
    a = np.array([1.0, 1.0, 100.0, 1.0, 200.0, 3.0, 400.0, 30.0])
    a.shape = (2, 4)
    b = ma.array(a, mask=[a[0] <= 3, a[0] < 3])
    print(a[0])
    print(a)
    print(b)
    print(a > 3.0)
    print(b.shape)
    res = ma.array(np.zeros(b.shape), mask=b.mask)
    it = np.nditer([b[~b.mask], None])
    print("Bmask:")
    print(b[~b.mask])
    count = 0
    for i, o in it:
        print(i)
        o[...] = i * 2
        count += 1
    res[~res.mask] = it.operands[1]
    print(res)
    print(b)
UTF-8
Python
false
false
2,014
19,378,892,473,393
52faa4eaaf391d0d86547167ada0c06831819905
1f61f009b4b70cfd8dc05ea50a4238d8e236c259
/EasyMerge/merger/type3_dealer.py
0bd870f3fa05a602ed9f67c72f54f38057b85bef
[ "MIT" ]
permissive
h2oloopan/easymerge
https://github.com/h2oloopan/easymerge
3e3fe651bdd4bc13ba81cf2bd339108c2e79cc67
980307bfc7da40dd1bb752371b000f362edb2dda
refs/heads/master
2021-01-01T20:17:15.301839
2014-03-26T15:37:21
2014-03-26T15:37:21
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
''' Created on Mar 15, 2014 @author: h7qin ''' "Usage: unparse.py <path to source file>" import sys import ast import cStringIO import os # Large float and imaginary literals get turned into infinities in the AST. # We unparse those infinities to INFSTR. INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) build_in_name = ["abs", "divmod", "input", "open", "staticmethod", "all", "enumerate", "int", "ord", "str", "any", "eval", "isinstance", "pow", "sum", "basestring", "execfile", "issubclass", "print", "super", "bin", "file", "iter", "property", "tuple", "bool", "filter", "len", "range", "type", "bytearray", "float", "list", "raw_input", "unichr", "callable", "format", "locals", "reduce", "unicode", "chr", "frozenset", "long", "reload", "vars", "classmethod", "getattr", "map", "repr", "xrange", "cmp", "globals", "max", "reversed", "zip", "compile", "hasattr", "memoryview", "round", "__import__", "complex", "hash", "min", "set", "apply", "delattr", "help", "next", "setattr", "buffer", "dict", "hex", "object", "slice", "coerce", "dir", "id", "oct", "sorted", "intern", "None", "False", "True"] def interleave(inter, f, seq): """Call f on each item in seq, calling inter() in between. 
""" seq = iter(seq) try: f(next(seq)) except StopIteration: pass else: for x in seq: inter() f(x) class Code: s = "" def __init__(self, id): self.id = id def write(self, s): self.s+=s def flush(self): pass def split(self): self.code_lines = self.s.splitlines() #self.code_lines = filter(lambda a: a != "", self.code_lines) while self.code_lines[0]=="": self.code_lines = self.code_lines[1:] def generate_var_set(self, vars): self.orig_vars = vars self.var_set = [] for i in range(len(vars)): self.var_set.append(i) var = vars[i] for j in range(0,i): cur_var = vars[j] if cur_var==var: self.var_set[-1] = j break #print self.var_set def add_parameter(self, vars, bins): self.generate_var_set(vars) self.orig_bins = bins param = [] def merge_vars(vars): var_dict = {} for i in vars: var_dict[i]=1 return var_dict self.vars = merge_vars(vars) if "self" in self.vars: param.append("self") for i in self.vars: if i=="self": continue if not i.startswith("new@"): param.append(i) self.param = param def write_head_line(self,vars, bins): self.add_parameter(vars, bins) func_name = "helper"+str(self.id) if len(self.param)>0: param = "(" for i in self.param: param += i+", " param = param[:-2]+")" else: param = "()" headline = "def "+func_name+param+":" for i in range(len(self.code_lines)): self.code_lines[i] = " "+self.code_lines[i] self.code_lines = [headline]+self.code_lines def rewrite_head_line(self, new_param): func_name = "helper"+str(self.id) if len(new_param)>0: param = "(" for i in new_param: param += (i+", ") param = param[:-2]+")" else: param = "()" headline = "def "+func_name+param+":" self.code_lines[0] = headline def add_rets(self, return_vars): def merge_vars(vars): var_dict = {} for i in vars: var_dict[i]=1 return var_dict ret_vars = [] for i in merge_vars(return_vars): ret_vars.append(i) self.return_vars = ret_vars def write_return_line(self,return_vars): self.add_rets(return_vars) ret_line = " return (" if len(self.return_vars)>1: for i in self.return_vars: ret_line+=(i+", 
") ret_line = ret_line[:-2]+")" elif len(self.return_vars)==1: ret_line = ret_line[:-1]+self.return_vars[0] else: ret_line = ret_line[:-2] self.code_lines.append(ret_line) self.receiver = ret_line.strip()[6:].strip() def rewrite_return_line(self, new_ret): ret_line = " return (" if len(new_ret)>1: for i in new_ret: ret_line+=(i+", ") ret_line = ret_line[:-2]+")" elif len(new_ret)==1: ret_line = ret_line[:-1]+new_ret[0] else: ret_line = ret_line[:-2] self.code_lines[-1] = ret_line def get_caller(self): func_name = "helper"+str(self.id) s2 = "" if self.receiver: s2 += self.receiver+" = " s2 += self.code_lines[0][4:-1] self.caller = s2 def reget_caller(self, new_caller, new_receiver): func_name = "helper"+str(self.id) if len(new_caller)>0: paramline = "(" for i in new_caller: paramline += i+", " paramline = paramline[:-2]+")" else: paramline = "()" ret_line = "(" if len(new_receiver)>1: for i in new_receiver: ret_line+=(i+", ") ret_line = ret_line[:-2]+")" elif len(new_receiver)==1: ret_line = ret_line[:-1]+new_receiver[0] else: ret_line = "" s2 = "" if ret_line!="": s2 += ret_line+" = " s2 += func_name+paramline self.caller = s2 def rewrite_code(self, new_vars, diff_bins): for j in range(len(new_vars)): for i in range(1,len(self.code_lines)-1): line = self.code_lines[i] tag = self.orig_vars[j] if tag.startswith("new@"): tag = tag[4:] tag+=("@"+str(j)) if line.find(tag)>=0: line = line.replace(tag,new_vars[j]) break self.code_lines[i] = line for j in range(len(self.orig_bins)): for i in range(1,len(self.code_lines)-1): line = self.code_lines[i] tag = self.orig_bins[j] tag+=("@bin"+str(j)) new_tag = self.orig_bins[j] if j in diff_bins: id = diff_bins.index(j) new_tag = "diff_params["+str(id)+"]" if line.find(tag)>=0: line = line.replace(tag,new_tag) break self.code_lines[i] = line def get_code(self): s = "" for i in self.code_lines: s+=i+"\n" return s def output(self,file = sys.stdout): self.f = file self.f.write("======================\n") 
self.f.write(self.caller+"\n") for i in self.code_lines: self.f.write(i+"\n") self.f.flush() class Unparser: """Methods in this class recursively traverse an AST and output source code for the abstract syntax; original formatting is disregarded. """ def __init__(self, tree, lines, src_ast, file = sys.stdout): """Unparser(tree, file=sys.stdout) -> None. Print the source for tree to file.""" self.calls = self.crop_calls(lines, src_ast) self.variable, self.return_vars = self.crop_vars(lines, src_ast) self.mod_calls = [] self.functions = src_ast.functions self.classes = src_ast.classes self.lines = lines self.cur_call = -1 self.incall = False self.cur_str = "" self.ret_str = False self.top_level = True self.args = [] self.all_name = [] self.bins = [] self.vars = [] self.is_func_name = False self.f = file #for i in src_ast.raw_scope: # print i '''for i in self.calls: self.f.write("=======================\n") self.f.write("Name:"+str(i.name)+"\n") self.f.write("line:"+str(i.line)+"\n") self.f.write("scope:"+str(i.scope)+"\n") self.f.write("source:"+str(i.source)+"\n") self.f.write("tree:"+str(i.tree)+"\n") ''' self.f = file self.future_imports = [] self._indent = 0 self.dispatch(tree) self.f.write("\n") self.f.flush() rm_ret_var = [] for i in self.return_vars: if i in self.mod_calls: rm_ret_var.append(i) for i in rm_ret_var: self.return_vars.remove(i) #print "mod_calls:",self.mod_calls #print "new_vars:",self.vars '''for i in range(len(self.variable)): if self.variable[i].startswith("new@"): self.variable[i]=""''' #print "old_vars:",self.variable if self.vars!=self.variable: print "ERROR" #print "return_vars:",self.return_vars #print "" def crop_calls(self, lines, src_ast): calls = [] for i in src_ast.calls: if i.line<lines[0] or i.line>lines[1]: continue else: calls.append(i) return calls def crop_vars(self, lines, src_ast): #for i in src_ast.functions: # print i.name, i.env new_imports = [] for i in src_ast.raw_imports: new_imports.append(i[0]) if i[1]: 
new_imports.append(i[1]) vars = [] return_vars = [] for i in src_ast.var: if i[0]<lines[0] or i[0]>lines[1]: continue else: if i[1] in build_in_name: continue if i[1]=="self": vars.append(i[1]) return_vars.append(i[1]) continue if i[1] in new_imports: vars.append(i[1]) continue is_class = False for j in src_ast.classes: if i[1]==j.name: vars.append(i[1]) is_class = True break if is_class: continue env = ["Global", "Function"] is_func = False for j in src_ast.functions: if i[1]==j.name: if j.env[-1][0] in env: vars.append(i[1]) is_func = True break if is_func: continue vars.append("new@"+i[1]) for j in src_ast.var: if j[1]==i[1] and j[0]<lines[0]: #-1? if j[2]==-1: #print "global",j vars[-1]=i[1] break scope_i = src_ast.raw_scope[i[2]] scope_j = src_ast.raw_scope[j[2]] if len(scope_j)<=len(scope_i) and scope_j==scope_i[:len(scope_j)]: #print i,j vars[-1]=i[1] break for j in src_ast.var: if j[1]==i[1] and j[0]>lines[1]: #-1? if i[2]==-1: #print "return global",i return_vars.append(i[1]) break scope_i = src_ast.raw_scope[i[2]] scope_j = src_ast.raw_scope[j[2]] if len(scope_j)>=len(scope_i) and scope_i==scope_j[:len(scope_i)]: #print "return:", i,j return_vars.append(i[1]) break #print src_ast.raw_scope[i[2]] #print i #print vars return vars,return_vars def call_dealer(self,tree): #self.write("CALL_HERE"+str(tree.lineno)+","+str(tree.col_offset)) def process_mod_call(call): self.is_func_name = True self.ret_str = True self.dispatch(tree.func) self.f.write(self.cur_str) self.ret_str = False self.is_func_name = False #print "@"+self.cur_str, #self.write("unreachable_method["+str(len(self.mod_calls))+"]") #self.write("$CALL:"+str(call.source)+"$") if not self.cur_str in build_in_name: if self.cur_str.find(".")>0: self.mod_calls.append(self.cur_str[:self.cur_str.find(".")]) else: self.mod_calls.append(self.cur_str) self.cur_str = "" self.cur_call+=1 call = self.calls[self.cur_call] if isinstance(call.source, tuple): source = call.source else: source = ("Unknown", 
call.source) if source==("Unknown",-1) or source==("member",-1): return False else: #call import process_mod_call(call) return True def fill(self, text = ""): "Indent a piece of text, according to the current indentation level" self.f.write("\n"+" "*self._indent + text) def write(self, text): "Append a piece of text to the current line." if not self.ret_str: self.f.write(text) else: self.cur_str+=(text) def enter(self): "Print ':', and increase the indentation." self.write(":") self._indent += 1 def leave(self): "Decrease the indentation level." self._indent -= 1 def dispatch(self, tree): "Dispatcher function, dispatching tree type T to method _T." if isinstance(tree, list): for t in tree: self.dispatch(t) return meth = getattr(self, "_"+tree.__class__.__name__) meth(tree) ############### Unparsing methods ###################### # There should be one method per concrete grammar type # # Constructors should be grouped by sum type. Ideally, # # this would follow the order in the grammar, but # # currently doesn't. # ######################################################## def _Module(self, tree): for stmt in tree.body: self.dispatch(stmt) # stmt def _Expr(self, tree): self.fill() self.dispatch(tree.value) def _Import(self, t): self.fill("import ") interleave(lambda: self.write(", "), self.dispatch, t.names) def _ImportFrom(self, t): # A from __future__ import may affect unparsing, so record it. if t.module and t.module == '__future__': self.future_imports.extend(n.name for n in t.names) self.fill("from ") self.write("." 
* t.level) if t.module: self.write(t.module) self.write(" import ") interleave(lambda: self.write(", "), self.dispatch, t.names) def _Assign(self, t): self.fill() for target in t.targets: self.dispatch(target) self.write(" = ") self.dispatch(t.value) def _AugAssign(self, t): self.fill() self.dispatch(t.target) self.write(" "+self.binop[t.op.__class__.__name__]+"= ") self.dispatch(t.value) def _Return(self, t): self.fill("return") if t.value: self.write(" ") self.dispatch(t.value) def _Pass(self, t): self.fill("pass") def _Break(self, t): self.fill("break") def _Continue(self, t): self.fill("continue") def _Delete(self, t): self.fill("del ") interleave(lambda: self.write(", "), self.dispatch, t.targets) def _Assert(self, t): self.fill("assert ") self.dispatch(t.test) if t.msg: self.write(", ") self.dispatch(t.msg) def _Exec(self, t): self.fill("exec ") self.dispatch(t.body) if t.globals: self.write(" in ") self.dispatch(t.globals) if t.locals: self.write(", ") self.dispatch(t.locals) def _Print(self, t): self.fill("print ") do_comma = False if t.dest: self.write(">>") self.dispatch(t.dest) do_comma = True for e in t.values: if do_comma:self.write(", ") else:do_comma=True self.dispatch(e) if not t.nl: self.write(",") def _Global(self, t): self.fill("global ") interleave(lambda: self.write(", "), self.write, t.names) def _Yield(self, t): self.write("(") self.write("yield") if t.value: self.write(" ") self.dispatch(t.value) self.write(")") def _Raise(self, t): self.fill('raise ') if t.type: self.dispatch(t.type) if t.inst: self.write(", ") self.dispatch(t.inst) if t.tback: self.write(", ") self.dispatch(t.tback) def _TryExcept(self, t): self.fill("try") self.enter() self.dispatch(t.body) self.leave() for ex in t.handlers: self.dispatch(ex) if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _TryFinally(self, t): if len(t.body) == 1 and isinstance(t.body[0], ast.TryExcept): # try-except-finally self.dispatch(t.body) else: 
self.fill("try") self.enter() self.dispatch(t.body) self.leave() self.fill("finally") self.enter() self.dispatch(t.finalbody) self.leave() def _ExceptHandler(self, t): self.fill("except") if t.type: self.write(" ") self.dispatch(t.type) if t.name: self.write(" as ") self.dispatch(t.name) self.enter() self.dispatch(t.body) self.leave() def _ClassDef(self, t): self.write("\n") for deco in t.decorator_list: self.fill("@") self.dispatch(deco) self.fill("class "+t.name) if t.bases: self.write("(") for a in t.bases: self.dispatch(a) self.write(", ") self.write(")") self.enter() self.dispatch(t.body) self.leave() def _FunctionDef(self, t): self.write("\n") for deco in t.decorator_list: self.fill("@") self.dispatch(deco) self.fill("def "+t.name + "(") self.dispatch(t.args) self.write(")") self.enter() self.dispatch(t.body) self.leave() def _For(self, t): self.fill("for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _If(self, t): self.fill("if ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() # collapse nested ifs into equivalent elifs. 
while (t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If)): t = t.orelse[0] self.fill("elif ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() # final else if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _While(self, t): self.fill("while ") self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill("else") self.enter() self.dispatch(t.orelse) self.leave() def _With(self, t): self.fill("with ") self.dispatch(t.context_expr) if t.optional_vars: self.write(" as ") self.dispatch(t.optional_vars) self.enter() self.dispatch(t.body) self.leave() # expr def _Str(self, tree): # if from __future__ import unicode_literals is in effect, # then we want to output string literals using a 'b' prefix # and unicode literals with no prefix. if "unicode_literals" not in self.future_imports: self.write(repr(tree.s)+"@bin"+str(len(self.bins))) self.bins.append(repr(tree.s)) elif isinstance(tree.s, str): self.write("b" + repr(tree.s)+"@bin"+str(len(self.bins))) self.bins.append("b" + repr(tree.s)) elif isinstance(tree.s, unicode): self.write(repr(tree.s).lstrip("u")+"@bin"+str(len(self.bins))) self.bins.append(repr(tree.s).lstrip("u")) else: assert False, "shouldn't get here" def _Name(self, t): self.all_name.append(t.id) #if t.id=="ui": #print "ui=======",self.rec_var if t.id in build_in_name: self.write(t.id+"@bin"+str(len(self.bins))) self.bins.append(t.id) else: idx = len(self.vars) should_var = self.variable[idx] if should_var.startswith("new@"): should_id = should_var[4:] if should_id!=t.id: self.write(t.id) else: self.vars.append("new@"+t.id) self.write(t.id+"@"+str(idx)) else: if t.id!=should_var: self.write(t.id) else: self.vars.append(t.id) self.write(t.id+"@"+str(idx)) '''if not self.is_func_name: #print self.variable #print self.vars #print self.incall #print self.is_func_name #print t.id if t.id in build_in_name: self.write(t.id) elif 
self.variable[len(self.vars)]!="new" : if t.id==self.variable[len(self.vars)]: self.vars.append(t.id) self.write(t.id)#+"@VAR") else: self.vars.append("") self.write(t.id) else: #del self.variable[len(self.vars)] self.write(t.id)''' def _Repr(self, t): self.write("`") self.dispatch(t.value) self.write("`") def _Num(self, t): tmp_s = "" repr_n = repr(t.n) # Parenthesize negative numbers, to avoid turning (-1)**2 into -1**2. if repr_n.startswith("-"): self.write("(") tmp_s += "(" # Substitute overflowing decimal literal for AST infinities. self.write(repr_n.replace("inf", INFSTR)) tmp_s += (repr_n.replace("inf", INFSTR)) if repr_n.startswith("-"): self.write(")") tmp_s += ")" self.write("@bin"+str(len(self.bins))) self.bins.append(tmp_s) def _List(self, t): self.write("[") interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write("]") def _ListComp(self, t): self.write("[") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write("]") def _GeneratorExp(self, t): self.write("(") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write(")") def _SetComp(self, t): self.write("{") self.dispatch(t.elt) for gen in t.generators: self.dispatch(gen) self.write("}") def _DictComp(self, t): self.write("{") self.dispatch(t.key) self.write(": ") self.dispatch(t.value) for gen in t.generators: self.dispatch(gen) self.write("}") def _comprehension(self, t): self.write(" for ") self.dispatch(t.target) self.write(" in ") self.dispatch(t.iter) for if_clause in t.ifs: self.write(" if ") self.dispatch(if_clause) def _IfExp(self, t): self.write("(") self.dispatch(t.body) self.write(" if ") self.dispatch(t.test) self.write(" else ") self.dispatch(t.orelse) self.write(")") def _Set(self, t): assert(t.elts) # should be at least one element self.write("{") interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write("}") def _Dict(self, t): self.write("{") def write_pair(pair): (k, v) = pair self.dispatch(k) self.write(": ") 
self.dispatch(v) interleave(lambda: self.write(", "), write_pair, zip(t.keys, t.values)) self.write("}") def _Tuple(self, t): self.write("(") if len(t.elts) == 1: (elt,) = t.elts self.dispatch(elt) self.write(",") else: interleave(lambda: self.write(", "), self.dispatch, t.elts) self.write(")") unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} def _UnaryOp(self, t): self.write("(") self.write(self.unop[t.op.__class__.__name__]) self.write(" ") # If we're applying unary minus to a number, parenthesize the number. # This is necessary: -2147483648 is different from -(2147483648) on # a 32-bit machine (the first is an int, the second a long), and # -7j is different from -(7j). (The first has real part 0.0, the second # has real part -0.0.) if isinstance(t.op, ast.USub) and isinstance(t.operand, ast.Num): self.write("(") self.dispatch(t.operand) self.write(")") else: self.dispatch(t.operand) self.write(")") binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", "LShift":"<<", "RShift":">>", "BitOr":"|", "BitXor":"^", "BitAnd":"&", "FloorDiv":"//", "Pow": "**"} def _BinOp(self, t): self.write("(") self.dispatch(t.left) self.write(" " + self.binop[t.op.__class__.__name__] + " ") self.dispatch(t.right) self.write(")") cmpops = {"Eq":"==", "NotEq":"!=", "Lt":"<", "LtE":"<=", "Gt":">", "GtE":">=", "Is":"is", "IsNot":"is not", "In":"in", "NotIn":"not in"} def _Compare(self, t): self.write("(") self.dispatch(t.left) for o, e in zip(t.ops, t.comparators): self.write(" " + self.cmpops[o.__class__.__name__] + " ") self.dispatch(e) self.write(")") boolops = {ast.And: 'and', ast.Or: 'or'} def _BoolOp(self, t): self.write("(") s = " %s " % self.boolops[t.op.__class__] interleave(lambda: self.write(s), self.dispatch, t.values) self.write(")") def _Attribute(self,t): self.dispatch(t.value) # Special case: 3.__abs__() is a syntax error, so if t.value # is an integer literal then we need to either parenthesize # it or add an extra space to get 3 .__abs__(). 
if isinstance(t.value, ast.Num) and isinstance(t.value.n, int): self.write(" ") self.write(".") self.write(t.attr) def _Call(self, t): mod = False #print "F:=========",t.func._fields cur_incall = self.incall if not self.incall: self.incall = True if self.call_dealer(t): mod = True if not mod: self.dispatch(t.func) '''else: self.is_func_name = True self.ret_str = True self.dispatch(t.func) self.ret_str = False self.mod_calls[-1] = self.cur_str self.cur_str = "" self.is_func_name = False''' self.write("(") if not cur_incall: self.incall = False comma = False for e in t.args: if comma: self.write(", ") else: comma = True self.dispatch(e) for e in t.keywords: if comma: self.write(", ") else: comma = True self.dispatch(e) if t.starargs: if comma: self.write(", ") else: comma = True self.write("*") self.dispatch(t.starargs) if t.kwargs: if comma: self.write(", ") else: comma = True self.write("**") self.dispatch(t.kwargs) self.write(")") def _Subscript(self, t): self.dispatch(t.value) self.write("[") self.dispatch(t.slice) self.write("]") # slice def _Ellipsis(self, t): self.write("...") def _Index(self, t): self.dispatch(t.value) def _Slice(self, t): if t.lower: self.dispatch(t.lower) self.write(":") if t.upper: self.dispatch(t.upper) if t.step: self.write(":") self.dispatch(t.step) def _ExtSlice(self, t): interleave(lambda: self.write(', '), self.dispatch, t.dims) # others def _arguments(self, t): first = True # normal arguments defaults = [None] * (len(t.args) - len(t.defaults)) + t.defaults for a,d in zip(t.args, defaults): if first:first = False else: self.write(", ") if self.top_level: self.args.append(a.id) self.dispatch(a), if d: self.write("=") self.dispatch(d) # varargs if t.vararg: if first:first = False else: self.write(", ") if self.top_level: self.args.append("*"+str(t.vararg)) self.write("*") self.write(t.vararg) # kwargs if t.kwarg: if first:first = False else: self.write(", ") if self.top_level: self.args.append("**"+str(t.kwarg)) 
self.write("**"+t.kwarg) self.top_level = False def _keyword(self, t): self.write(t.arg) self.write("=") self.dispatch(t.value) def _Lambda(self, t): self.write("(") self.write("lambda ") self.dispatch(t.args) self.write(": ") self.dispatch(t.body) self.write(")") def _alias(self, t): self.write(t.name) if t.asname: self.write(" as "+t.asname) def generateNewCode(id, source, lines, src_ast, tag, output=sys.stdout): tree = compile(source, tag[0], "exec", ast.PyCF_ONLY_AST) merged = Code(id) up = Unparser(tree, lines, src_ast, merged) merged.split() merged.write_head_line(up.vars, up.bins) merged.write_return_line(up.return_vars) merged.get_caller() #merged.output() #print up.vars return merged def checkMergable(merged_list): mergable = True for merge in merged_list[1:]: if merge.var_set!=merged_list[0].var_set: mergable = False if len(merge.orig_bins) != len(merged_list[0].orig_bins): mergable = False if mergable: #print "YES MERGABLE" return True else: #print "NOT MERGABLE" return False def generateCommonCode(merged_list): common_vars = [] common_param = [] common_ret = [] new_caller = [] new_receiver = [] for i in merged_list: new_caller.append([]) new_receiver.append([]) def mergeNames(names): new_name = [] for i in names: if i.startswith("new@"): i = i[4:] if not i in new_name: new_name.append(i) if len(new_name)==1: return new_name[0] else: name = "" for i in new_name: name+=(i+"_") name=name[:-1] return name def setTrue(li): for i in li: if i: return True return False for i in range(len(merged_list[0].var_set)): names = [] is_param = [] is_ret = [] for merge in merged_list: names.append(merge.orig_vars[i]) if merge.orig_vars[i] in merge.param: is_param.append(True) else: is_param.append(False) if merge.orig_vars[i] in merge.return_vars: is_ret.append(True) else: is_ret.append(False) name = mergeNames(names) common_vars.append(name) if setTrue(is_param) and not name in common_param: common_param.append(name) for i2 in range(len(is_param)): if is_param[i2]: 
new_caller[i2].append(names[i2]) else: new_caller[i2].append("None") if setTrue(is_ret) and not name in common_ret: common_ret.append(name) for i2 in range(len(is_ret)): if is_ret[i2]: new_receiver[i2].append(names[i2]) else: new_receiver[i2].append("None") diff_bins = [] for i in range(len(merged_list[0].orig_bins)): for merge in merged_list[1:]: if merge.orig_bins[i]!=merged_list[0].orig_bins[i]: diff_bins.append(i) break #print "DIFF_BIN :", diff_bins if len(diff_bins)>0: common_param.append("*diff_params") param_bins = [] for i in merged_list: param_bins.append([]) for j in diff_bins: param_bins[-1].append(i.orig_bins[j]) for j in param_bins[-1]: new_caller[merged_list.index(i)].append(j) #print "PARAM :", param_bins[-1] for i in range(len(merged_list)): merge = merged_list[i] #merge.output() merge.rewrite_code(common_vars, diff_bins) merge.rewrite_head_line(common_param) merge.rewrite_return_line(common_ret) merge.reget_caller(new_caller[i], new_receiver[i]) merge.param = common_param merge.return_vars = common_ret #merge.output() merged_list[i] = merge return merged_list
UTF-8
Python
false
false
2,014
15,126,874,865,407
985c59bdba268e05a22276e17b95e523614a5349
0d37e3f4b32525ccd440f084b548003d618eccaf
/core/settings-example.py
6bdb8b34e570cc4aef2f1f705ebb42d900bbb505
[ "GPL-3.0-only" ]
non_permissive
bensentropy/happytransport
https://github.com/bensentropy/happytransport
bffecea27a6a5c3de287a3d097dd683a5aee25a1
e6ffe6db798f035954820a36848f229b7b1adfa3
refs/heads/master
2020-06-02T20:18:27.646933
2014-05-25T08:44:33
2014-05-25T08:44:57
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
DEBUG = False # Set secret keys for CSRF protection SECRET_KEY = 'replace_with_secret_key' CSRF_SESSION_LKEY = 'replace_with_secret_lkey' CSRF_ENABLED = True ANALYTICS_ID = 'UA-your-id'
UTF-8
Python
false
false
2,014
13,743,895,375,667
ac5c4ccc52062ae9d6ce66220d4afb9be19afad6
e2ba841119b79cc56f011e5b0e6a8f97ba3493e1
/export/middleware.py
3608bb4aa6be9123717b01e4b01c491e508e5f5f
[]
no_license
tlam/exportrecord
https://github.com/tlam/exportrecord
a1b38e784e90d9e94f1c888ee1b238c66c79ab6a
b7121537f8edf442e0c800097f17f573cd63228d
refs/heads/master
2016-08-08T01:23:46.570699
2013-02-03T14:56:15
2013-02-03T14:56:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.conf import settings
from django.core.urlresolvers import reverse
from django.shortcuts import redirect


class RequireLoginMiddleware(object):
    """Redirect unauthenticated requests to the export login view.

    Old-style (pre-Django-1.10) middleware: only ``process_request`` is
    defined, and ``request.user.is_authenticated`` is invoked as a method,
    so this targets Django versions where it was still callable.
    """

    def __init__(self):
        # Paths reachable without logging in: the admin (which has its own
        # login flow) and the login page itself, which must stay accessible.
        self.path_exceptions = (
            reverse('admin:index'),
            reverse('export:login'),
        )

    def process_request(self, request):
        # Authenticated users pass straight through (None = continue).
        if request.user.is_authenticated():
            return None
        # Strip the '/export' mount prefix before matching; startswith()
        # accepts the whole tuple of exception prefixes at once.
        # NOTE(review): str.replace removes *every* '/export' occurrence,
        # not just a leading one -- presumably paths never contain it
        # twice; confirm.
        if request.path.replace('/export', '').startswith(self.path_exceptions):
            return None
        # Anyone else is bounced to the login view.
        return redirect('export:login')
UTF-8
Python
false
false
2,013
17,008,070,524,690
df7621c0fb778304b2c526068668a66115cd0613
da7d8d6edd5747ebff5ed0304b1d1bd6c35f1be5
/reviewer.py
8031ba34f731a8a70af21a6e14c07ef5b4328833
[]
no_license
stuycs-softdev-fall-2013/proj2-pd7-08-theRoom
https://github.com/stuycs-softdev-fall-2013/proj2-pd7-08-theRoom
accce55daac8cb6a8665f42fc4eb2e33154db4f1
3ece7ea496f63b1865a8a461b01f8cf9aaac949b
refs/heads/master
2021-01-19T20:27:32.077899
2013-12-05T18:00:07
2013-12-05T18:00:07
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from random import choice
from document import doc

# our magical, automated reviewer


def weightedListofKeys(corpus):
    # Expand a {key: weight} mapping into a flat list containing each key
    # `weight` times, so a plain random.choice() over the result picks
    # keys with probability proportional to their weight.
    out = []
    for key in corpus:
        for i in range(corpus[key]):
            out.append(key)
    return out
# returns a list of keys, with there being weight instances of each key


def weightedListofKeys2D(corpus):
    # Two-level variant: a key's effective weight is the sum of its nested
    # counts ({key: {k: count}}); only the outer key is emitted.
    out = []
    for key in corpus:
        for k in corpus[key]:
            for i in range(corpus[key][k]):
                out.append(key)
    return out

from collections import defaultdict
from re import sub, UNICODE

# Sentinel start-/end-of-sentence markers supplied by the document module.
START = doc.SQLss
END = doc.SQLes

# credit to Earwig for this (I got his permission to use it)
# https://github.com/earwig/earwigbot/blob/develop/earwigbot/wiki/copyvios/markov.py
def generateCorpus(text, corpus=None):
    """Implements a basic ngram Markov chain of words."""
    degree = 3 # 2 for bigrams, 3 for trigrams, etc.
    sentences = text.split(". ")
    if (len(sentences) > 1):
        # Several sentences: fold each one into the corpus recursively.
        for sentence in sentences:
            corpus = generateCorpus(sentence, corpus)
        return corpus
    # use the given corpus, else use a blank new one.
    chain = defaultdict(lambda: defaultdict(lambda: 0)) if corpus == None else corpus
    # NOTE(review): sub("", "", ...) is a no-op -- the pattern was
    # presumably meant to strip punctuation/non-word characters and got
    # lost somewhere; confirm against the original earwigbot source.
    words = sub("", "", text.lower(), flags=UNICODE).split()
    padding = degree - 1
    # Pad with START/END markers so chains can begin and terminate
    # cleanly at sentence boundaries.
    words = ([START] * padding) + words + ([END] * padding)
    for i in range(len(words) - degree + 1):
        last = i + degree - 1
        # Key is the (degree-1)-word prefix tuple; the inner dict counts
        # each observed follower word.
        chain[tuple(words[i:last])][words[last]] += 1
    return chain


def generateSentence(corpus,getNext,maxLength):
    # Draw random states until one that starts a sentence (first element
    # == START) comes up.
    # NOTE(review): choice(corpus) only works if `corpus` here is an
    # indexable sequence of 2-tuples (e.g. a list of chain keys), not the
    # dict built by generateCorpus -- verify what callers actually pass.
    pick = [END, END];
    while pick[0] != START:
        pick = choice(corpus)
    sentence = " "
    while not END in pick:
        if pick[1] != START:
            sentence += str(pick[1]) + " "
        # Slide the window: next state is (previous word, weighted-random
        # follower produced by the caller-supplied getNext).
        pick = (pick[1], choice(weightedListofKeys(getNext(pick))))
        if len(sentence.split(" ")) > maxLength:
            # Abandon runaway sentences; the caller retries.
            return None
    return sentence


def generateReview(movie_id):
    # Placeholder -- real review generation is not implemented yet.
    return "something something ipsum"


def generateSentenceWithGrammar(corpus,getNext):
    # Retry until generateSentence yields a sentence of at most max_len
    # words (it returns None when it overshoots).
    max_len = 30
    sentence = generateSentence(corpus,getNext,max_len)
    while sentence is None or len(sentence.split(' ')) > max_len:
        sentence = generateSentence(corpus,getNext,max_len)
    return sentence


# Module-level bootstrap: build the corpus from the 'quorum4' text file,
# feeding it one sentence at a time (the '.' is restored after splitting).
corpus = None
for sentence in open('quorum4').read().split('. '):
    corpus = generateCorpus(sentence + '.', corpus)
UTF-8
Python
false
false
2,013
15,023,795,612,828
45f7b94b02bbdba91306c70be6e13e60d0c1b51f
a968e67fa4c2ba1a673d518a5b53fe97b1d370c9
/hsgame/agents/user_agents.py
f00885ce6ea8ea5dd16b3c5a4530d6bfff56a722
[ "MIT" ]
permissive
JonathanFlynn/hearthstone-simulator
https://github.com/JonathanFlynn/hearthstone-simulator
a9353c8c54a9323faf323323fd6e5b4b3905d2ce
799896be1c52ad1df93b2023403be8da18d168a6
refs/heads/master
2018-08-26T09:16:39.147573
2014-07-11T15:49:49
2014-07-11T15:49:49
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
class Observer:
    """Subscribes to game events and logs each one through a writer.

    The writer only needs a ``write(str)`` method (e.g. sys.stdout or an
    io.StringIO). ``observe`` must be called before any events fire.
    """

    def __init__(self, writer):
        # No game until observe() is called.
        self.game = None
        self.writer = writer

    def observe(self, game):
        """Attach to *game* and bind the same handlers for every player.

        The original code repeated the six bind calls verbatim for
        players[0] and players[1]; the loop keeps the identical binding
        order per player while removing the duplication.
        """
        self.game = game
        for player in game.players:
            player.bind("turn_started", self.turn_started, player)
            player.bind("turn_ended", self.turn_ended, player)
            player.hero.bind("died", self.died, player)
            player.bind("card_drawn", self.card_drawn, player)
            player.bind("card_put back", self.card_put_back, player)
            player.hero.bind("damaged", self.damaged, player)

    def turn_started(self, player):
        self.writer.write("Turn started for " + str(player) + "\n")

    def turn_ended(self, player):
        self.writer.write("Turn ended for " + str(player) + "\n")

    def died(self, attacker, player):
        self.writer.write(str(player) + " died!\n")

    def card_drawn(self, card, player):
        self.writer.write(str(player) + " drew card " + str(card) + "\n")

    def card_put_back(self, card, player):
        self.writer.write(str(player) + " put back card " + str(card) + "\n")

    def damaged(self, amount, what, player):
        # A damage source of None means fatigue damage.
        if what is None:
            what = "fatigue"
        self.writer.write(str(player) + " was damaged " + str(amount) + " by " + str(what) + "\n")
UTF-8
Python
false
false
2,014
3,143,916,109,031
a47204f2bfa1afb3cb8435130062bcb71f92c3eb
801435fc03c772a53bc8397dd0ab8607ce6bd73a
/path_finder.py
7f9b68140d8e35d5dbda0c035fdf251faf36d507
[]
no_license
ragnraok/RandomMaze
https://github.com/ragnraok/RandomMaze
548448b0bde3619b1bd1b2f2c18d8a702c781fb9
f7a8f24d417a63ebcc0b6e30b0da65e6640c571f
refs/heads/master
2020-04-27T08:55:09.913876
2012-07-15T14:05:03
2012-07-15T14:05:03
5,014,910
7
3
null
null
null
null
null
null
null
null
null
null
null
null
null
from random_maze import RandomMaze


class MazePathFinder(object):
    # BFS shortest-path solver over the 2D integer grid produced by
    # RandomMaze. Mutates the maze in place (visited cells become -1).

    def __init__(self, maze, start_pos, end_pos, row_num, col_num):
        """
        the maze: 1 denote wall, 0 denote road
        and the borders are all 0
        the maze must be solvable
        start_pos: the start position tuple(row, col)
        end_pos: the end position tuple(row, col)
        """
        # NOTE(review): despite "the borders are all 0" above,
        # __if_pos_valid rejects border rows/columns outright, so border
        # cells are never entered -- confirm the intended convention.
        self.maze = maze
        self.start_pos = start_pos
        self.end_pos = end_pos
        self.row_num = row_num
        self.col_num = col_num

    def __if_pos_valid(self, row, col):
        # Returns True only for in-range, non-border, non-wall,
        # not-yet-visited cells.
        # check the index range
        if row < 0 or row >= self.row_num or col < 0 or col >= self.col_num:
            return False
        # check if borders
        elif row == 0 or row == self.row_num - 1:
            return False
        elif col == 0 or col == self.col_num - 1:
            return False
        # then check if wall
        elif self.maze[row][col] == 1:
            return False
        # then check if visited, which is marked as -1
        elif self.maze[row][col] == -1:
            return False
        return True

    def bfs_find_path(self):
        """
        use BFS to find the shortest path from start_pos to
        end_pos
        """
        # Right, left, down, up neighbour offsets.
        direction = [ (0, 1), (0, -1), (1, 0), (-1, 0)]
        # Plain list used as a FIFO queue (pop from the front below).
        path_queue = [tuple(self.start_pos)]
        self.maze[self.start_pos[0]][self.start_pos[1]] = -1
        if_find = False
        # record[r][c] holds the predecessor of cell (r, c) on the BFS
        # tree; the first assignment is just a placeholder that the loop
        # below replaces with a fresh row list per row.
        record = [(0, 0)]*self.row_num
        for i in range(self.row_num):
            record[i] = [(0, 0)]*self.col_num
        while len(path_queue) > 0 and not if_find:
            cur_pos = path_queue[0]
            del path_queue[0]
            #print cur_pos
            self.maze[cur_pos[0]][cur_pos[1]] = -1
            for i in range(4):
                next_dir = direction[i]
                next_pos = (cur_pos[0] + next_dir[0], cur_pos[1] + next_dir[1])
                #print next_pos
                if self.__if_pos_valid(next_pos[0], next_pos[1]):
                    # Mark visited at enqueue time so a cell is queued
                    # at most once.
                    self.maze[next_pos[0]][next_pos[1]] = -1
                    path_queue.append(next_pos)
                    # mark the precursor
                    record[next_pos[0]][next_pos[1]] = cur_pos
                    if next_pos[0] == self.end_pos[0] and next_pos[1] == self.end_pos[1]:
                        if_find = True
                        break
        if if_find:
            # Walk the predecessor chain back from end_pos to start_pos;
            # the returned path is therefore end-to-start order.
            path = []
            start_row = self.start_pos[0]
            start_col = self.start_pos[1]
            end_row = self.end_pos[0]
            end_col = self.end_pos[1]
            cur_row = end_row
            cur_col = end_col
            path.append(self.end_pos)
            #print record
            while cur_row != start_row or cur_col != start_col:
                row = cur_row
                col = cur_col
                cur_row = record[row][col][0]
                cur_col = record[row][col][1]
                path.append((cur_row, cur_col))
            print path_queue
            print path
            return path
        else:
            print 'no path find'
            return []


if __name__ == '__main__':
    # Demo: generate an 11x11 maze and solve it from the fixed entrance
    # (2, 1) to the computed exit cell.
    row = 11
    col = 11
    cell_row_num = (row - 3) // 2
    cell_col_num = (col - 3) // 2
    rand_maze = RandomMaze(row, col)
    maze, path_track = rand_maze.dfs_maze()
    path_finder = MazePathFinder(maze, (2, 1), (2 * cell_row_num, 2 * cell_col_num + 1), row, col)
    path_queue = path_finder.bfs_find_path()
    for i in range(len(maze)):
        print maze[i]
    print path_queue
UTF-8
Python
false
false
2,012
4,647,154,644,953
92c6aa421841b6c02a06b73cdd23bc76014c6630
c561e7c6bd87ee3da54d057b3b1dc6847f373606
/sbfury/tests/test_common.py
9f245c6ab0b23cbc6ba7f5459635913d769d6f47
[ "GPL-3.0-only" ]
non_permissive
HieuLsw/sbfury
https://github.com/HieuLsw/sbfury
b4d48c7df0f485ae3c9b9a754ed8377244e7519f
926529b4fb0085fbcb2976833a62b1e20aa895c8
refs/heads/master
2016-08-12T21:37:24.071552
2010-07-02T01:44:49
2010-07-02T01:44:49
53,143,507
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
import unittest
import sys

sys.path.append('./')
from test import *
import common


class TestCommon(unittest.TestCase):
    """Smoke tests for the common module's asset loading."""

    def test_load_image(self):
        # Loading either dragon sprite must complete without raising.
        for asset in ('dragon/head.png', 'dragon/part.png'):
            common.load_image(asset)


if __name__ == '__main__':
    unittest.main()
UTF-8
Python
false
false
2,010
1,443,109,054,110
2d2a17a6310d04618e216674f12c0f9165fe4cdf
8449ba9666e2eeab9311986fc9232d84c17426be
/unidecode/x063.py
794dbe899070a8f67659351f7eeebaef8f4d338b
[ "GPL-1.0-or-later", "Artistic-1.0-Perl" ]
non_permissive
youngking/Unidecode
https://github.com/youngking/Unidecode
c9f0ad04f89e8a421cef2319b22852b4974e9c57
f5894da85874e2ecd5e19d2236e6c9522f218fd3
refs/heads/master
2021-01-19T06:31:15.184221
2011-12-11T15:21:52
2011-12-11T15:21:52
2,958,623
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
data = ( 'Bo ',#0x0 'Chi ',#0x1 'Gua ',#0x2 'Zhi ',#0x3 'Kuo ',#0x4 'Duo ',#0x5 'Duo ',#0x6 'Zhi ',#0x7 'Qie ',#0x8 'An ',#0x9 'Nong ',#0xa 'Zhen ',#0xb 'Ge ',#0xc 'Jiao ',#0xd 'Ku ',#0xe 'Dong ',#0xf 'Ru ',#0x10 'Tiao ',#0x11 'Lie ',#0x12 'Zha ',#0x13 'Lu ',#0x14 'Die ',#0x15 'Wa ',#0x16 'Jue ',#0x17 'Mushiru ',#0x18 'Ju ',#0x19 'Zhi ',#0x1a 'Luan ',#0x1b 'Ya ',#0x1c 'Zhua ',#0x1d 'Ta ',#0x1e 'Xie ',#0x1f 'Nao ',#0x20 'Dang ',#0x21 'Jiao ',#0x22 'Zheng ',#0x23 'Ji ',#0x24 'Hui ',#0x25 'Xun ',#0x26 'Ku ',#0x27 'Ai ',#0x28 'Tuo ',#0x29 'Nuo ',#0x2a 'Cuo ',#0x2b 'Bo ',#0x2c 'Geng ',#0x2d 'Ti ',#0x2e 'Zhen ',#0x2f 'Cheng ',#0x30 'Suo ',#0x31 'Suo ',#0x32 'Keng ',#0x33 'Mei ',#0x34 'Long ',#0x35 'Ju ',#0x36 'Peng ',#0x37 'Jian ',#0x38 'Yi ',#0x39 'Ting ',#0x3a 'Shan ',#0x3b 'Nuo ',#0x3c 'Wan ',#0x3d 'Xie ',#0x3e 'Cha ',#0x3f 'Feng ',#0x40 'Jiao ',#0x41 'Wu ',#0x42 'Jun ',#0x43 'Jiu ',#0x44 'Tong ',#0x45 'Kun ',#0x46 'Huo ',#0x47 'Tu ',#0x48 'Zhuo ',#0x49 'Pou ',#0x4a 'Le ',#0x4b 'Ba ',#0x4c 'Han ',#0x4d 'Shao ',#0x4e 'Nie ',#0x4f 'Juan ',#0x50 'Ze ',#0x51 'Song ',#0x52 'Ye ',#0x53 'Jue ',#0x54 'Bu ',#0x55 'Huan ',#0x56 'Bu ',#0x57 'Zun ',#0x58 'Yi ',#0x59 'Zhai ',#0x5a 'Lu ',#0x5b 'Sou ',#0x5c 'Tuo ',#0x5d 'Lao ',#0x5e 'Sun ',#0x5f 'Bang ',#0x60 'Jian ',#0x61 'Huan ',#0x62 'Dao ',#0x63 'wei',#0x64 'Wan ',#0x65 'Qin ',#0x66 'Peng ',#0x67 'She ',#0x68 'Lie ',#0x69 'Min ',#0x6a 'Men ',#0x6b 'Fu ',#0x6c 'Bai ',#0x6d 'Ju ',#0x6e 'Dao ',#0x6f 'Wo ',#0x70 'Ai ',#0x71 'Juan ',#0x72 'Yue ',#0x73 'Zong ',#0x74 'Chen ',#0x75 'Chui ',#0x76 'Jie ',#0x77 'Tu ',#0x78 'Ben ',#0x79 'Na ',#0x7a 'Nian ',#0x7b 'Nuo ',#0x7c 'Zu ',#0x7d 'Wo ',#0x7e 'Xi ',#0x7f 'Xian ',#0x80 'Cheng ',#0x81 'Dian ',#0x82 'Sao ',#0x83 'Lun ',#0x84 'Qing ',#0x85 'Gang ',#0x86 'Duo ',#0x87 'Shou ',#0x88 'Diao ',#0x89 'Pou ',#0x8a 'Di ',#0x8b 'Zhang ',#0x8c 'Gun ',#0x8d 'Ji ',#0x8e 'Tao ',#0x8f 'Qia ',#0x90 'Qi ',#0x91 'Pai ',#0x92 'Shu ',#0x93 'Qian ',#0x94 'Ling ',#0x95 'Yi ',#0x96 'Ya ',#0x97 'Jue ',#0x98 
'Zheng ',#0x99 'Liang ',#0x9a 'Gua ',#0x9b 'Yi ',#0x9c 'Huo ',#0x9d 'Shan ',#0x9e 'Zheng ',#0x9f 'Lue ',#0xa0 'Cai ',#0xa1 'Tan ',#0xa2 'Che ',#0xa3 'Bing ',#0xa4 'Jie ',#0xa5 'Ti ',#0xa6 'Kong ',#0xa7 'Tui ',#0xa8 'Yan ',#0xa9 'Cuo ',#0xaa 'Zou ',#0xab 'Ju ',#0xac 'Tian ',#0xad 'Qian ',#0xae 'Ken ',#0xaf 'Bai ',#0xb0 'Shou ',#0xb1 'Jie ',#0xb2 'Lu ',#0xb3 'Guo ',#0xb4 'Haba ',#0xb5 'jie',#0xb6 'Zhi ',#0xb7 'Dan ',#0xb8 'Mang ',#0xb9 'Xian ',#0xba 'Sao ',#0xbb 'Guan ',#0xbc 'Peng ',#0xbd 'Yuan ',#0xbe 'Nuo ',#0xbf 'Jian ',#0xc0 'Zhen ',#0xc1 'Jiu ',#0xc2 'Jian ',#0xc3 'Yu ',#0xc4 'Yan ',#0xc5 'Kui ',#0xc6 'Nan ',#0xc7 'Hong ',#0xc8 'Rou ',#0xc9 'Pi ',#0xca 'Wei ',#0xcb 'Sai ',#0xcc 'Zou ',#0xcd 'Xuan ',#0xce 'Miao ',#0xcf 'Ti ',#0xd0 'Nie ',#0xd1 'Cha ',#0xd2 'Shi ',#0xd3 'Zong ',#0xd4 'Zhen ',#0xd5 'Yi ',#0xd6 'Shun ',#0xd7 'Heng ',#0xd8 'Bian ',#0xd9 'Yang ',#0xda 'Huan ',#0xdb 'Yan ',#0xdc 'Zuan ',#0xdd 'An ',#0xde 'Xu ',#0xdf 'Ya ',#0xe0 'Wo ',#0xe1 'Ke ',#0xe2 'Chuai ',#0xe3 'Ji ',#0xe4 'Ti ',#0xe5 'La ',#0xe6 'La ',#0xe7 'Cheng ',#0xe8 'Kai ',#0xe9 'Jiu ',#0xea 'Jiu ',#0xeb 'Tu ',#0xec 'Jie ',#0xed 'Hui ',#0xee 'Geng ',#0xef 'Chong ',#0xf0 'Shuo ',#0xf1 'She ',#0xf2 'Xie ',#0xf3 'Yuan ',#0xf4 'Qian ',#0xf5 'Ye ',#0xf6 'Cha ',#0xf7 'Zha ',#0xf8 'Bei ',#0xf9 'Yao ',#0xfa 'wei',#0xfb 'beng',#0xfc 'Lan ',#0xfd 'Wen ',#0xfe 'Qin ',#0xff )
UTF-8
Python
false
false
2,011
10,874,857,225,659
1d18dab92987ba26b45411dbec6bcacc94ceae38
2324903d6cd9aa8c82881a6f8d842d3663363d4d
/server/main.py
dad0955a232ddb99871076162ae50a97042e8347
[]
no_license
xyzwvut/CacheFS
https://github.com/xyzwvut/CacheFS
4cf701abbbe118bb1726c9bcdd0086e5634db9db
b329c13f4449cfb1a6fa2175cbce39140eb55f08
refs/heads/master
2021-01-20T07:02:59.969429
2014-09-14T21:01:15
2014-09-14T21:01:15
16,241,332
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python3
"""CacheFS server entry point: parse arguments, load config, run the cache."""

import argparse
import sys
import os

import config
import backend
from console import CacheFSConsole
from cache import Cache


def accessible_directory(path):
    """
    argparse type-checker: return *path* if it is a readable and
    writable directory, else raise argparse.ArgumentTypeError.
    """
    if not os.path.isdir(path):
        raise argparse.ArgumentTypeError("{0} is not a valid directory".format(path))
    if os.access(path, os.R_OK | os.W_OK):
        return path
    else:
        raise argparse.ArgumentTypeError("{0} is not accessible".format(path))


def parse_cmdline(argv):
    """Parse command-line options (argv excludes the program name)."""
    parser = argparse.ArgumentParser()
    parser.set_defaults(show_console=False)
    parser.add_argument('config', type=argparse.FileType('r'),
                        help='path to configfile')
    parser.add_argument('-v', '--verbosity', action='count',
                        help='increase verbosity of debug output')
    parser.add_argument('-p', '--port', type=int, default=None,
                        help='port number (default: 5555)')
    parser.add_argument('-c', '--with-console', dest='show_console',
                        action='store_true',
                        help='Show interactive console')
    args = parser.parse_args(argv)
    return args


def apply_cmdline_overwrites(args):
    """ Update configs with overwrites from commandline """
    # configparser.set() only accepts string values, so the int port must
    # be converted; "is not None" keeps the check explicit rather than
    # relying on truthiness.
    if args.port is not None:
        config.config.set('main', 'port', str(args.port))
    if args.show_console:
        config.config.set('main', 'console', 'True')


def main(argv):
    args = parse_cmdline(argv)
    config.load_config(args.config)
    apply_cmdline_overwrites(args)
    # TODO: Path not expanded; used before cache sanity-checked it
    back = backend.create(config.config['back'], config.config['cache']['dir'])
    cache = Cache(config.config['cache'], back)
    if config.config.getboolean('main', 'console'):
        CacheFSConsole(cache).cmdloop()
    cache.shutdown()


if __name__ == '__main__':
    main(sys.argv[1:])
UTF-8
Python
false
false
2,014
11,733,850,675,398
6f58ab4ffe28a72a4eb32d0c39e250b5538be3f5
9514ae9f7784995eebafcf92635298a7448fe1d4
/test/partial.py
9dac37b47195df533557c36d6ae84b2bb7ffd271
[]
no_license
GreenTree96/hotpy
https://github.com/GreenTree96/hotpy
6799b5ce09350cf22faa0990c6d6fcf85ba6a566
df049de08cda7ee39b2e3b7698bb9dcd89e8740d
refs/heads/master
2015-08-21T19:01:07.711253
2011-12-06T11:41:31
2011-12-06T11:41:31
32,379,294
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Probe how method extraction behaves on int: unbound slot wrappers taken
# from the type (int.__add__) versus bound method-wrappers taken from an
# instance ((1).__add__). For each, show the wrapper's class and a call.

iadd = int.__add__
ineg = int.__neg__
oneadd = (1).__add__
oneneg = (1).__neg__

for wrapper, value in (
    (iadd, iadd(1, 2)),
    (ineg, ineg(1)),
    (oneadd, oneadd(2)),
    (oneneg, oneneg()),
):
    print(wrapper.__class__)
    print(value)
UTF-8
Python
false
false
2,011