\"\"\"\n\n    bs = BeautifulSoup(text, \"html.parser\")\n\n    links = crawler.extract_links(bs)\n    expected = [\"http://example.com/elsie\",\n                \"http://example.com/lacie\",\n                \"http://example.com/tillie\"]\n\n    assert links == expected\n\n\ndef test_link_trimmer():\n    result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n    assert result == \"/lacie\"\n"},
"new_contents":{"kind":"string","value":"from grazer.core import crawler\nfrom bs4 import BeautifulSoup\n\n\ndef test_extract_links():\n    text = \"\"\"\n    <html><head><title>The Dormouse's story</title></head>\n    <body>\n    <p class=\"title\"><b>The Dormouse's story</b></p>\n\n    <p class=\"story\">Once upon a time there were three little sisters; and their names were\n    <a href=\"http://example.com/elsie\">Elsie</a>,\n    <a href=\"http://example.com/lacie\">Lacie</a> and\n    <a href=\"http://example.com/tillie\">Tillie</a>;\n    and they lived at the bottom of a well.</p>\n    </body>\n    </html>\n    \"\"\"\n\n    bs = BeautifulSoup(text, \"html.parser\")\n\n    links = crawler.extract_links(bs)\n    expected = [\"http://example.com/elsie\",\n                \"http://example.com/lacie\",\n                \"http://example.com/tillie\"]\n\n    assert links == expected\n\n\ndef test_link_trimmer():\n    result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n    assert result == \"/lacie\"\n\n\ndef test_trim_link_without_trailing_slash():\n    result = crawler.trim_link(\"http://example.com\", \"http://example.com\")\n    assert result == \"http://example.com\"\n"},
"subject":{"kind":"string","value":"Test to validate trimming scenario"},"message":{"kind":"string","value":"Test to validate trimming scenario\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"CodersOfTheNight/verata"},
"ndiff":{"kind":"string","value":" from grazer.core import crawler\n from bs4 import BeautifulSoup\n \n \n def test_extract_links():\n     text = \"\"\"\n     <html><head><title>The Dormouse's story</title></head>\n     <body>\n     <p class=\"title\"><b>The Dormouse's story</b></p>\n \n     <p class=\"story\">Once upon a time there were three little sisters; and their names were\n     <a href=\"http://example.com/elsie\">Elsie</a>,\n     <a href=\"http://example.com/lacie\">Lacie</a> and\n     <a href=\"http://example.com/tillie\">Tillie</a>;\n     and they lived at the bottom of a well.</p>\n     </body>\n     </html>\n     \"\"\"\n \n     bs = BeautifulSoup(text, \"html.parser\")\n \n     links = crawler.extract_links(bs)\n     expected = [\"http://example.com/elsie\",\n                 \"http://example.com/lacie\",\n                 \"http://example.com/tillie\"]\n \n     assert links == expected\n \n \n def test_link_trimmer():\n     result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n     assert result == \"/lacie\"\n \n+ \n+ def test_trim_link_without_trailing_slash():\n+     result = crawler.trim_link(\"http://example.com\", \"http://example.com\")\n+     assert result == \"http://example.com\"\n+ "},
"instruction":{"kind":"string","value":"Test to validate trimming scenario"},"content":{"kind":"string","value":"## Code Before:\nfrom grazer.core import crawler\nfrom bs4 import BeautifulSoup\n\n\ndef test_extract_links():\n    text = \"\"\"\n    <html><head><title>The Dormouse's story</title></head>\n    <body>\n    <p class=\"title\"><b>The Dormouse's story</b></p>\n\n    <p class=\"story\">Once upon a time there were three little sisters; and their names were\n    <a href=\"http://example.com/elsie\">Elsie</a>,\n    <a href=\"http://example.com/lacie\">Lacie</a> and\n    <a href=\"http://example.com/tillie\">Tillie</a>;\n    and they lived at the bottom of a well.</p>\n    </body>\n    </html>\n    \"\"\"\n\n    bs = BeautifulSoup(text, \"html.parser\")\n\n    links = crawler.extract_links(bs)\n    expected = [\"http://example.com/elsie\",\n                \"http://example.com/lacie\",\n                \"http://example.com/tillie\"]\n\n    assert links == expected\n\n\ndef test_link_trimmer():\n    result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n    assert result == \"/lacie\"\n\n## Instruction:\nTest to validate trimming scenario\n## Code After:\nfrom grazer.core import crawler\nfrom bs4 import BeautifulSoup\n\n\ndef test_extract_links():\n    text = \"\"\"\n    <html><head><title>The Dormouse's story</title></head>\n    <body>\n    <p class=\"title\"><b>The Dormouse's story</b></p>\n\n    <p class=\"story\">Once upon a time there were three little sisters; and their names were\n    <a href=\"http://example.com/elsie\">Elsie</a>,\n    <a href=\"http://example.com/lacie\">Lacie</a> and\n    <a href=\"http://example.com/tillie\">Tillie</a>;\n    and they lived at the bottom of a well.</p>\n    </body>\n    </html>
\n \n \n \"\"\"\n\n bs = BeautifulSoup(text, \"html.parser\")\n\n links = crawler.extract_links(bs)\n expected = [\"http://example.com/elsie\",\n \"http://example.com/lacie\",\n \"http://example.com/tillie\"]\n\n assert links == expected\n\n\ndef test_link_trimmer():\n result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n assert result == \"/lacie\"\n\n\ndef test_trim_link_without_trailing_slash():\n result = crawler.trim_link(\"http://example.com\", \"http://example.com\")\n assert result == \"http://example.com\"\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\ndef test_link_trimmer():\n result = crawler.trim_link(\"http://example.com/lacie\", \"http://example.com\")\n assert result == \"/lacie\"\n\n\ndef test_trim_link_without_trailing_slash():\n result = crawler.trim_link(\"http://example.com\", \"http://example.com\")\n assert result == \"http://example.com\"\n\n\n# ... rest of the code ..."}}},{"rowIdx":1576,"cells":{"commit":{"kind":"string","value":"1ee414611fa6e01516d545bb284695a62bd69f0a"},"old_file":{"kind":"string","value":"rtrss/daemon.py"},"new_file":{"kind":"string","value":"rtrss/daemon.py"},"old_contents":{"kind":"string","value":"import sys\nimport os\nimport logging\nimport atexit\nfrom rtrss.basedaemon import BaseDaemon\n\n_logger = logging.getLogger(__name__)\n\n\nclass WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n \n from rtrss.worker import app_init, worker_action\n worker_action('import_categories') # TODO run()\n \n _logger.info('Daemon is done and exiting')\n\n def start(self):\n _logger.info('Starting daemon')\n super(WorkerDaemon, self).start()\n\n def stop(self):\n _logger.info('Stopping daemon')\n super(WorkerDaemon, self).stop()\n\n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n \n\ndef make_daemon(config):\n '''Returns WorkerDaemon instance'''\n pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n "},"new_contents":{"kind":"string","value":"import os\nimport logging\n\nfrom rtrss.basedaemon import BaseDaemon\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n\n from rtrss.worker import worker_action\n worker_action('run')\n\n _logger.info('Daemon is done and exiting')\n\n def start(self):\n _logger.info('Starting daemon')\n super(WorkerDaemon, self).start()\n\n def stop(self):\n _logger.info('Stopping daemon')\n super(WorkerDaemon, self).stop()\n\n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n\n\ndef make_daemon(config):\n '''Returns WorkerDaemon instance'''\n pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n"},"subject":{"kind":"string","value":"Change debug action to production"},"message":{"kind":"string","value":"Change debug action to production\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"notapresent/rtrss,notapresent/rtrss,notapresent/rtrss,notapresent/rtrss"},"ndiff":{"kind":"string","value":"- import sys\n 
import os\n import logging\n- import atexit\n+ \n from rtrss.basedaemon import BaseDaemon\n+ \n \n _logger = logging.getLogger(__name__)\n \n \n class WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n- \n+ \n- from rtrss.worker import app_init, worker_action\n+ from rtrss.worker import worker_action\n- worker_action('import_categories') # TODO run()\n- \n+ worker_action('run')\n+ \n _logger.info('Daemon is done and exiting')\n \n def start(self):\n _logger.info('Starting daemon')\n super(WorkerDaemon, self).start()\n \n def stop(self):\n _logger.info('Stopping daemon')\n super(WorkerDaemon, self).stop()\n \n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n- \n+ \n \n def make_daemon(config):\n '''Returns WorkerDaemon instance'''\n pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n- \n+ "},"instruction":{"kind":"string","value":"Change debug action to production"},"content":{"kind":"string","value":"## Code Before:\nimport sys\nimport os\nimport logging\nimport atexit\nfrom rtrss.basedaemon import BaseDaemon\n\n_logger = logging.getLogger(__name__)\n\n\nclass WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n \n from rtrss.worker import app_init, worker_action\n worker_action('import_categories') # TODO run()\n \n _logger.info('Daemon is done and exiting')\n\n def start(self):\n _logger.info('Starting daemon')\n super(WorkerDaemon, self).start()\n\n def stop(self):\n _logger.info('Stopping daemon')\n super(WorkerDaemon, self).stop()\n\n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n \n\ndef make_daemon(config):\n '''Returns WorkerDaemon instance'''\n pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n \n## Instruction:\nChange debug action to production\n## Code After:\nimport os\nimport logging\n\nfrom rtrss.basedaemon import BaseDaemon\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n\n from rtrss.worker import worker_action\n worker_action('run')\n\n _logger.info('Daemon is done and exiting')\n\n def start(self):\n _logger.info('Starting daemon')\n super(WorkerDaemon, self).start()\n\n def stop(self):\n _logger.info('Stopping daemon')\n super(WorkerDaemon, self).stop()\n\n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n\n\ndef make_daemon(config):\n '''Returns WorkerDaemon instance'''\n pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nimport os\nimport logging\n\nfrom rtrss.basedaemon import BaseDaemon\n\n\n_logger = logging.getLogger(__name__)\n\n\n\n// ... modified code ... 
\n\n\nclass WorkerDaemon(BaseDaemon):\n def run(self):\n _logger.info('Daemon started ith pid %d', os.getpid())\n\n from rtrss.worker import worker_action\n worker_action('run')\n\n _logger.info('Daemon is done and exiting')\n\n def start(self):\n\n\n ... \n\n\n def restart(self):\n _logger.info('Restarting daemon')\n super(WorkerDaemon, self).restart()\n\n\ndef make_daemon(config):\n '''Returns WorkerDaemon instance'''\n\n\n ... \n\n\n logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR\n logfile = os.path.join(logdir, 'daemon.log')\n return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)\n\n\n// ... rest of the code ..."}}},{"rowIdx":1577,"cells":{"commit":{"kind":"string","value":"0050711d85ba4084e9d0f32d3bad1b3400350476"},"old_file":{"kind":"string","value":"name/feeds.py"},"new_file":{"kind":"string","value":"name/feeds.py"},"old_contents":{"kind":"string","value":"from django.contrib.syndication.views import Feed\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom . import app_settings\nfrom .models import Name\n\n\nclass NameAtomFeedType(Atom1Feed):\n \"\"\"Create an Atom feed that sets the Content-Type response\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n\n\nclass NameAtomFeed(Feed):\n feed_type = NameAtomFeedType\n link = reverse_lazy(\"name_feed\")\n title = \"Name App\"\n subtitle = \"New Name Records\"\n author_name = app_settings.NAME_FEED_AUTHOR_NAME\n author_email = app_settings.NAME_FEED_AUTHOR_EMAIL\n author_link = app_settings.NAME_FEED_AUTHOR_LINK\n\n def items(self):\n # last 5 added items\n return Name.objects.order_by('-date_created')[:20]\n\n def item_title(self, obj):\n return obj.name\n\n def item_description(self, obj):\n return 'Name Type: {0}'.format(obj.get_name_type_label())\n\n def item_link(self, obj):\n return obj.get_absolute_url()\n"},"new_contents":{"kind":"string","value":"from django.contrib.syndication.views import Feed\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom . 
import app_settings\nfrom .models import Name\n\n\nclass NameAtomFeedType(Atom1Feed):\n \"\"\"Create an Atom feed that sets the Content-Type response\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n\n def root_attributes(self):\n attrs = super(NameAtomFeedType, self).root_attributes()\n attrs['xmlns:georss'] = 'http://www.georss.org/georss'\n return attrs\n\n def add_item_elements(self, handler, item):\n super(NameAtomFeedType, self).add_item_elements(handler, item)\n if item.get('location'):\n handler.addQuickElement('georss:point', item['location'])\n\n\nclass NameAtomFeed(Feed):\n feed_type = NameAtomFeedType\n link = reverse_lazy(\"name_feed\")\n title = \"Name App\"\n subtitle = \"New Name Records\"\n author_name = app_settings.NAME_FEED_AUTHOR_NAME\n author_email = app_settings.NAME_FEED_AUTHOR_EMAIL\n author_link = app_settings.NAME_FEED_AUTHOR_LINK\n\n def items(self):\n # last 5 added items\n return Name.objects.order_by('-date_created')[:20]\n\n def item_title(self, obj):\n return obj.name\n\n def item_description(self, obj):\n return 'Name Type: {0}'.format(obj.get_name_type_label())\n\n def item_link(self, obj):\n return obj.get_absolute_url()\n\n def item_location(self, obj):\n if obj.has_locations() and obj.location_set.current_location:\n return obj.location_set.current_location.geo_point()\n\n def item_extra_kwargs(self, obj):\n return dict(location=self.item_location(obj))\n"},"subject":{"kind":"string","value":"Add the location as a georss:point element."},"message":{"kind":"string","value":"Add the location as a georss:point element.\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"repos":{"kind":"string","value":"damonkelley/django-name,damonkelley/django-name,unt-libraries/django-name,damonkelley/django-name,unt-libraries/django-name,unt-libraries/django-name"},"ndiff":{"kind":"string","value":" from django.contrib.syndication.views import Feed\n from django.core.urlresolvers import reverse_lazy\n from django.utils.feedgenerator import Atom1Feed\n \n from . 
import app_settings\n from .models import Name\n \n \n class NameAtomFeedType(Atom1Feed):\n \"\"\"Create an Atom feed that sets the Content-Type response\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n+ \n+ def root_attributes(self):\n+ attrs = super(NameAtomFeedType, self).root_attributes()\n+ attrs['xmlns:georss'] = 'http://www.georss.org/georss'\n+ return attrs\n+ \n+ def add_item_elements(self, handler, item):\n+ super(NameAtomFeedType, self).add_item_elements(handler, item)\n+ if item.get('location'):\n+ handler.addQuickElement('georss:point', item['location'])\n \n \n class NameAtomFeed(Feed):\n feed_type = NameAtomFeedType\n link = reverse_lazy(\"name_feed\")\n title = \"Name App\"\n subtitle = \"New Name Records\"\n author_name = app_settings.NAME_FEED_AUTHOR_NAME\n author_email = app_settings.NAME_FEED_AUTHOR_EMAIL\n author_link = app_settings.NAME_FEED_AUTHOR_LINK\n \n def items(self):\n # last 5 added items\n return Name.objects.order_by('-date_created')[:20]\n \n def item_title(self, obj):\n return obj.name\n \n def item_description(self, obj):\n return 'Name Type: {0}'.format(obj.get_name_type_label())\n \n def item_link(self, obj):\n return obj.get_absolute_url()\n \n+ def item_location(self, obj):\n+ if obj.has_locations() and obj.location_set.current_location:\n+ return obj.location_set.current_location.geo_point()\n+ \n+ def item_extra_kwargs(self, obj):\n+ return dict(location=self.item_location(obj))\n+ "},"instruction":{"kind":"string","value":"Add the location as a georss:point element."},"content":{"kind":"string","value":"## Code Before:\nfrom django.contrib.syndication.views import Feed\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom . import app_settings\nfrom .models import Name\n\n\nclass NameAtomFeedType(Atom1Feed):\n \"\"\"Create an Atom feed that sets the Content-Type response\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n\n\nclass NameAtomFeed(Feed):\n feed_type = NameAtomFeedType\n link = reverse_lazy(\"name_feed\")\n title = \"Name App\"\n subtitle = \"New Name Records\"\n author_name = app_settings.NAME_FEED_AUTHOR_NAME\n author_email = app_settings.NAME_FEED_AUTHOR_EMAIL\n author_link = app_settings.NAME_FEED_AUTHOR_LINK\n\n def items(self):\n # last 5 added items\n return Name.objects.order_by('-date_created')[:20]\n\n def item_title(self, obj):\n return obj.name\n\n def item_description(self, obj):\n return 'Name Type: {0}'.format(obj.get_name_type_label())\n\n def item_link(self, obj):\n return obj.get_absolute_url()\n\n## Instruction:\nAdd the location as a georss:point element.\n## Code After:\nfrom django.contrib.syndication.views import Feed\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.utils.feedgenerator import Atom1Feed\n\nfrom . 
import app_settings\nfrom .models import Name\n\n\nclass NameAtomFeedType(Atom1Feed):\n \"\"\"Create an Atom feed that sets the Content-Type response\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n\n def root_attributes(self):\n attrs = super(NameAtomFeedType, self).root_attributes()\n attrs['xmlns:georss'] = 'http://www.georss.org/georss'\n return attrs\n\n def add_item_elements(self, handler, item):\n super(NameAtomFeedType, self).add_item_elements(handler, item)\n if item.get('location'):\n handler.addQuickElement('georss:point', item['location'])\n\n\nclass NameAtomFeed(Feed):\n feed_type = NameAtomFeedType\n link = reverse_lazy(\"name_feed\")\n title = \"Name App\"\n subtitle = \"New Name Records\"\n author_name = app_settings.NAME_FEED_AUTHOR_NAME\n author_email = app_settings.NAME_FEED_AUTHOR_EMAIL\n author_link = app_settings.NAME_FEED_AUTHOR_LINK\n\n def items(self):\n # last 5 added items\n return Name.objects.order_by('-date_created')[:20]\n\n def item_title(self, obj):\n return obj.name\n\n def item_description(self, obj):\n return 'Name Type: {0}'.format(obj.get_name_type_label())\n\n def item_link(self, obj):\n return obj.get_absolute_url()\n\n def item_location(self, obj):\n if obj.has_locations() and obj.location_set.current_location:\n return obj.location_set.current_location.geo_point()\n\n def item_extra_kwargs(self, obj):\n return dict(location=self.item_location(obj))\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\n header to application/xml.\n \"\"\"\n mime_type = 'application/xml'\n\n def root_attributes(self):\n attrs = super(NameAtomFeedType, self).root_attributes()\n attrs['xmlns:georss'] = 'http://www.georss.org/georss'\n return attrs\n\n def add_item_elements(self, handler, item):\n super(NameAtomFeedType, self).add_item_elements(handler, item)\n if item.get('location'):\n handler.addQuickElement('georss:point', item['location'])\n\n\nclass NameAtomFeed(Feed):\n\n\n# ... modified code ... \n\n\n\n def item_link(self, obj):\n return obj.get_absolute_url()\n\n def item_location(self, obj):\n if obj.has_locations() and obj.location_set.current_location:\n return obj.location_set.current_location.geo_point()\n\n def item_extra_kwargs(self, obj):\n return dict(location=self.item_location(obj))\n\n\n# ... 
rest of the code ..."}}},{"rowIdx":1578,"cells":{"commit":{"kind":"string","value":"4db714570a9ce58a08c72aa1477e9e7a48ed650c"},"old_file":{"kind":"string","value":"tests/util_tests.py"},"new_file":{"kind":"string","value":"tests/util_tests.py"},"old_contents":{"kind":"string","value":"from chai import Chai\n\nfrom arrow import util\n\n\nclass UtilTests(Chai):\n def test_is_timestamp(self):\n timestamp_float = 1563047716.958061\n timestamp_int = int(timestamp_float)\n\n self.assertTrue(util.is_timestamp(timestamp_int))\n self.assertTrue(util.is_timestamp(timestamp_float))\n\n self.assertFalse(util.is_timestamp(str(timestamp_int)))\n self.assertFalse(util.is_timestamp(str(timestamp_float)))\n self.assertFalse(util.is_timestamp(True))\n self.assertFalse(util.is_timestamp(False))\n\n full_datetime = \"2019-06-23T13:12:42\"\n self.assertFalse(util.is_timestamp(full_datetime))\n\n overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_float)\n\n overflow_timestamp_int = int(overflow_timestamp_float)\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_int)\n"},"new_contents":{"kind":"string","value":"import time\n\nfrom chai import Chai\n\nfrom arrow import util\n\n\nclass UtilTests(Chai):\n def test_is_timestamp(self):\n timestamp_float = time.time()\n timestamp_int = int(timestamp_float)\n\n self.assertTrue(util.is_timestamp(timestamp_int))\n self.assertTrue(util.is_timestamp(timestamp_float))\n\n self.assertFalse(util.is_timestamp(str(timestamp_int)))\n self.assertFalse(util.is_timestamp(str(timestamp_float)))\n self.assertFalse(util.is_timestamp(True))\n self.assertFalse(util.is_timestamp(False))\n\n full_datetime = \"2019-06-23T13:12:42\"\n self.assertFalse(util.is_timestamp(full_datetime))\n\n overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_float)\n\n overflow_timestamp_int = int(overflow_timestamp_float)\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_int)\n"},"subject":{"kind":"string","value":"Replace hard coded timestamp with time.time()"},"message":{"kind":"string","value":"Replace hard coded timestamp with time.time()\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"crsmithdev/arrow"},"ndiff":{"kind":"string","value":"+ import time\n+ \n from chai import Chai\n \n from arrow import util\n \n \n class UtilTests(Chai):\n def test_is_timestamp(self):\n- timestamp_float = 1563047716.958061\n+ timestamp_float = time.time()\n timestamp_int = int(timestamp_float)\n \n self.assertTrue(util.is_timestamp(timestamp_int))\n self.assertTrue(util.is_timestamp(timestamp_float))\n \n self.assertFalse(util.is_timestamp(str(timestamp_int)))\n self.assertFalse(util.is_timestamp(str(timestamp_float)))\n self.assertFalse(util.is_timestamp(True))\n self.assertFalse(util.is_timestamp(False))\n \n full_datetime = \"2019-06-23T13:12:42\"\n self.assertFalse(util.is_timestamp(full_datetime))\n \n overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_float)\n \n overflow_timestamp_int = int(overflow_timestamp_float)\n with self.assertRaises((OverflowError, ValueError)):\n 
util.is_timestamp(overflow_timestamp_int)\n "},"instruction":{"kind":"string","value":"Replace hard coded timestamp with time.time()"},"content":{"kind":"string","value":"## Code Before:\nfrom chai import Chai\n\nfrom arrow import util\n\n\nclass UtilTests(Chai):\n def test_is_timestamp(self):\n timestamp_float = 1563047716.958061\n timestamp_int = int(timestamp_float)\n\n self.assertTrue(util.is_timestamp(timestamp_int))\n self.assertTrue(util.is_timestamp(timestamp_float))\n\n self.assertFalse(util.is_timestamp(str(timestamp_int)))\n self.assertFalse(util.is_timestamp(str(timestamp_float)))\n self.assertFalse(util.is_timestamp(True))\n self.assertFalse(util.is_timestamp(False))\n\n full_datetime = \"2019-06-23T13:12:42\"\n self.assertFalse(util.is_timestamp(full_datetime))\n\n overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_float)\n\n overflow_timestamp_int = int(overflow_timestamp_float)\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_int)\n\n## Instruction:\nReplace hard coded timestamp with time.time()\n## Code After:\nimport time\n\nfrom chai import Chai\n\nfrom arrow import util\n\n\nclass UtilTests(Chai):\n def test_is_timestamp(self):\n timestamp_float = time.time()\n timestamp_int = int(timestamp_float)\n\n self.assertTrue(util.is_timestamp(timestamp_int))\n self.assertTrue(util.is_timestamp(timestamp_float))\n\n self.assertFalse(util.is_timestamp(str(timestamp_int)))\n self.assertFalse(util.is_timestamp(str(timestamp_float)))\n self.assertFalse(util.is_timestamp(True))\n self.assertFalse(util.is_timestamp(False))\n\n full_datetime = \"2019-06-23T13:12:42\"\n self.assertFalse(util.is_timestamp(full_datetime))\n\n overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_float)\n\n overflow_timestamp_int = int(overflow_timestamp_float)\n with self.assertRaises((OverflowError, ValueError)):\n util.is_timestamp(overflow_timestamp_int)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nimport time\n\nfrom chai import Chai\n\nfrom arrow import util\n\n\n// ... modified code ... \n\n\n\nclass UtilTests(Chai):\n def test_is_timestamp(self):\n timestamp_float = time.time()\n timestamp_int = int(timestamp_float)\n\n self.assertTrue(util.is_timestamp(timestamp_int))\n\n\n// ... 
rest of the code ..."}}},{"rowIdx":1579,"cells":{"commit":{"kind":"string","value":"4208538a2b7c5f2280f67520a73bd87b74de26dd"},"old_file":{"kind":"string","value":"scripts/getsent.py"},"new_file":{"kind":"string","value":"scripts/getsent.py"},"old_contents":{"kind":"string","value":"\nimport sys\nimport depio\n\nsentnum = int(sys.argv[2])\nfnames = [sys.argv[1]]\nfor fname in fnames:\n\tsents = list(depio.depread(fname))\n\ti=0\n\tout = open(\"%d.%s\" % (sentnum,fname),'w')\n\tfor outl in sents[sentnum]:\n\t\tout.write('\\t'.join(outl) + '\\n')\n\tbreak\n\tout.close()\n"},"new_contents":{"kind":"string","value":"\nimport sys\nimport depio\n\nsentnum = int(sys.argv[2])\nfnames = [sys.argv[1]]\nfor fname in fnames:\n\tsents = list(depio.depread(fname))\n\ti=0\n\tout = open(\"%d.%s\" % (sentnum,fname),'w')\n\tfor outl in sents[sentnum]:\n\t\tout.write('\\t'.join(outl) + '\\n')\n\tout.write('\\n')\n\tout.close()\n"},"subject":{"kind":"string","value":"Fix script to output new line at end of file"},"message":{"kind":"string","value":"Fix script to output new line at end of file\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"habeanf/yap,habeanf/yap"},"ndiff":{"kind":"string","value":" \n import sys\n import depio\n \n sentnum = int(sys.argv[2])\n fnames = [sys.argv[1]]\n for fname in fnames:\n \tsents = list(depio.depread(fname))\n \ti=0\n \tout = open(\"%d.%s\" % (sentnum,fname),'w')\n \tfor outl in sents[sentnum]:\n \t\tout.write('\\t'.join(outl) + '\\n')\n- \tbreak\n+ \tout.write('\\n')\n \tout.close()\n "},"instruction":{"kind":"string","value":"Fix script to output new line at end of file"},"content":{"kind":"string","value":"## Code Before:\n\nimport sys\nimport depio\n\nsentnum = int(sys.argv[2])\nfnames = [sys.argv[1]]\nfor fname in fnames:\n\tsents = list(depio.depread(fname))\n\ti=0\n\tout = open(\"%d.%s\" % (sentnum,fname),'w')\n\tfor outl in sents[sentnum]:\n\t\tout.write('\\t'.join(outl) + '\\n')\n\tbreak\n\tout.close()\n\n## Instruction:\nFix script to output new line at end of file\n## Code After:\n\nimport sys\nimport depio\n\nsentnum = int(sys.argv[2])\nfnames = [sys.argv[1]]\nfor fname in fnames:\n\tsents = list(depio.depread(fname))\n\ti=0\n\tout = open(\"%d.%s\" % (sentnum,fname),'w')\n\tfor outl in sents[sentnum]:\n\t\tout.write('\\t'.join(outl) + '\\n')\n\tout.write('\\n')\n\tout.close()\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\n\tout = open(\"%d.%s\" % (sentnum,fname),'w')\n\tfor outl in sents[sentnum]:\n\t\tout.write('\\t'.join(outl) + '\\n')\n\tout.write('\\n')\n\tout.close()\n\n\n# ... 
rest of the code ..."}}},{"rowIdx":1580,"cells":{"commit":{"kind":"string","value":"ce67500ec566784f6f8883e1ffcaef6ad768d810"},"old_file":{"kind":"string","value":"2018/05/solve.py"},"new_file":{"kind":"string","value":"2018/05/solve.py"},"old_contents":{"kind":"string","value":"data = open(\"input.txt\").read().strip()\n\nimport re\nfrom collections import Counter\n\ndef solve1(data):\n prevData = None\n while data != prevData:\n prevData = data\n for a,b in zip(data, data[1:]):\n if (a != b and a == b.lower()) or (a != b and a.lower() == b):\n data = data.replace(a+b, \"\")\n break\n\n return len(data)\n\nprint(solve1(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve1(data))\n\ndef solve2(data):\n min_len = len(data)\n min_chr = None\n for c in 'abcdefghijklmnopqrstubwxyz':\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n min_len = l\n min_chr = c\n return min_len\n\nprint(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve2(data))\n"},"new_contents":{"kind":"string","value":"data = open(\"input.txt\").read().strip()\n\nimport re\nimport string\nfrom collections import Counter\n\ndef solve1(data):\n prevData = None\n while data != prevData:\n prevData = data\n for a,b in zip(data, data[1:]):\n if (a != b and a == b.lower()) or (a != b and a.lower() == b):\n data = data.replace(a+b, \"\")\n break\n\n return len(data)\n\nprint(solve1(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve1(data))\n\ndef solve2(data):\n min_len = len(data)\n for c in string.ascii_lowercase:\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n min_len = l\n return min_len\n\nprint(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve2(data))\n"},"subject":{"kind":"string","value":"Fix bug with omitting v"},"message":{"kind":"string","value":"Fix bug with omitting v\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"lamperi/aoc,lamperi/aoc,lamperi/aoc,lamperi/aoc,lamperi/aoc"},"ndiff":{"kind":"string","value":" data = open(\"input.txt\").read().strip()\n \n import re\n+ import string\n from collections import Counter\n \n def solve1(data):\n prevData = None\n while data != prevData:\n prevData = data\n for a,b in zip(data, data[1:]):\n if (a != b and a == b.lower()) or (a != b and a.lower() == b):\n data = data.replace(a+b, \"\")\n break\n \n return len(data)\n \n print(solve1(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n \n print(solve1(data))\n \n def solve2(data):\n min_len = len(data)\n+ for c in string.ascii_lowercase:\n- min_chr = None\n- for c in 'abcdefghijklmnopqrstubwxyz':\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n min_len = l\n- min_chr = c\n return min_len\n \n print(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n \n print(solve2(data))\n "},"instruction":{"kind":"string","value":"Fix bug with omitting v"},"content":{"kind":"string","value":"## Code Before:\ndata = open(\"input.txt\").read().strip()\n\nimport re\nfrom collections import Counter\n\ndef solve1(data):\n prevData = None\n while data != prevData:\n prevData = data\n for a,b in zip(data, data[1:]):\n if (a != b and a == b.lower()) or (a != b and a.lower() == b):\n data = data.replace(a+b, \"\")\n break\n\n return len(data)\n\nprint(solve1(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve1(data))\n\ndef solve2(data):\n min_len = len(data)\n min_chr = None\n for c in 'abcdefghijklmnopqrstubwxyz':\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n 
min_len = l\n min_chr = c\n return min_len\n\nprint(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve2(data))\n\n## Instruction:\nFix bug with omitting v\n## Code After:\ndata = open(\"input.txt\").read().strip()\n\nimport re\nimport string\nfrom collections import Counter\n\ndef solve1(data):\n prevData = None\n while data != prevData:\n prevData = data\n for a,b in zip(data, data[1:]):\n if (a != b and a == b.lower()) or (a != b and a.lower() == b):\n data = data.replace(a+b, \"\")\n break\n\n return len(data)\n\nprint(solve1(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve1(data))\n\ndef solve2(data):\n min_len = len(data)\n for c in string.ascii_lowercase:\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n min_len = l\n return min_len\n\nprint(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\nprint(solve2(data))\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\ndata = open(\"input.txt\").read().strip()\n\nimport re\nimport string\nfrom collections import Counter\n\ndef solve1(data):\n\n\n// ... modified code ... \n\n\n\ndef solve2(data):\n min_len = len(data)\n for c in string.ascii_lowercase:\n d = data.replace(c, \"\").replace(c.upper(), \"\")\n l = solve1(d)\n if l < min_len:\n min_len = l\n return min_len\n\nprint(solve2(\"\"\"dabAcCaCBAcCcaDA\"\"\"))\n\n\n// ... rest of the code ..."}}},{"rowIdx":1581,"cells":{"commit":{"kind":"string","value":"406abe34d82aeb7d72d7f4dc96d44d57807f928b"},"old_file":{"kind":"string","value":"devilry/devilry_authenticate/urls.py"},"new_file":{"kind":"string","value":"devilry/devilry_authenticate/urls.py"},"old_contents":{"kind":"string","value":"from devilry.devilry_authenticate.views import CustomLoginView\nfrom django.conf.urls import url\n\nfrom django_cradmin.apps.cradmin_authenticate.views import logout\n\nurlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n]\n"},"new_contents":{"kind":"string","value":"from devilry.devilry_authenticate.views import CustomLoginView\nfrom django.conf.urls import url\n\nfrom django_cradmin.apps.cradmin_authenticate.views import logout\n\nurlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n # url(r'^feide/oauth-successful-login-callback', ...),\n]\n"},"subject":{"kind":"string","value":"Add placeholder for feide oauth callback."},"message":{"kind":"string","value":"authenticate: Add placeholder for feide oauth callback.\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"repos":{"kind":"string","value":"devilry/devilry-django,devilry/devilry-django,devilry/devilry-django,devilry/devilry-django"},"ndiff":{"kind":"string","value":" from devilry.devilry_authenticate.views import CustomLoginView\n from django.conf.urls import url\n \n from django_cradmin.apps.cradmin_authenticate.views import logout\n \n urlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n+ # url(r'^feide/oauth-successful-login-callback', ...),\n ]\n "},"instruction":{"kind":"string","value":"Add placeholder for feide oauth callback."},"content":{"kind":"string","value":"## Code Before:\nfrom devilry.devilry_authenticate.views import CustomLoginView\nfrom django.conf.urls 
import url\n\nfrom django_cradmin.apps.cradmin_authenticate.views import logout\n\nurlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n]\n\n## Instruction:\nAdd placeholder for feide oauth callback.\n## Code After:\nfrom devilry.devilry_authenticate.views import CustomLoginView\nfrom django.conf.urls import url\n\nfrom django_cradmin.apps.cradmin_authenticate.views import logout\n\nurlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n # url(r'^feide/oauth-successful-login-callback', ...),\n]\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nurlpatterns = [\n url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),\n url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),\n # url(r'^feide/oauth-successful-login-callback', ...),\n]\n\n\n// ... rest of the code ..."}}},{"rowIdx":1582,"cells":{"commit":{"kind":"string","value":"2565df456ecb290f620ce4dadca19c76b0eeb1af"},"old_file":{"kind":"string","value":"widgets/card.py"},"new_file":{"kind":"string","value":"widgets/card.py"},"old_contents":{"kind":"string","value":"\nfrom flask import render_template\n\nfrom cache import cache\nfrom models.person import Person\n\n\n@cache.memoize(24 * 60 * 60)\ndef card(person_or_id, **kwargs):\n\n if isinstance(person_or_id, Person):\n person = person_or_id\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n\n return render_template('widgets/card.html', person=person, **kwargs)\n\n"},"new_contents":{"kind":"string","value":"\nfrom flask import render_template\n\nfrom cache import cache\nfrom models.person import Person\n\n\n@cache.memoize(24 * 60 * 60)\ndef card(person_or_id, detailed=False, small=False):\n\n if isinstance(person_or_id, Person):\n person = person_or_id\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n\n return render_template('widgets/card.html', person=person, detailed=detailed, small=small)\n\n"},"subject":{"kind":"string","value":"Fix a bug in caching"},"message":{"kind":"string","value":"Fix a bug in caching\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr"},"ndiff":{"kind":"string","value":" \n from flask import render_template\n \n from cache import cache\n from models.person import Person\n \n \n @cache.memoize(24 * 60 * 60)\n- def card(person_or_id, **kwargs):\n+ def card(person_or_id, detailed=False, small=False):\n \n if isinstance(person_or_id, Person):\n person = person_or_id\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n \n- return render_template('widgets/card.html', person=person, **kwargs)\n+ return render_template('widgets/card.html', person=person, detailed=detailed, small=small)\n \n "},"instruction":{"kind":"string","value":"Fix a bug in caching"},"content":{"kind":"string","value":"## Code Before:\n\nfrom flask import render_template\n\nfrom cache import cache\nfrom models.person import Person\n\n\n@cache.memoize(24 * 60 * 60)\ndef card(person_or_id, **kwargs):\n\n if isinstance(person_or_id, Person):\n person = person_or_id\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n\n return render_template('widgets/card.html', 
person=person, **kwargs)\n\n\n## Instruction:\nFix a bug in caching\n## Code After:\n\nfrom flask import render_template\n\nfrom cache import cache\nfrom models.person import Person\n\n\n@cache.memoize(24 * 60 * 60)\ndef card(person_or_id, detailed=False, small=False):\n\n if isinstance(person_or_id, Person):\n person = person_or_id\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n\n return render_template('widgets/card.html', person=person, detailed=detailed, small=small)\n\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\n\n\n@cache.memoize(24 * 60 * 60)\ndef card(person_or_id, detailed=False, small=False):\n\n if isinstance(person_or_id, Person):\n person = person_or_id\n\n\n# ... modified code ... \n\n\n else:\n person = Person.query.filter_by(id=person_or_id).first()\n\n return render_template('widgets/card.html', person=person, detailed=detailed, small=small)\n\n\n\n# ... rest of the code ..."}}},{"rowIdx":1583,"cells":{"commit":{"kind":"string","value":"e0d811f5146ba2c97af3da4ac904db4d16b5d9bb"},"old_file":{"kind":"string","value":"python/ctci_big_o.py"},"new_file":{"kind":"string","value":"python/ctci_big_o.py"},"old_contents":{"kind":"string","value":"p = int(input().strip())\nfor a0 in range(p):\n n = int(input().strip())\n"},"new_contents":{"kind":"string","value":"from collections import deque\n\n\nclass Sieve(object):\n\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound + 1\n self.primes = []\n self.populate_primes()\n # print(\"Primes \" + str(self.primes))\n\n def is_prime(self, potential_prime):\n return potential_prime in self.primes\n\n def populate_primes(self,):\n remaining = deque(range(2, self.upper_bound))\n while remaining:\n prime = remaining.popleft()\n self.primes.append(prime)\n for multiple in self.multiples(prime):\n if multiple in remaining:\n remaining.remove(multiple)\n\n def multiples(self, num):\n return range(num, self.upper_bound, num)\n\n\nNUM_CASES = int(input().strip())\nTEST_CASES = []\nfor _ in range(NUM_CASES):\n TEST_CASES.append(int(input().strip()))\n\n# print(\"Max: \" + str(max(TEST_CASES)))\nSIEVE = Sieve(max(TEST_CASES))\nfor test_case in TEST_CASES:\n if SIEVE.is_prime(test_case):\n print(\"Prime\")\n else:\n print(\"Not prime\")\n"},"subject":{"kind":"string","value":"Solve all test cases but 2"},"message":{"kind":"string","value":"Solve all test cases but 2\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank"},"ndiff":{"kind":"string","value":"+ from collections import deque\n- p = int(input().strip())\n- for a0 in range(p):\n- n = int(input().strip())\n \n+ \n+ class Sieve(object):\n+ \n+ def __init__(self, upper_bound):\n+ self.upper_bound = upper_bound + 1\n+ self.primes = []\n+ self.populate_primes()\n+ # print(\"Primes \" + str(self.primes))\n+ \n+ def is_prime(self, potential_prime):\n+ return potential_prime in self.primes\n+ \n+ def populate_primes(self,):\n+ remaining = deque(range(2, self.upper_bound))\n+ while remaining:\n+ prime = remaining.popleft()\n+ self.primes.append(prime)\n+ for multiple in self.multiples(prime):\n+ if multiple in remaining:\n+ remaining.remove(multiple)\n+ \n+ def multiples(self, num):\n+ return range(num, self.upper_bound, num)\n+ \n+ \n+ NUM_CASES = int(input().strip())\n+ TEST_CASES = []\n+ for _ in range(NUM_CASES):\n+ 
TEST_CASES.append(int(input().strip()))\n+ \n+ # print(\"Max: \" + str(max(TEST_CASES)))\n+ SIEVE = Sieve(max(TEST_CASES))\n+ for test_case in TEST_CASES:\n+ if SIEVE.is_prime(test_case):\n+ print(\"Prime\")\n+ else:\n+ print(\"Not prime\")\n+ "},"instruction":{"kind":"string","value":"Solve all test cases but 2"},"content":{"kind":"string","value":"## Code Before:\np = int(input().strip())\nfor a0 in range(p):\n n = int(input().strip())\n\n## Instruction:\nSolve all test cases but 2\n## Code After:\nfrom collections import deque\n\n\nclass Sieve(object):\n\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound + 1\n self.primes = []\n self.populate_primes()\n # print(\"Primes \" + str(self.primes))\n\n def is_prime(self, potential_prime):\n return potential_prime in self.primes\n\n def populate_primes(self,):\n remaining = deque(range(2, self.upper_bound))\n while remaining:\n prime = remaining.popleft()\n self.primes.append(prime)\n for multiple in self.multiples(prime):\n if multiple in remaining:\n remaining.remove(multiple)\n\n def multiples(self, num):\n return range(num, self.upper_bound, num)\n\n\nNUM_CASES = int(input().strip())\nTEST_CASES = []\nfor _ in range(NUM_CASES):\n TEST_CASES.append(int(input().strip()))\n\n# print(\"Max: \" + str(max(TEST_CASES)))\nSIEVE = Sieve(max(TEST_CASES))\nfor test_case in TEST_CASES:\n if SIEVE.is_prime(test_case):\n print(\"Prime\")\n else:\n print(\"Not prime\")\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\nfrom collections import deque\n\n\nclass Sieve(object):\n\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound + 1\n self.primes = []\n self.populate_primes()\n # print(\"Primes \" + str(self.primes))\n\n def is_prime(self, potential_prime):\n return potential_prime in self.primes\n\n def populate_primes(self,):\n remaining = deque(range(2, self.upper_bound))\n while remaining:\n prime = remaining.popleft()\n self.primes.append(prime)\n for multiple in self.multiples(prime):\n if multiple in remaining:\n remaining.remove(multiple)\n\n def multiples(self, num):\n return range(num, self.upper_bound, num)\n\n\nNUM_CASES = int(input().strip())\nTEST_CASES = []\nfor _ in range(NUM_CASES):\n TEST_CASES.append(int(input().strip()))\n\n# print(\"Max: \" + str(max(TEST_CASES)))\nSIEVE = Sieve(max(TEST_CASES))\nfor test_case in TEST_CASES:\n if SIEVE.is_prime(test_case):\n print(\"Prime\")\n else:\n print(\"Not prime\")\n\n\n# ... 
rest of the code ..."}}},{"rowIdx":1584,"cells":{"commit":{"kind":"string","value":"b86c53c388c39baee1ddfe3a615cdad20d272055"},"old_file":{"kind":"string","value":"antcolony/util.py"},"new_file":{"kind":"string","value":"antcolony/util.py"},"old_contents":{"kind":"string","value":"import json\n\ndef avg(iterable):\n return sum(iterable) / len(iterable)\n\ndef nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n"},"new_contents":{"kind":"string","value":"import json\n\ndef avg(iterable):\n sum_ = 0\n element_count = 0\n for element in iterable:\n sum_ += element\n element_count += 1\n return sum_ / element_count\n\ndef nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n"},"subject":{"kind":"string","value":"Make avg() work with iterators"},"message":{"kind":"string","value":"Make avg() work with iterators\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"repos":{"kind":"string","value":"ppolewicz/ant-colony,ppolewicz/ant-colony"},"ndiff":{"kind":"string","value":" import json\n \n def avg(iterable):\n- return sum(iterable) / len(iterable)\n+ sum_ = 0\n+ element_count = 0\n+ for element in iterable:\n+ sum_ += element\n+ element_count += 1\n+ return sum_ / element_count\n \n def nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n \n "},"instruction":{"kind":"string","value":"Make avg() work with iterators"},"content":{"kind":"string","value":"## Code Before:\nimport json\n\ndef avg(iterable):\n return sum(iterable) / len(iterable)\n\ndef nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n\n## Instruction:\nMake avg() work with iterators\n## Code After:\nimport json\n\ndef avg(iterable):\n sum_ = 0\n element_count = 0\n for element in iterable:\n sum_ += element\n element_count += 1\n return sum_ / element_count\n\ndef nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\nimport json\n\ndef avg(iterable):\n sum_ = 0\n element_count = 0\n for element in iterable:\n sum_ += element\n element_count += 1\n return sum_ / element_count\n\ndef nice_json_dump(data, filepath):\n with open(filepath, 'w') as f:\n\n\n ... 
"}}},{"rowIdx":1585,"cells":{"commit":{"kind":"string","value":"419d2ca4d53e33c58d556b45bcc6910bd28ef91a"},"old_file":{"kind":"string","value":"djangae/apps.py"},"new_file":{"kind":"string","value":"djangae/apps.py"},"old_contents":{"kind":"string","value":"from django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\nclass DjangaeConfig(AppConfig):\n name = 'djangae'\n verbose_name = _(\"Djangae\")\n\n def ready(self):\n from .patches.contenttypes import patch\n patch()\n\n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n\n request_finished.connect(reset_context)\n request_started.connect(reset_context)\n"},"new_contents":{"kind":"string","value":"from django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\nclass DjangaeConfig(AppConfig):\n name = 'djangae'\n verbose_name = _(\"Djangae\")\n\n def ready(self):\n from .patches.contenttypes import patch\n patch()\n\n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n\n request_finished.connect(reset_context, dispatch_uid=\"request_finished_context_reset\")\n request_started.connect(reset_context, dispatch_uid=\"request_started_context_reset\")\n"},"subject":{"kind":"string","value":"Make sure we only connect to the signals onces"},"message":{"kind":"string","value":"Make sure we only connect to the signals onces\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"bsd-3-clause"},"repos":{"kind":"string","value":"kirberich/djangae,asendecka/djangae,asendecka/djangae,SiPiggles/djangae,wangjun/djangae,potatolondon/djangae,kirberich/djangae,SiPiggles/djangae,SiPiggles/djangae,leekchan/djangae,armirusco/djangae,chargrizzle/djangae,trik/djangae,grzes/djangae,armirusco/djangae,jscissr/djangae,trik/djangae,jscissr/djangae,wangjun/djangae,asendecka/djangae,leekchan/djangae,chargrizzle/djangae,wangjun/djangae,grzes/djangae,trik/djangae,potatolondon/djangae,grzes/djangae,jscissr/djangae,chargrizzle/djangae,kirberich/djangae,armirusco/djangae,leekchan/djangae"},"ndiff":{"kind":"string","value":" from django.apps import AppConfig\n from django.utils.translation import ugettext_lazy as _\n \n class DjangaeConfig(AppConfig):\n name = 'djangae'\n verbose_name = _(\"Djangae\")\n \n def ready(self):\n from .patches.contenttypes import patch\n patch()\n \n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n \n- request_finished.connect(reset_context)\n- request_started.connect(reset_context)\n+ request_finished.connect(reset_context, dispatch_uid=\"request_finished_context_reset\")\n+ request_started.connect(reset_context, dispatch_uid=\"request_started_context_reset\")\n "},"instruction":{"kind":"string","value":"Make sure we only connect to the signals onces"},"content":{"kind":"string","value":"## Code Before:\nfrom django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\nclass DjangaeConfig(AppConfig):\n name = 'djangae'\n verbose_name = _(\"Djangae\")\n\n def ready(self):\n from .patches.contenttypes import patch\n patch()\n\n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n\n request_finished.connect(reset_context)\n request_started.connect(reset_context)\n\n## Instruction:\nMake sure we only connect 
to the signals onces\n## Code After:\nfrom django.apps import AppConfig\nfrom django.utils.translation import ugettext_lazy as _\n\nclass DjangaeConfig(AppConfig):\n name = 'djangae'\n verbose_name = _(\"Djangae\")\n\n def ready(self):\n from .patches.contenttypes import patch\n patch()\n\n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n\n request_finished.connect(reset_context, dispatch_uid=\"request_finished_context_reset\")\n request_started.connect(reset_context, dispatch_uid=\"request_started_context_reset\")\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\n from djangae.db.backends.appengine.caching import reset_context\n from django.core.signals import request_finished, request_started\n\n request_finished.connect(reset_context, dispatch_uid=\"request_finished_context_reset\")\n request_started.connect(reset_context, dispatch_uid=\"request_started_context_reset\")\n\n\n# ... rest of the code ..."}}},{"rowIdx":1586,"cells":{"commit":{"kind":"string","value":"318c98ab5a9710dfdeedc0ee893e87993ac49727"},"old_file":{"kind":"string","value":"robosync/test/test_robosync.py"},"new_file":{"kind":"string","value":"robosync/test/test_robosync.py"},"old_contents":{"kind":"string","value":"import unittest\nimport os\nimport shutil\n\nclass testMirror(unittest.TestCase):\n\n def setUp(self):\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 'dir3']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n with open(os.path.join('test_source', d_name, f_name), 'w') as f:\n f.write(content)\n\n\n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n\n\n def test_mirror(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n"},"new_contents":{"kind":"string","value":"import unittest\nimport os\nimport shutil\n\nclass testMirror(unittest.TestCase):\n\n def setUp(self):\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 'dir3']\n dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n with open('source_file.txt', 'w') as f:\n f.write('\\n'.join(source_dirs))\n\n with open('dest_file.txt', 'w') as f:\n f.write('\\n'.join(dest_dirs))\n\n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n with open(os.path.join('test_source', d_name, f_name), 'w') as f:\n f.write(content)\n\n\n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n os.remove('source_file.txt')\n os.remove('dest_file.txt')\n\n\n def test_mirror(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n"},"subject":{"kind":"string","value":"Add source and destination list to setup and teardown"},"message":{"kind":"string","value":"Add source and destination list to setup and teardown\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"rbn920/robosync"},"ndiff":{"kind":"string","value":" import unittest\n import os\n import shutil\n \n class testMirror(unittest.TestCase):\n \n def setUp(self):\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 
'dir3']\n+ dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n+ with open('source_file.txt', 'w') as f:\n+ f.write('\\n'.join(source_dirs))\n+ \n+ with open('dest_file.txt', 'w') as f:\n+ f.write('\\n'.join(dest_dirs))\n+ \n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n with open(os.path.join('test_source', d_name, f_name), 'w') as f:\n f.write(content)\n \n \n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n+ os.remove('source_file.txt')\n+ os.remove('dest_file.txt')\n \n \n def test_mirror(self):\n pass\n \n if __name__ == '__main__':\n unittest.main()\n "},"instruction":{"kind":"string","value":"Add source and destination list to setup and teardown"},"content":{"kind":"string","value":"## Code Before:\nimport unittest\nimport os\nimport shutil\n\nclass testMirror(unittest.TestCase):\n\n def setUp(self):\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 'dir3']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n with open(os.path.join('test_source', d_name, f_name), 'w') as f:\n f.write(content)\n\n\n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n\n\n def test_mirror(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n\n## Instruction:\nAdd source and destination list to setup and teardown\n## Code After:\nimport unittest\nimport os\nimport shutil\n\nclass testMirror(unittest.TestCase):\n\n def setUp(self):\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 'dir3']\n dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n with open('source_file.txt', 'w') as f:\n f.write('\\n'.join(source_dirs))\n\n with open('dest_file.txt', 'w') as f:\n f.write('\\n'.join(dest_dirs))\n\n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n with open(os.path.join('test_source', d_name, f_name), 'w') as f:\n f.write(content)\n\n\n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n os.remove('source_file.txt')\n os.remove('dest_file.txt')\n\n\n def test_mirror(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\n os.mkdir('test_source')\n os.mkdir('test_dest')\n source_dirs = ['dir1', 'dir2', 'dir3']\n dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']\n filenames = ['file1.txt', 'file2.txt', 'file3.txt']\n contents = ['foobar1', 'foobar2', 'foobar3']\n with open('source_file.txt', 'w') as f:\n f.write('\\n'.join(source_dirs))\n\n with open('dest_file.txt', 'w') as f:\n f.write('\\n'.join(dest_dirs))\n\n for d_name, f_name, content, in zip(source_dirs, filenames, contents):\n new_dir = os.path.join('test_source', d_name)\n os.mkdir(new_dir)\n\n\n ... \n\n\n def tearDown(self):\n shutil.rmtree('test_source')\n shutil.rmtree('test_dest')\n os.remove('source_file.txt')\n os.remove('dest_file.txt')\n\n\n def test_mirror(self):\n\n\n ... 
"}}},{"rowIdx":1587,"cells":{"commit":{"kind":"string","value":"34bf8d82580b83b1e0409db8636877a22203996b"},"old_file":{"kind":"string","value":"cryptex/trade.py"},"new_file":{"kind":"string","value":"cryptex/trade.py"},"old_contents":{"kind":"string","value":"class Trade(object):\n BUY = 0\n SELL = 1\n def __init__(self, trade_id, trade_type, base_currency, counter_currency,\n time, order_id, amount, price, fee=None):\n self.trade_id = trade_id\n self.trade_type = trade_type\n self.base_currency = base_currency\n self.counter_currency = counter_currency\n self.time = time\n self.order_id = order_id\n self.amount = amount\n self.price = price\n self.fee = fee\n\n def __str__(self):\n if self.trade_type == 0:\n ts = 'Buy'\n else:\n ts ='Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n"},"new_contents":{"kind":"string","value":"class Trade(object):\n BUY = 0\n SELL = 1\n def __init__(self, trade_id, trade_type, base_currency, counter_currency,\n time, order_id, amount, price, fee=None):\n self.trade_id = trade_id\n self.trade_type = trade_type\n self.base_currency = base_currency\n self.counter_currency = counter_currency\n self.time = time\n self.order_id = order_id\n self.amount = amount\n self.price = price\n self.fee = fee\n\n def __str__(self):\n if self.trade_type == Trade.BUY:\n ts = 'Buy'\n else:\n ts = 'Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n"},"subject":{"kind":"string","value":"Remove magic number check in Trade str method"},"message":{"kind":"string","value":"Remove magic number check in Trade str method\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"coink/cryptex"},"ndiff":{"kind":"string","value":" class Trade(object):\n BUY = 0\n SELL = 1\n def __init__(self, trade_id, trade_type, base_currency, counter_currency,\n time, order_id, amount, price, fee=None):\n self.trade_id = trade_id\n self.trade_type = trade_type\n self.base_currency = base_currency\n self.counter_currency = counter_currency\n self.time = time\n self.order_id = order_id\n self.amount = amount\n self.price = price\n self.fee = fee\n \n def __str__(self):\n- if self.trade_type == 0:\n+ if self.trade_type == Trade.BUY:\n ts = 'Buy'\n else:\n- ts ='Sell'\n+ ts = 'Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n "},"instruction":{"kind":"string","value":"Remove magic number check in Trade str method"},"content":{"kind":"string","value":"## Code Before:\nclass Trade(object):\n BUY = 0\n SELL = 1\n def __init__(self, trade_id, trade_type, base_currency, counter_currency,\n time, order_id, amount, price, fee=None):\n self.trade_id = trade_id\n self.trade_type = trade_type\n self.base_currency = base_currency\n self.counter_currency = counter_currency\n self.time = time\n self.order_id = order_id\n self.amount = amount\n self.price = price\n self.fee = fee\n\n def __str__(self):\n if self.trade_type == 0:\n ts = 'Buy'\n else:\n ts ='Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n\n## Instruction:\nRemove magic number check in Trade str method\n## Code After:\nclass Trade(object):\n BUY = 0\n SELL = 1\n def __init__(self, trade_id, trade_type, base_currency, counter_currency,\n time, order_id, amount, price, fee=None):\n self.trade_id = trade_id\n self.trade_type = trade_type\n self.base_currency = base_currency\n self.counter_currency = counter_currency\n self.time = time\n self.order_id = order_id\n self.amount = amount\n self.price 
= price\n self.fee = fee\n\n def __str__(self):\n if self.trade_type == Trade.BUY:\n ts = 'Buy'\n else:\n ts = 'Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\n self.fee = fee\n\n def __str__(self):\n if self.trade_type == Trade.BUY:\n ts = 'Buy'\n else:\n ts = 'Sell'\n return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)\n\n\n// ... rest of the code ..."}}},{"rowIdx":1588,"cells":{"commit":{"kind":"string","value":"b2d9b56ceb96718d1f3edc8ec019ca7218e33e7d"},"old_file":{"kind":"string","value":"src/rnaseq_lib/math/__init__.py"},"new_file":{"kind":"string","value":"src/rnaseq_lib/math/__init__.py"},"old_contents":{"kind":"string","value":"import numpy as np\n\n\n# Outlier\ndef iqr_bounds(ys):\n \"\"\"\n Return upper and lower bound for an array of values\n\n Lower bound: Q1 - (IQR * 1.5)\n Upper bound: Q3 + (IQR * 1.5)\n\n :param list ys: List of values to calculate IQR\n :return: Upper and lower bound\n :rtype: tuple(float, float)\n \"\"\"\n quartile_1, quartile_3 = np.percentile(ys, [25, 75])\n iqr = quartile_3 - quartile_1\n lower_bound = quartile_1 - (iqr * 1.5)\n upper_bound = quartile_3 + (iqr * 1.5)\n return upper_bound, lower_bound\n\n\n# Normalization\ndef min_max_normalize(df):\n return (df - df.min()) / (df.max() - df.min())\n\n\ndef mean_normalize(df):\n return (df - df.mean()) / df.std()\n\n\ndef l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n\n :param float x: Input value\n :param int|float pad: Pad value (to handle zeros)\n :return: log2(x+1) normalized value\n :rtype: float\n \"\"\"\n return np.log2(x + pad)\n"},"new_contents":{"kind":"string","value":"import numpy as np\n\n\n# Outlier\ndef iqr_bounds(ys):\n \"\"\"\n Return upper and lower bound for an array of values\n\n Lower bound: Q1 - (IQR * 1.5)\n Upper bound: Q3 + (IQR * 1.5)\n\n :param list ys: List of values to calculate IQR\n :return: Upper and lower bound\n :rtype: tuple(float, float)\n \"\"\"\n quartile_1, quartile_3 = np.percentile(ys, [25, 75])\n iqr = quartile_3 - quartile_1\n lower_bound = quartile_1 - (iqr * 1.5)\n upper_bound = quartile_3 + (iqr * 1.5)\n return upper_bound, lower_bound\n\n\n# Normalization\ndef min_max_normalize(df):\n return (df - df.min()) / (df.max() - df.min())\n\n\ndef mean_normalize(df):\n return (df - df.mean()) / df.std()\n\n\ndef softmax(df):\n \"\"\"\n Normalizes columns to sum to 1\n\n :param pd.DataFrame df: Dataframe to normalize\n :return: Normalized DataFrame\n :rtype: pd.DataFrame\n \"\"\"\n return df.divide(df.sum())\n\n\ndef l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n\n :param float x: Input value\n :param int|float pad: Pad value (to handle zeros)\n :return: log2(x+1) normalized value\n :rtype: float\n \"\"\"\n return np.log2(x + pad)\n"},"subject":{"kind":"string","value":"Add docstring for softmax normalization function"},"message":{"kind":"string","value":"Add docstring for softmax normalization function\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"jvivian/rnaseq-lib,jvivian/rnaseq-lib"},"ndiff":{"kind":"string","value":" import numpy as np\n \n \n # Outlier\n def iqr_bounds(ys):\n \"\"\"\n Return upper and lower bound for an array of values\n \n Lower bound: Q1 - (IQR * 1.5)\n Upper bound: Q3 + (IQR * 1.5)\n \n :param list ys: List of values to calculate IQR\n :return: Upper and lower bound\n :rtype: tuple(float, float)\n \"\"\"\n quartile_1, 
quartile_3 = np.percentile(ys, [25, 75])\n iqr = quartile_3 - quartile_1\n lower_bound = quartile_1 - (iqr * 1.5)\n upper_bound = quartile_3 + (iqr * 1.5)\n return upper_bound, lower_bound\n \n \n # Normalization\n def min_max_normalize(df):\n return (df - df.min()) / (df.max() - df.min())\n \n \n def mean_normalize(df):\n return (df - df.mean()) / df.std()\n \n \n+ def softmax(df):\n+ \"\"\"\n+ Normalizes columns to sum to 1\n+ \n+ :param pd.DataFrame df: Dataframe to normalize\n+ :return: Normalized DataFrame\n+ :rtype: pd.DataFrame\n+ \"\"\"\n+ return df.divide(df.sum())\n+ \n+ \n def l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n \n :param float x: Input value\n :param int|float pad: Pad value (to handle zeros)\n :return: log2(x+1) normalized value\n :rtype: float\n \"\"\"\n return np.log2(x + pad)\n "},"instruction":{"kind":"string","value":"Add docstring for softmax normalization function"},"content":{"kind":"string","value":"## Code Before:\nimport numpy as np\n\n\n# Outlier\ndef iqr_bounds(ys):\n \"\"\"\n Return upper and lower bound for an array of values\n\n Lower bound: Q1 - (IQR * 1.5)\n Upper bound: Q3 + (IQR * 1.5)\n\n :param list ys: List of values to calculate IQR\n :return: Upper and lower bound\n :rtype: tuple(float, float)\n \"\"\"\n quartile_1, quartile_3 = np.percentile(ys, [25, 75])\n iqr = quartile_3 - quartile_1\n lower_bound = quartile_1 - (iqr * 1.5)\n upper_bound = quartile_3 + (iqr * 1.5)\n return upper_bound, lower_bound\n\n\n# Normalization\ndef min_max_normalize(df):\n return (df - df.min()) / (df.max() - df.min())\n\n\ndef mean_normalize(df):\n return (df - df.mean()) / df.std()\n\n\ndef l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n\n :param float x: Input value\n :param int|float pad: Pad value (to handle zeros)\n :return: log2(x+1) normalized value\n :rtype: float\n \"\"\"\n return np.log2(x + pad)\n\n## Instruction:\nAdd docstring for softmax normalization function\n## Code After:\nimport numpy as np\n\n\n# Outlier\ndef iqr_bounds(ys):\n \"\"\"\n Return upper and lower bound for an array of values\n\n Lower bound: Q1 - (IQR * 1.5)\n Upper bound: Q3 + (IQR * 1.5)\n\n :param list ys: List of values to calculate IQR\n :return: Upper and lower bound\n :rtype: tuple(float, float)\n \"\"\"\n quartile_1, quartile_3 = np.percentile(ys, [25, 75])\n iqr = quartile_3 - quartile_1\n lower_bound = quartile_1 - (iqr * 1.5)\n upper_bound = quartile_3 + (iqr * 1.5)\n return upper_bound, lower_bound\n\n\n# Normalization\ndef min_max_normalize(df):\n return (df - df.min()) / (df.max() - df.min())\n\n\ndef mean_normalize(df):\n return (df - df.mean()) / df.std()\n\n\ndef softmax(df):\n \"\"\"\n Normalizes columns to sum to 1\n\n :param pd.DataFrame df: Dataframe to normalize\n :return: Normalized DataFrame\n :rtype: pd.DataFrame\n \"\"\"\n return df.divide(df.sum())\n\n\ndef l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n\n :param float x: Input value\n :param int|float pad: Pad value (to handle zeros)\n :return: log2(x+1) normalized value\n :rtype: float\n \"\"\"\n return np.log2(x + pad)\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\n return (df - df.mean()) / df.std()\n\n\ndef softmax(df):\n \"\"\"\n Normalizes columns to sum to 1\n\n :param pd.DataFrame df: Dataframe to normalize\n :return: Normalized DataFrame\n :rtype: pd.DataFrame\n \"\"\"\n return df.divide(df.sum())\n\n\ndef l2norm(x, pad=0.001):\n \"\"\"\n Log2 normalization function\n\n\n ... 
"}}},{"rowIdx":1589,"cells":{"commit":{"kind":"string","value":"b4bdd8e20b82f8016030037712094f257af9221f"},"old_file":{"kind":"string","value":"cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py"},"new_file":{"kind":"string","value":"cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py"},"old_contents":{"kind":"string","value":"\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import MetaData, String, Table\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n\n\ndef downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n provider_location.drop()\n"},"new_contents":{"kind":"string","value":"\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import MetaData, String, Table\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n snapshots.update().values(provider_location=None).execute()\n\n\ndef downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n snapshots.drop_column(provider_location)\n"},"subject":{"kind":"string","value":"Fix provider_location column add for PSQL"},"message":{"kind":"string","value":"Fix provider_location column add for PSQL\n\nMigration 006 (commit 690cae58e6bbac5758ea2f7b60774c797d28fba5)\ndidn't work properly for postgres,\nthis patch corrects the upgrade by ensuring the execute\nis performed and the value is initialized to None.\n\nSince we haven't released a milestone etc with this migration in the\ncode it should be safe to just fix it here and submit.\n\nChange-Id: 
I10a09aed3470c35c8ebbe22f29aa511592167c35\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"nexusriot/cinder,github-borat/cinder,mahak/cinder,CloudServer/cinder,eharney/cinder,spring-week-topos/cinder-week,blueboxgroup/cinder,potsmaster/cinder,julianwang/cinder,github-borat/cinder,Datera/cinder,j-griffith/cinder,cloudbau/cinder,cloudbase/cinder,redhat-openstack/cinder,NeCTAR-RC/cinder,rakeshmi/cinder,abusse/cinder,winndows/cinder,abusse/cinder,dims/cinder,rickerc/cinder_audit,petrutlucian94/cinder,duhzecca/cinder,julianwang/cinder,ntt-sic/cinder,maelnor/cinder,apporc/cinder,nikesh-mahalka/cinder,tlakshman26/cinder-new-branch,phenoxim/cinder,nikesh-mahalka/cinder,JioCloud/cinder,rickerc/cinder_audit,winndows/cinder,Akrog/cinder,Paul-Ezell/cinder-1,Hybrid-Cloud/cinder,Paul-Ezell/cinder-1,alex8866/cinder,tlakshman26/cinder-https-changes,tlakshman26/cinder-bug-fix-volume-conversion-full,inkerra/cinder,dims/cinder,hguemar/cinder,scottdangelo/RemoveVolumeMangerLocks,j-griffith/cinder,leilihh/cinder,Thingee/cinder,takeshineshiro/cinder,Datera/cinder,bswartz/cinder,nexusriot/cinder,phenoxim/cinder,NetApp/cinder,tlakshman26/cinder-new-branch,openstack/cinder,Thingee/cinder,bswartz/cinder,mahak/cinder,hguemar/cinder,Thingee/cinder,ntt-sic/cinder,leilihh/cinder,saeki-masaki/cinder,blueboxgroup/cinder,ge0rgi/cinder,alex8866/cinder,scality/cinder,spring-week-topos/cinder-week,maelnor/cinder,petrutlucian94/cinder,potsmaster/cinder,Akrog/cinder,scottdangelo/RemoveVolumeMangerLocks,rakeshmi/cinder,Nexenta/cinder,redhat-openstack/cinder,duhzecca/cinder,Nexenta/cinder,NetApp/cinder,Accelerite/cinder,openstack/cinder,eharney/cinder,manojhirway/ExistingImagesOnNFS,JioCloud/cinder,manojhirway/ExistingImagesOnNFS,sasukeh/cinder,saeki-masaki/cinder,NeCTAR-RC/cinder,cloudbase/cinder,Accelerite/cinder,sasukeh/cinder,takeshineshiro/cinder,Hybrid-Cloud/cinder,CloudServer/cinder,tlakshman26/cinder-https-changes,inkerra/cinder,scality/cinder,tlakshman26/cinder-bug-fix-volume-conversion-full,cloudbau/cinder,apporc/cinder"},"ndiff":{"kind":"string","value":" \n \n from sqlalchemy import Column\n from sqlalchemy import MetaData, String, Table\n \n \n def upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n \n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n+ snapshots.update().values(provider_location=None).execute()\n \n \n def downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n \n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n- provider_location.drop()\n+ snapshots.drop_column(provider_location)\n "},"instruction":{"kind":"string","value":"Fix provider_location column add for PSQL"},"content":{"kind":"string","value":"## Code Before:\n\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import MetaData, String, Table\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n\n\ndef downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n provider_location.drop()\n\n## Instruction:\nFix 
provider_location column add for PSQL\n## Code After:\n\n\nfrom sqlalchemy import Column\nfrom sqlalchemy import MetaData, String, Table\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n snapshots.update().values(provider_location=None).execute()\n\n\ndef downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n snapshots.drop_column(provider_location)\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = Column('provider_location', String(255))\n snapshots.create_column(provider_location)\n snapshots.update().values(provider_location=None).execute()\n\n\ndef downgrade(migrate_engine):\n\n\n ... \n\n\n\n snapshots = Table('snapshots', meta, autoload=True)\n provider_location = snapshots.columns.provider_location\n snapshots.drop_column(provider_location)\n\n\n ... "}}},{"rowIdx":1590,"cells":{"commit":{"kind":"string","value":"71df45002746b162e04a125403cad390accb949e"},"old_file":{"kind":"string","value":"backend/main.py"},"new_file":{"kind":"string","value":"backend/main.py"},"old_contents":{"kind":"string","value":"import logging\n\nfrom firebase import firebase\nfrom flask import Flask, jsonify, request\nimport flask_cors\nfrom google.appengine.ext import ndb\nimport google.auth.transport.requests\nimport google.oauth2.id_token\nimport requests_toolbelt.adapters.appengine\n\nrequests_toolbelt.adapters.appengine.monkeypatch()\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\napp = Flask(__name__)\nfirebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', None)\nflask_cors.CORS(app)\n\n@app.route('/')\ndef index():\n return \"
Welcome To Google HVZ (backend)!
\"\n\n\n@app.route('/test', methods=['GET'])\ndef get_testdata():\n\n testdata = firebase.get('testdata', None)\n\n return jsonify(testdata)\n"},"new_contents":{"kind":"string","value":"import logging\n\nfrom firebase import firebase\nfrom flask import Flask, jsonify, request\nimport flask_cors\nfrom google.appengine.ext import ndb\nimport google.auth.transport.requests\nimport google.oauth2.id_token\nimport requests_toolbelt.adapters.appengine\n\nrequests_toolbelt.adapters.appengine.monkeypatch()\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\n# Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database\nFIREBASE_SECRET = \"\"\nFIREBASE_EMAIL = \"\"\n\napp = Flask(__name__)\nauth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)\nfirebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)\nflask_cors.CORS(app)\n\n@app.route('/')\ndef index():\n return \"
Welcome To Google HVZ (backend)!
\"\n\n\n@app.route('/test', methods=['GET'])\ndef get_testdata():\n\n testdata = firebase.get('testdata', None)\n\n return jsonify(testdata)\n"},"subject":{"kind":"string","value":"Add proper authentication for db (without actual key)."},"message":{"kind":"string","value":"Add proper authentication for db (without actual key).\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz"},"ndiff":{"kind":"string","value":" import logging\n \n from firebase import firebase\n from flask import Flask, jsonify, request\n import flask_cors\n from google.appengine.ext import ndb\n import google.auth.transport.requests\n import google.oauth2.id_token\n import requests_toolbelt.adapters.appengine\n \n requests_toolbelt.adapters.appengine.monkeypatch()\n HTTP_REQUEST = google.auth.transport.requests.Request()\n \n+ # Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database\n+ FIREBASE_SECRET = \"\"\n+ FIREBASE_EMAIL = \"\"\n+ \n app = Flask(__name__)\n+ auth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)\n- firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', None)\n+ firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)\n flask_cors.CORS(app)\n \n @app.route('/')\n def index():\n return \"
Welcome To Google HVZ (backend)!
\"\n \n \n @app.route('/test', methods=['GET'])\n def get_testdata():\n \n testdata = firebase.get('testdata', None)\n \n return jsonify(testdata)\n "},"instruction":{"kind":"string","value":"Add proper authentication for db (without actual key)."},"content":{"kind":"string","value":"## Code Before:\nimport logging\n\nfrom firebase import firebase\nfrom flask import Flask, jsonify, request\nimport flask_cors\nfrom google.appengine.ext import ndb\nimport google.auth.transport.requests\nimport google.oauth2.id_token\nimport requests_toolbelt.adapters.appengine\n\nrequests_toolbelt.adapters.appengine.monkeypatch()\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\napp = Flask(__name__)\nfirebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', None)\nflask_cors.CORS(app)\n\n@app.route('/')\ndef index():\n return \"
Welcome To Google HVZ (backend)!
\"\n\n\n@app.route('/test', methods=['GET'])\ndef get_testdata():\n\n testdata = firebase.get('testdata', None)\n\n return jsonify(testdata)\n\n## Instruction:\nAdd proper authentication for db (without actual key).\n## Code After:\nimport logging\n\nfrom firebase import firebase\nfrom flask import Flask, jsonify, request\nimport flask_cors\nfrom google.appengine.ext import ndb\nimport google.auth.transport.requests\nimport google.oauth2.id_token\nimport requests_toolbelt.adapters.appengine\n\nrequests_toolbelt.adapters.appengine.monkeypatch()\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\n# Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database\nFIREBASE_SECRET = \"\"\nFIREBASE_EMAIL = \"\"\n\napp = Flask(__name__)\nauth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)\nfirebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)\nflask_cors.CORS(app)\n\n@app.route('/')\ndef index():\n return \"
Welcome To Google HVZ (backend)!
\"\n\n\n@app.route('/test', methods=['GET'])\ndef get_testdata():\n\n testdata = firebase.get('testdata', None)\n\n return jsonify(testdata)\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\nrequests_toolbelt.adapters.appengine.monkeypatch()\nHTTP_REQUEST = google.auth.transport.requests.Request()\n\n# Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database\nFIREBASE_SECRET = \"\"\nFIREBASE_EMAIL = \"\"\n\napp = Flask(__name__)\nauth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)\nfirebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)\nflask_cors.CORS(app)\n\n@app.route('/')\n\n\n# ... rest of the code ..."}}},{"rowIdx":1591,"cells":{"commit":{"kind":"string","value":"483ba69bca57899054270cb24c41b0d2c01e7ff0"},"old_file":{"kind":"string","value":"opentreemap/stormwater/models.py"},"new_file":{"kind":"string","value":"opentreemap/stormwater/models.py"},"old_contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom django.contrib.gis.db import models\n\nfrom treemap.models import MapFeature\n\n\nclass PolygonalMapFeature(MapFeature):\n area_field_name = 'polygon'\n skip_detail_form = True\n\n polygon = models.MultiPolygonField(srid=3857)\n\n\nclass Bioswale(PolygonalMapFeature):\n pass\n"},"new_contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom django.contrib.gis.db import models\n\nfrom treemap.models import MapFeature\n\n\nclass PolygonalMapFeature(MapFeature):\n area_field_name = 'polygon'\n skip_detail_form = True\n\n polygon = models.MultiPolygonField(srid=3857)\n\n\nclass Bioswale(PolygonalMapFeature):\n collection_udf_defaults = {\n 'Stewardship': [\n {'name': 'Action',\n 'choices': ['Watered',\n 'Pruned',\n 'Mulched, Had Compost Added, or Soil Amended',\n 'Cleared of Trash or Debris'],\n 'type': 'choice'},\n {'type': 'date',\n 'name': 'Date'}],\n }\n"},"subject":{"kind":"string","value":"Add placeholder defaults for bioswale stewardship"},"message":{"kind":"string","value":"Add placeholder defaults for bioswale stewardship\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"agpl-3.0"},"repos":{"kind":"string","value":"clever-crow-consulting/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,maurizi/otm-core,recklessromeo/otm-core,RickMohr/otm-core,RickMohr/otm-core,RickMohr/otm-core,clever-crow-consulting/otm-core,clever-crow-consulting/otm-core,RickMohr/otm-core,recklessromeo/otm-core,maurizi/otm-core,maurizi/otm-core,maurizi/otm-core,recklessromeo/otm-core"},"ndiff":{"kind":"string","value":" from __future__ import print_function\n from __future__ import unicode_literals\n from __future__ import division\n \n from django.contrib.gis.db import models\n \n from treemap.models import MapFeature\n \n \n class PolygonalMapFeature(MapFeature):\n area_field_name = 'polygon'\n skip_detail_form = True\n \n polygon = models.MultiPolygonField(srid=3857)\n \n \n class Bioswale(PolygonalMapFeature):\n- pass\n+ collection_udf_defaults = {\n+ 'Stewardship': [\n+ {'name': 'Action',\n+ 'choices': ['Watered',\n+ 'Pruned',\n+ 'Mulched, Had Compost Added, or Soil Amended',\n+ 'Cleared of Trash or Debris'],\n+ 'type': 'choice'},\n+ {'type': 'date',\n+ 'name': 'Date'}],\n+ }\n 
"},"instruction":{"kind":"string","value":"Add placeholder defaults for bioswale stewardship"},"content":{"kind":"string","value":"## Code Before:\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom django.contrib.gis.db import models\n\nfrom treemap.models import MapFeature\n\n\nclass PolygonalMapFeature(MapFeature):\n area_field_name = 'polygon'\n skip_detail_form = True\n\n polygon = models.MultiPolygonField(srid=3857)\n\n\nclass Bioswale(PolygonalMapFeature):\n pass\n\n## Instruction:\nAdd placeholder defaults for bioswale stewardship\n## Code After:\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom django.contrib.gis.db import models\n\nfrom treemap.models import MapFeature\n\n\nclass PolygonalMapFeature(MapFeature):\n area_field_name = 'polygon'\n skip_detail_form = True\n\n polygon = models.MultiPolygonField(srid=3857)\n\n\nclass Bioswale(PolygonalMapFeature):\n collection_udf_defaults = {\n 'Stewardship': [\n {'name': 'Action',\n 'choices': ['Watered',\n 'Pruned',\n 'Mulched, Had Compost Added, or Soil Amended',\n 'Cleared of Trash or Debris'],\n 'type': 'choice'},\n {'type': 'date',\n 'name': 'Date'}],\n }\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\n\n\nclass Bioswale(PolygonalMapFeature):\n collection_udf_defaults = {\n 'Stewardship': [\n {'name': 'Action',\n 'choices': ['Watered',\n 'Pruned',\n 'Mulched, Had Compost Added, or Soil Amended',\n 'Cleared of Trash or Debris'],\n 'type': 'choice'},\n {'type': 'date',\n 'name': 'Date'}],\n }\n\n\n ... "}}},{"rowIdx":1592,"cells":{"commit":{"kind":"string","value":"073dd8529c95f44d7d250508dd10b8ffc8208926"},"old_file":{"kind":"string","value":"two_factor/migrations/0003_auto_20150817_1733.py"},"new_file":{"kind":"string","value":"two_factor/migrations/0003_auto_20150817_1733.py"},"old_contents":{"kind":"string","value":"from __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport two_factor.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('two_factor', '0002_auto_20150110_0810'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),\n ),\n ]\n"},"new_contents":{"kind":"string","value":"from __future__ import unicode_literals\nimport logging\n\nfrom django.db import models, migrations\nimport phonenumbers\nimport two_factor.models\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_phone_numbers(apps, schema_editor):\n PhoneDevice = apps.get_model(\"two_factor\", \"PhoneDevice\")\n for device in PhoneDevice.objects.all():\n try:\n number = phonenumbers.parse(device.number)\n if not phonenumbers.is_valid_number(number):\n logger.info(\"User '%s' has an invalid phone number '%s'.\" % (device.user.username, device.number))\n device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)\n device.save()\n except phonenumbers.NumberParseException as e:\n # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,\n # so do log a warning.\n logger.warning(\"User '%s' has an invalid phone number '%s': %s. 
Please resolve this issue, \"\n \"as it might result in errors.\" % (device.user.username, device.number, e))\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('two_factor', '0002_auto_20150110_0810'),\n ]\n\n operations = [\n migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),\n ),\n ]\n"},"subject":{"kind":"string","value":"Migrate phone numbers to E.164 format"},"message":{"kind":"string","value":"Migrate phone numbers to E.164 format\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"koleror/django-two-factor-auth,Bouke/django-two-factor-auth,koleror/django-two-factor-auth,Bouke/django-two-factor-auth"},"ndiff":{"kind":"string","value":" from __future__ import unicode_literals\n+ import logging\n \n from django.db import models, migrations\n+ import phonenumbers\n import two_factor.models\n+ \n+ logger = logging.getLogger(__name__)\n+ \n+ \n+ def migrate_phone_numbers(apps, schema_editor):\n+ PhoneDevice = apps.get_model(\"two_factor\", \"PhoneDevice\")\n+ for device in PhoneDevice.objects.all():\n+ try:\n+ number = phonenumbers.parse(device.number)\n+ if not phonenumbers.is_valid_number(number):\n+ logger.info(\"User '%s' has an invalid phone number '%s'.\" % (device.user.username, device.number))\n+ device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)\n+ device.save()\n+ except phonenumbers.NumberParseException as e:\n+ # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,\n+ # so do log a warning.\n+ logger.warning(\"User '%s' has an invalid phone number '%s': %s. 
Please resolve this issue, \"\n+ \"as it might result in errors.\" % (device.user.username, device.number, e))\n \n \n class Migration(migrations.Migration):\n \n dependencies = [\n ('two_factor', '0002_auto_20150110_0810'),\n ]\n \n operations = [\n+ migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),\n ),\n ]\n "},"instruction":{"kind":"string","value":"Migrate phone numbers to E.164 format"},"content":{"kind":"string","value":"## Code Before:\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport two_factor.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('two_factor', '0002_auto_20150110_0810'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),\n ),\n ]\n\n## Instruction:\nMigrate phone numbers to E.164 format\n## Code After:\nfrom __future__ import unicode_literals\nimport logging\n\nfrom django.db import models, migrations\nimport phonenumbers\nimport two_factor.models\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_phone_numbers(apps, schema_editor):\n PhoneDevice = apps.get_model(\"two_factor\", \"PhoneDevice\")\n for device in PhoneDevice.objects.all():\n try:\n number = phonenumbers.parse(device.number)\n if not phonenumbers.is_valid_number(number):\n logger.info(\"User '%s' has an invalid phone number '%s'.\" % (device.user.username, device.number))\n device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)\n device.save()\n except phonenumbers.NumberParseException as e:\n # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,\n # so do log a warning.\n logger.warning(\"User '%s' has an invalid phone number '%s': %s. Please resolve this issue, \"\n \"as it might result in errors.\" % (device.user.username, device.number, e))\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('two_factor', '0002_auto_20150110_0810'),\n ]\n\n operations = [\n migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),\n ),\n ]\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\nfrom __future__ import unicode_literals\nimport logging\n\nfrom django.db import models, migrations\nimport phonenumbers\nimport two_factor.models\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate_phone_numbers(apps, schema_editor):\n PhoneDevice = apps.get_model(\"two_factor\", \"PhoneDevice\")\n for device in PhoneDevice.objects.all():\n try:\n number = phonenumbers.parse(device.number)\n if not phonenumbers.is_valid_number(number):\n logger.info(\"User '%s' has an invalid phone number '%s'.\" % (device.user.username, device.number))\n device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)\n device.save()\n except phonenumbers.NumberParseException as e:\n # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,\n # so do log a warning.\n logger.warning(\"User '%s' has an invalid phone number '%s': %s. 
Please resolve this issue, \"\n \"as it might result in errors.\" % (device.user.username, device.number, e))\n\n\nclass Migration(migrations.Migration):\n\n\n ... \n\n\n ]\n\n operations = [\n migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),\n migrations.AlterField(\n model_name='phonedevice',\n name='number',\n\n\n ... "}}},{"rowIdx":1593,"cells":{"commit":{"kind":"string","value":"5933f9ef0ff7af0fd85a7dbe6578eefe9b8f7cdf"},"old_file":{"kind":"string","value":"seqcluster/create_report.py"},"new_file":{"kind":"string","value":"seqcluster/create_report.py"},"old_contents":{"kind":"string","value":"import os\nimport shutil\nimport logging\n\nfrom bcbio import install\ninstall._set_matplotlib_default_backend()\nimport matplotlib\nmatplotlib.use('Agg', force=True)\n\nfrom libs.read import load_data\nfrom libs.report import make_profile\nfrom libs.utils import safe_dirs\nfrom db import make_database\nimport templates\n\nlogger = logging.getLogger('report')\n\n\ndef report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n\n logger.info(\"create profile\")\n data = make_profile(data, out_dir, args)\n logger.info(\"create database\")\n make_database(data, \"seqcluster.db\", args.out)\n\n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done\")\n"},"new_contents":{"kind":"string","value":"import os\nimport shutil\nimport logging\n\nfrom bcbio import install\ninstall._set_matplotlib_default_backend()\nimport matplotlib\nmatplotlib.use('Agg', force=True)\n\nfrom libs.read import load_data\nfrom libs.report import make_profile\nfrom libs.utils import safe_dirs\nfrom db import make_database\nimport templates\n\nlogger = logging.getLogger('report')\n\n\ndef report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n\n logger.info(\"create profile\")\n data = make_profile(data, out_dir, args)\n logger.info(\"create database\")\n make_database(data, \"seqcluster.db\", args.out)\n\n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done. 
Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.\")\n"},"subject":{"kind":"string","value":"Add message with link to seqclusterViz"},"message":{"kind":"string","value":"Add message with link to seqclusterViz\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster"},"ndiff":{"kind":"string","value":" import os\n import shutil\n import logging\n \n from bcbio import install\n install._set_matplotlib_default_backend()\n import matplotlib\n matplotlib.use('Agg', force=True)\n \n from libs.read import load_data\n from libs.report import make_profile\n from libs.utils import safe_dirs\n from db import make_database\n import templates\n \n logger = logging.getLogger('report')\n \n \n def report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n \n logger.info(\"create profile\")\n data = make_profile(data, out_dir, args)\n logger.info(\"create database\")\n make_database(data, \"seqcluster.db\", args.out)\n \n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n- logger.info(\"Done\")\n+ logger.info(\"Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.\")\n "},"instruction":{"kind":"string","value":"Add message with link to seqclusterViz"},"content":{"kind":"string","value":"## Code Before:\nimport os\nimport shutil\nimport logging\n\nfrom bcbio import install\ninstall._set_matplotlib_default_backend()\nimport matplotlib\nmatplotlib.use('Agg', force=True)\n\nfrom libs.read import load_data\nfrom libs.report import make_profile\nfrom libs.utils import safe_dirs\nfrom db import make_database\nimport templates\n\nlogger = logging.getLogger('report')\n\n\ndef report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n\n logger.info(\"create profile\")\n data = make_profile(data, out_dir, args)\n logger.info(\"create database\")\n make_database(data, \"seqcluster.db\", args.out)\n\n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done\")\n\n## Instruction:\nAdd message with link to seqclusterViz\n## Code After:\nimport os\nimport shutil\nimport logging\n\nfrom bcbio import install\ninstall._set_matplotlib_default_backend()\nimport matplotlib\nmatplotlib.use('Agg', force=True)\n\nfrom libs.read import load_data\nfrom libs.report import make_profile\nfrom libs.utils import safe_dirs\nfrom db import make_database\nimport templates\n\nlogger 
= logging.getLogger('report')\n\n\ndef report(args):\n \"\"\"\n Create report in html format\n \"\"\"\n logger.info(\"reading sequeces\")\n data = load_data(args.json)\n out_dir = os.path.join(args.out, \"html\")\n safe_dirs(out_dir)\n\n logger.info(\"create profile\")\n data = make_profile(data, out_dir, args)\n logger.info(\"create database\")\n make_database(data, \"seqcluster.db\", args.out)\n\n path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))\n css_template = os.path.join(path_template, \"info.css\")\n js_template = os.path.join(path_template, \"jquery.tablesorter.min.js\")\n css = os.path.join(out_dir, \"info.css\")\n js = os.path.join(out_dir, \"jquery.tablesorter.min.js\")\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.\")\n"},"fuzzy_diff":{"kind":"string","value":" ... \n\n\n if not os.path.exists(css):\n shutil.copy(css_template, css)\n shutil.copy(js_template, js)\n logger.info(\"Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.\")\n\n\n ... "}}},{"rowIdx":1594,"cells":{"commit":{"kind":"string","value":"715dcb62966b5c80544ed9eee79a6c69d3b9d927"},"old_file":{"kind":"string","value":"blog/posts/models.py"},"new_file":{"kind":"string","value":"blog/posts/models.py"},"old_contents":{"kind":"string","value":"from django.db import models\n\nclass Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n display_title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\nclass Comment(models.Model):\n text = models.TextField()\n author = models.CharField(max_length=100)\n date = models.DateTimeField(auto_now_add=True)\n post = models.ForeignKey(Post)\n\n def __unicode__(self):\n return self.author + u\"'s comment on \" + self.post.__unicode__()\n\n"},"new_contents":{"kind":"string","value":"from django.db import models\n\nclass Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\nclass Comment(models.Model):\n text = models.TextField()\n author = models.CharField(max_length=100)\n date = models.DateTimeField(auto_now_add=True)\n post = models.ForeignKey(Post)\n\n def __unicode__(self):\n return self.author + u\"'s comment on \" + self.post.__unicode__()\n\n"},"subject":{"kind":"string","value":"Remove display_title field from Post model."},"message":{"kind":"string","value":"Remove display_title field from Post model.\n\nIt wasn't being used anyway.\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"Lukasa/minimalog"},"ndiff":{"kind":"string","value":" from django.db import models\n \n class Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n- display_title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n \n def __unicode__(self):\n return self.title\n \n class Comment(models.Model):\n text = models.TextField()\n author = models.CharField(max_length=100)\n date = models.DateTimeField(auto_now_add=True)\n post = models.ForeignKey(Post)\n \n def __unicode__(self):\n return self.author + u\"'s comment on \" + 
self.post.__unicode__()\n \n "},"instruction":{"kind":"string","value":"Remove display_title field from Post model."},"content":{"kind":"string","value":"## Code Before:\nfrom django.db import models\n\nclass Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n display_title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\nclass Comment(models.Model):\n text = models.TextField()\n author = models.CharField(max_length=100)\n date = models.DateTimeField(auto_now_add=True)\n post = models.ForeignKey(Post)\n\n def __unicode__(self):\n return self.author + u\"'s comment on \" + self.post.__unicode__()\n\n\n## Instruction:\nRemove display_title field from Post model.\n## Code After:\nfrom django.db import models\n\nclass Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\nclass Comment(models.Model):\n text = models.TextField()\n author = models.CharField(max_length=100)\n date = models.DateTimeField(auto_now_add=True)\n post = models.ForeignKey(Post)\n\n def __unicode__(self):\n return self.author + u\"'s comment on \" + self.post.__unicode__()\n\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nclass Post(models.Model):\n body = models.TextField()\n title = models.CharField(max_length=50)\n publication_date = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n\n\n// ... rest of the code ..."}}},{"rowIdx":1595,"cells":{"commit":{"kind":"string","value":"2adc021a520baa356c46ad1316893c1cd96f3147"},"old_file":{"kind":"string","value":"knights/lexer.py"},"new_file":{"kind":"string","value":"knights/lexer.py"},"old_contents":{"kind":"string","value":"from enum import Enum\nimport re\n\nToken = Enum('Token', 'load comment text var block',)\n\ntag_re = re.compile(\n '|'.join([\n r'{\\!\\s*(?P.+?)\\s*\\!}',\n r'{%\\s*(?P.+?)\\s*%}',\n r'{{\\s*(?P.+?)\\s*}}',\n r'{#\\s*(?P.+?)\\s*#}'\n ]),\n re.DOTALL\n)\n\n\ndef tokenise(template):\n '''A generator which yields (type, content) pairs'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n if upto < start:\n yield (Token.text, template[upto:start])\n upto = end\n load, tag, var, comment = m.groups()\n if load is not None:\n yield (Token.load, load)\n elif tag is not None:\n yield (Token.block, tag)\n elif var is not None:\n yield (Token.var, var)\n else:\n yield (Token.comment, comment)\n if upto < len(template):\n yield (Token.text, template[upto:])\n"},"new_contents":{"kind":"string","value":"from enum import Enum\nimport re\n\nTokenType = Enum('Token', 'load comment text var block',)\n\n\ntag_re = re.compile(\n '|'.join([\n r'{\\!\\s*(?P.+?)\\s*\\!}',\n r'{%\\s*(?P.+?)\\s*%}',\n r'{{\\s*(?P.+?)\\s*}}',\n r'{#\\s*(?P.+?)\\s*#}'\n ]),\n re.DOTALL\n)\n\n\nclass Token:\n def __init__(self, mode, token, lineno=None):\n self.mode = mode\n self.token = token\n self.lineno = lineno\n\n\ndef tokenise(template):\n '''A generator which yields Token instances'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n lineno = template.count('\\n', 0, start)\n if upto < start:\n yield Token(TokenType.text, template[upto:start], lineno)\n upto = end\n load, tag, 
var, comment = m.groups()\n if load is not None:\n yield Token(TokenType.load, load, lineno)\n elif tag is not None:\n yield Token(TokenType.block, tag, lineno)\n elif var is not None:\n yield Token(TokenType.var, var, lineno)\n else:\n yield Token(TokenType.comment, comment, lineno)\n if upto < len(template):\n yield Token(TokenType.text, template[upto:], lineno)\n"},"subject":{"kind":"string","value":"Rework Lexer to use Token object"},"message":{"kind":"string","value":"Rework Lexer to use Token object\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"funkybob/knights-templater,funkybob/knights-templater"},"ndiff":{"kind":"string","value":" from enum import Enum\n import re\n \n- Token = Enum('Token', 'load comment text var block',)\n+ TokenType = Enum('Token', 'load comment text var block',)\n+ \n \n tag_re = re.compile(\n '|'.join([\n r'{\\!\\s*(?P.+?)\\s*\\!}',\n r'{%\\s*(?P.+?)\\s*%}',\n r'{{\\s*(?P.+?)\\s*}}',\n r'{#\\s*(?P.+?)\\s*#}'\n ]),\n re.DOTALL\n )\n \n \n+ class Token:\n+ def __init__(self, mode, token, lineno=None):\n+ self.mode = mode\n+ self.token = token\n+ self.lineno = lineno\n+ \n+ \n def tokenise(template):\n- '''A generator which yields (type, content) pairs'''\n+ '''A generator which yields Token instances'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n+ lineno = template.count('\\n', 0, start)\n if upto < start:\n- yield (Token.text, template[upto:start])\n+ yield Token(TokenType.text, template[upto:start], lineno)\n upto = end\n load, tag, var, comment = m.groups()\n if load is not None:\n- yield (Token.load, load)\n+ yield Token(TokenType.load, load, lineno)\n elif tag is not None:\n- yield (Token.block, tag)\n+ yield Token(TokenType.block, tag, lineno)\n elif var is not None:\n- yield (Token.var, var)\n+ yield Token(TokenType.var, var, lineno)\n else:\n- yield (Token.comment, comment)\n+ yield Token(TokenType.comment, comment, lineno)\n if upto < len(template):\n- yield (Token.text, template[upto:])\n+ yield Token(TokenType.text, template[upto:], lineno)\n "},"instruction":{"kind":"string","value":"Rework Lexer to use Token object"},"content":{"kind":"string","value":"## Code Before:\nfrom enum import Enum\nimport re\n\nToken = Enum('Token', 'load comment text var block',)\n\ntag_re = re.compile(\n '|'.join([\n r'{\\!\\s*(?P.+?)\\s*\\!}',\n r'{%\\s*(?P.+?)\\s*%}',\n r'{{\\s*(?P.+?)\\s*}}',\n r'{#\\s*(?P.+?)\\s*#}'\n ]),\n re.DOTALL\n)\n\n\ndef tokenise(template):\n '''A generator which yields (type, content) pairs'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n if upto < start:\n yield (Token.text, template[upto:start])\n upto = end\n load, tag, var, comment = m.groups()\n if load is not None:\n yield (Token.load, load)\n elif tag is not None:\n yield (Token.block, tag)\n elif var is not None:\n yield (Token.var, var)\n else:\n yield (Token.comment, comment)\n if upto < len(template):\n yield (Token.text, template[upto:])\n\n## Instruction:\nRework Lexer to use Token object\n## Code After:\nfrom enum import Enum\nimport re\n\nTokenType = Enum('Token', 'load comment text var block',)\n\n\ntag_re = re.compile(\n '|'.join([\n r'{\\!\\s*(?P.+?)\\s*\\!}',\n r'{%\\s*(?P.+?)\\s*%}',\n r'{{\\s*(?P.+?)\\s*}}',\n r'{#\\s*(?P.+?)\\s*#}'\n ]),\n re.DOTALL\n)\n\n\nclass Token:\n def __init__(self, 
mode, token, lineno=None):\n self.mode = mode\n self.token = token\n self.lineno = lineno\n\n\ndef tokenise(template):\n '''A generator which yields Token instances'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n lineno = template.count('\\n', 0, start)\n if upto < start:\n yield Token(TokenType.text, template[upto:start], lineno)\n upto = end\n load, tag, var, comment = m.groups()\n if load is not None:\n yield Token(TokenType.load, load, lineno)\n elif tag is not None:\n yield Token(TokenType.block, tag, lineno)\n elif var is not None:\n yield Token(TokenType.var, var, lineno)\n else:\n yield Token(TokenType.comment, comment, lineno)\n if upto < len(template):\n yield Token(TokenType.text, template[upto:], lineno)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nfrom enum import Enum\nimport re\n\nTokenType = Enum('Token', 'load comment text var block',)\n\n\ntag_re = re.compile(\n '|'.join([\n\n\n// ... modified code ... \n\n\n)\n\n\nclass Token:\n def __init__(self, mode, token, lineno=None):\n self.mode = mode\n self.token = token\n self.lineno = lineno\n\n\ndef tokenise(template):\n '''A generator which yields Token instances'''\n upto = 0\n # XXX Track line numbers and update nodes, so we can annotate the code\n for m in tag_re.finditer(template):\n start, end = m.span()\n lineno = template.count('\\n', 0, start)\n if upto < start:\n yield Token(TokenType.text, template[upto:start], lineno)\n upto = end\n load, tag, var, comment = m.groups()\n if load is not None:\n yield Token(TokenType.load, load, lineno)\n elif tag is not None:\n yield Token(TokenType.block, tag, lineno)\n elif var is not None:\n yield Token(TokenType.var, var, lineno)\n else:\n yield Token(TokenType.comment, comment, lineno)\n if upto < len(template):\n yield Token(TokenType.text, template[upto:], lineno)\n\n\n// ... 
rest of the code ..."}}},{"rowIdx":1596,"cells":{"commit":{"kind":"string","value":"7b939076fba1bb11d0ded504bcf10da457b3d092"},"old_file":{"kind":"string","value":"scripts/add_identifiers_to_existing_preprints.py"},"new_file":{"kind":"string","value":"scripts/add_identifiers_to_existing_preprints.py"},"old_contents":{"kind":"string","value":"import logging\nimport time\n\nfrom website.app import init_app\nfrom website.identifiers.utils import get_top_level_domain, request_identifiers_from_ezid\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef add_identifiers_to_preprints():\n from osf.models import PreprintService\n\n preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)\n logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n for preprint in preprints_without_identifiers:\n logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))\n\n ezid_response = request_identifiers_from_ezid(preprint)\n preprint.set_preprint_identifiers(ezid_response)\n preprint.save()\n\n doi = preprint.get_identifier('doi')\n subdomain = get_top_level_domain(preprint.provider.external_url)\n assert subdomain.upper() in doi.value\n assert preprint._id.upper() in doi.value\n\n logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))\n time.sleep(1)\n\n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n\nif __name__ == '__main__':\n init_app(routes=False)\n add_identifiers_to_preprints()\n"},"new_contents":{"kind":"string","value":"import logging\nimport time\n\nfrom website.app import init_app\nfrom website.identifiers.utils import request_identifiers_from_ezid\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef add_identifiers_to_preprints():\n from osf.models import PreprintService\n\n preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)\n logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n for preprint in preprints_without_identifiers:\n logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))\n\n ezid_response = request_identifiers_from_ezid(preprint)\n preprint.set_preprint_identifiers(ezid_response)\n preprint.save()\n\n doi = preprint.get_identifier('doi')\n assert preprint._id.upper() in doi.value\n\n logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))\n time.sleep(1)\n\n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n\nif __name__ == '__main__':\n init_app(routes=False)\n add_identifiers_to_preprints()\n"},"subject":{"kind":"string","value":"Remove check for domain in DOI"},"message":{"kind":"string","value":"Remove check for domain in 
DOI\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"mattclark/osf.io,crcresearch/osf.io,aaxelb/osf.io,saradbowman/osf.io,adlius/osf.io,mattclark/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,sloria/osf.io,mfraezz/osf.io,chrisseto/osf.io,pattisdr/osf.io,sloria/osf.io,adlius/osf.io,felliott/osf.io,cslzchen/osf.io,laurenrevere/osf.io,crcresearch/osf.io,saradbowman/osf.io,sloria/osf.io,felliott/osf.io,binoculars/osf.io,caneruguz/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,crcresearch/osf.io,adlius/osf.io,erinspace/osf.io,aaxelb/osf.io,baylee-d/osf.io,icereval/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,icereval/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,cwisecarver/osf.io,caneruguz/osf.io,cslzchen/osf.io,mfraezz/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,felliott/osf.io,TomBaxter/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,binoculars/osf.io,chennan47/osf.io,erinspace/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,caneruguz/osf.io,chrisseto/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,binoculars/osf.io,cwisecarver/osf.io,brianjgeiger/osf.io,erinspace/osf.io,mattclark/osf.io,baylee-d/osf.io,chrisseto/osf.io,laurenrevere/osf.io,cwisecarver/osf.io,felliott/osf.io,cslzchen/osf.io,leb2dg/osf.io"},"ndiff":{"kind":"string","value":" import logging\n import time\n \n from website.app import init_app\n- from website.identifiers.utils import get_top_level_domain, request_identifiers_from_ezid\n+ from website.identifiers.utils import request_identifiers_from_ezid\n \n logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n \n \n def add_identifiers_to_preprints():\n from osf.models import PreprintService\n \n preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)\n logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n \n for preprint in preprints_without_identifiers:\n logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))\n \n ezid_response = request_identifiers_from_ezid(preprint)\n preprint.set_preprint_identifiers(ezid_response)\n preprint.save()\n \n doi = preprint.get_identifier('doi')\n- subdomain = get_top_level_domain(preprint.provider.external_url)\n- assert subdomain.upper() in doi.value\n assert preprint._id.upper() in doi.value\n \n- logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))\n+ logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))\n time.sleep(1)\n \n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n \n \n if __name__ == '__main__':\n init_app(routes=False)\n add_identifiers_to_preprints()\n "},"instruction":{"kind":"string","value":"Remove check for domain in DOI"},"content":{"kind":"string","value":"## Code Before:\nimport logging\nimport time\n\nfrom website.app import init_app\nfrom website.identifiers.utils import 
get_top_level_domain, request_identifiers_from_ezid\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef add_identifiers_to_preprints():\n from osf.models import PreprintService\n\n preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)\n logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n for preprint in preprints_without_identifiers:\n logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))\n\n ezid_response = request_identifiers_from_ezid(preprint)\n preprint.set_preprint_identifiers(ezid_response)\n preprint.save()\n\n doi = preprint.get_identifier('doi')\n subdomain = get_top_level_domain(preprint.provider.external_url)\n assert subdomain.upper() in doi.value\n assert preprint._id.upper() in doi.value\n\n logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))\n time.sleep(1)\n\n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n\nif __name__ == '__main__':\n init_app(routes=False)\n add_identifiers_to_preprints()\n\n## Instruction:\nRemove check for domain in DOI\n## Code After:\nimport logging\nimport time\n\nfrom website.app import init_app\nfrom website.identifiers.utils import request_identifiers_from_ezid\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef add_identifiers_to_preprints():\n from osf.models import PreprintService\n\n preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)\n logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n for preprint in preprints_without_identifiers:\n logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))\n\n ezid_response = request_identifiers_from_ezid(preprint)\n preprint.set_preprint_identifiers(ezid_response)\n preprint.save()\n\n doi = preprint.get_identifier('doi')\n assert preprint._id.upper() in doi.value\n\n logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))\n time.sleep(1)\n\n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n\nif __name__ == '__main__':\n init_app(routes=False)\n add_identifiers_to_preprints()\n"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\nimport time\n\nfrom website.app import init_app\nfrom website.identifiers.utils import request_identifiers_from_ezid\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\n# ... modified code ... \n\n\n preprint.save()\n\n doi = preprint.get_identifier('doi')\n assert preprint._id.upper() in doi.value\n\n logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))\n time.sleep(1)\n\n logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))\n\n\n# ... 
rest of the code ..."}}},{"rowIdx":1597,"cells":{"commit":{"kind":"string","value":"3d91950735d8b42e030f6f479a32369804e90ac0"},"old_file":{"kind":"string","value":"gaphas/picklers.py"},"new_file":{"kind":"string","value":"gaphas/picklers.py"},"old_contents":{"kind":"string","value":"\nimport copyreg\nimport types\n\nimport cairo\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\n\n# Allow instancemethod to be pickled:\ndef construct_instancemethod(funcname, self, clazz):\n func = getattr(clazz, funcname)\n return types.MethodType(func, self)\n\n\ndef reduce_instancemethod(im):\n return (\n construct_instancemethod,\n (im.__func__.__name__, im.__self__, im.__self__.__class__),\n )\n\n\ncopyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)\n\n\n# Allow cairo.Matrix to be pickled:\ndef construct_cairo_matrix(*args):\n return cairo.Matrix(*args)\n\n\ndef reduce_cairo_matrix(m):\n return construct_cairo_matrix, tuple(m)\n\n\ncopyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)\n"},"new_contents":{"kind":"string","value":"\nimport copyreg\nimport types\n\nimport cairo\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\n# Allow cairo.Matrix to be pickled:\ndef construct_cairo_matrix(*args):\n return cairo.Matrix(*args)\n\n\ndef reduce_cairo_matrix(m):\n return construct_cairo_matrix, tuple(m)\n\n\ncopyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)\n"},"subject":{"kind":"string","value":"Remove ununsed pickle code for instance methods"},"message":{"kind":"string","value":"Remove ununsed pickle code for instance methods\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"lgpl-2.1"},"repos":{"kind":"string","value":"amolenaar/gaphas"},"ndiff":{"kind":"string","value":" \n import copyreg\n import types\n \n import cairo\n from future import standard_library\n \n standard_library.install_aliases()\n- \n- \n- # Allow instancemethod to be pickled:\n- def construct_instancemethod(funcname, self, clazz):\n- func = getattr(clazz, funcname)\n- return types.MethodType(func, self)\n- \n- \n- def reduce_instancemethod(im):\n- return (\n- construct_instancemethod,\n- (im.__func__.__name__, im.__self__, im.__self__.__class__),\n- )\n- \n- \n- copyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)\n- \n \n # Allow cairo.Matrix to be pickled:\n def construct_cairo_matrix(*args):\n return cairo.Matrix(*args)\n \n \n def reduce_cairo_matrix(m):\n return construct_cairo_matrix, tuple(m)\n \n \n copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)\n "},"instruction":{"kind":"string","value":"Remove ununsed pickle code for instance methods"},"content":{"kind":"string","value":"## Code Before:\n\nimport copyreg\nimport types\n\nimport cairo\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\n\n# Allow instancemethod to be pickled:\ndef construct_instancemethod(funcname, self, clazz):\n func = getattr(clazz, funcname)\n return types.MethodType(func, self)\n\n\ndef reduce_instancemethod(im):\n return (\n construct_instancemethod,\n (im.__func__.__name__, im.__self__, im.__self__.__class__),\n )\n\n\ncopyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)\n\n\n# Allow cairo.Matrix to be pickled:\ndef construct_cairo_matrix(*args):\n return cairo.Matrix(*args)\n\n\ndef reduce_cairo_matrix(m):\n return construct_cairo_matrix, tuple(m)\n\n\ncopyreg.pickle(cairo.Matrix, 
reduce_cairo_matrix, construct_cairo_matrix)\n\n## Instruction:\nRemove ununsed pickle code for instance methods\n## Code After:\n\nimport copyreg\nimport types\n\nimport cairo\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\n# Allow cairo.Matrix to be pickled:\ndef construct_cairo_matrix(*args):\n return cairo.Matrix(*args)\n\n\ndef reduce_cairo_matrix(m):\n return construct_cairo_matrix, tuple(m)\n\n\ncopyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\nfrom future import standard_library\n\nstandard_library.install_aliases()\n\n# Allow cairo.Matrix to be pickled:\ndef construct_cairo_matrix(*args):\n\n\n// ... rest of the code ..."}}},{"rowIdx":1598,"cells":{"commit":{"kind":"string","value":"f275c8cc020119b52ed01bc6b56946279853d854"},"old_file":{"kind":"string","value":"src/mmw/apps/bigcz/clients/cuahsi/details.py"},"new_file":{"kind":"string","value":"src/mmw/apps/bigcz/clients/cuahsi/details.py"},"old_contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom datetime import date, timedelta\n\nfrom rest_framework.exceptions import ValidationError\n\nDATE_FORMAT = '%m/%d/%Y'\n\n\ndef details(wsdl, site):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_site_info(wsdl, site)\n\n\ndef values(wsdl, site, variable, from_date=None, to_date=None):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not variable:\n raise ValidationError({\n 'error': 'Required argument: variable'})\n\n if not to_date:\n # Set to default value of today\n to_date = date.today().strftime(DATE_FORMAT)\n\n if not from_date:\n # Set to default value of one week ago\n from_date = (date.today() -\n timedelta(days=7)).strftime(DATE_FORMAT)\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_values(wsdl, site, variable, from_date, to_date)\n"},"new_contents":{"kind":"string","value":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom datetime import date, timedelta\n\nfrom rest_framework.exceptions import ValidationError\n\nDATE_FORMAT = '%m/%d/%Y'\n\n\ndef details(wsdl, site):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_site_info(wsdl, site, None)\n\n\ndef values(wsdl, site, variable, from_date=None, to_date=None):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not variable:\n raise ValidationError({\n 'error': 'Required argument: variable'})\n\n if not to_date:\n # Set to default value of today\n to_date = date.today().strftime(DATE_FORMAT)\n\n if not from_date:\n # Set to default value of one week ago\n from_date = (date.today() -\n timedelta(days=7)).strftime(DATE_FORMAT)\n\n if not 
wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_values(wsdl, site, variable, from_date, to_date, None)\n"},"subject":{"kind":"string","value":"Stop ulmo caching for suds-jurko compliance"},"message":{"kind":"string","value":"Stop ulmo caching for suds-jurko compliance\n\nPreviously we were using ulmo with suds-jurko 0.6, which is\nthe current latest release, but it is 4 years old. Most recent\nwork on suds-jurko has been done on the development branch,\nincluding optimizations to memory use (which we need).\nUnfortunately, the development branch also includes some\nbreaking changes, including one which \"cleans up\" the caching\nmodule: https://bitbucket.org/jurko/suds/commits/6b24afe3206fc648605cc8d19f7c58c605d9df5f?at=default\nThis change renames .setduration() to .__set_duration(),\nwhich is called by ulmo here: https://github.com/emiliom/ulmo/blob/90dbfe31f38a72ea4cee9a52e572cfa8f8484adc/ulmo/cuahsi/wof/core.py#L290\n\nBy explicitly setting the caching to None, we ensure that\nline isn't executed and those errors don't crop up.\n\nThe performance improvements we get from using the development\nbranch of suds-jurko outweigh the benefits of caching for one\nday, since it is unlikely we will be accessing the exact same\ncontent repeatedly.\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"apache-2.0"},"repos":{"kind":"string","value":"WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed"},"ndiff":{"kind":"string","value":" from __future__ import print_function\n from __future__ import unicode_literals\n from __future__ import division\n \n from datetime import date, timedelta\n \n from rest_framework.exceptions import ValidationError\n \n DATE_FORMAT = '%m/%d/%Y'\n \n \n def details(wsdl, site):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n \n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n \n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n \n from ulmo.cuahsi import wof\n- return wof.get_site_info(wsdl, site)\n+ return wof.get_site_info(wsdl, site, None)\n \n \n def values(wsdl, site, variable, from_date=None, to_date=None):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n \n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n \n if not variable:\n raise ValidationError({\n 'error': 'Required argument: variable'})\n \n if not to_date:\n # Set to default value of today\n to_date = date.today().strftime(DATE_FORMAT)\n \n if not from_date:\n # Set to default value of one week ago\n from_date = (date.today() -\n timedelta(days=7)).strftime(DATE_FORMAT)\n \n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n \n from ulmo.cuahsi import wof\n- return wof.get_values(wsdl, site, variable, from_date, to_date)\n+ return wof.get_values(wsdl, site, variable, from_date, to_date, None)\n "},"instruction":{"kind":"string","value":"Stop ulmo caching for suds-jurko compliance"},"content":{"kind":"string","value":"## Code Before:\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom datetime import date, timedelta\n\nfrom rest_framework.exceptions import ValidationError\n\nDATE_FORMAT = '%m/%d/%Y'\n\n\ndef details(wsdl, site):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: 
wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_site_info(wsdl, site)\n\n\ndef values(wsdl, site, variable, from_date=None, to_date=None):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not variable:\n raise ValidationError({\n 'error': 'Required argument: variable'})\n\n if not to_date:\n # Set to default value of today\n to_date = date.today().strftime(DATE_FORMAT)\n\n if not from_date:\n # Set to default value of one week ago\n from_date = (date.today() -\n timedelta(days=7)).strftime(DATE_FORMAT)\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_values(wsdl, site, variable, from_date, to_date)\n\n## Instruction:\nStop ulmo caching for suds-jurko compliance\n## Code After:\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nfrom datetime import date, timedelta\n\nfrom rest_framework.exceptions import ValidationError\n\nDATE_FORMAT = '%m/%d/%Y'\n\n\ndef details(wsdl, site):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_site_info(wsdl, site, None)\n\n\ndef values(wsdl, site, variable, from_date=None, to_date=None):\n if not wsdl:\n raise ValidationError({\n 'error': 'Required argument: wsdl'})\n\n if not site:\n raise ValidationError({\n 'error': 'Required argument: site'})\n\n if not variable:\n raise ValidationError({\n 'error': 'Required argument: variable'})\n\n if not to_date:\n # Set to default value of today\n to_date = date.today().strftime(DATE_FORMAT)\n\n if not from_date:\n # Set to default value of one week ago\n from_date = (date.today() -\n timedelta(days=7)).strftime(DATE_FORMAT)\n\n if not wsdl.upper().endswith('?WSDL'):\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_values(wsdl, site, variable, from_date, to_date, None)\n"},"fuzzy_diff":{"kind":"string","value":"// ... existing code ... \n\n\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_site_info(wsdl, site, None)\n\n\ndef values(wsdl, site, variable, from_date=None, to_date=None):\n\n\n// ... modified code ... \n\n\n wsdl += '?WSDL'\n\n from ulmo.cuahsi import wof\n return wof.get_values(wsdl, site, variable, from_date, to_date, None)\n\n\n// ... 
rest of the code ..."}}},{"rowIdx":1599,"cells":{"commit":{"kind":"string","value":"f1c09bc9969cf9d66179baef80b5cbb3d28d5596"},"old_file":{"kind":"string","value":"app/report/views.py"},"new_file":{"kind":"string","value":"app/report/views.py"},"old_contents":{"kind":"string","value":"from flask import render_template\n\nfrom app import app\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/report/')\ndef report():\n pass\n"},"new_contents":{"kind":"string","value":"from flask import flash, g, redirect, render_template, request, url_for\n\nfrom app import app\nfrom vcs.repository import is_valid_github_repository, parse_url_and_get_repo\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/check', methods=['POST'])\ndef check():\n url = request.form['url']\n if not is_valid_github_repository(url):\n flash('Given repository url is not valid')\n return redirect(url_for('index'))\n return redirect(url_for('report', repo_url=url))\n\n\n@app.route('/report/', methods=['GET'])\ndef report(repo_url):\n repo = parse_url_and_get_repo(repo_url)\n if repo is None:\n flash('Given repository does not exists')\n return redirect(url_for('index'))\n results = {}\n # Analysis processing\n return render_template('report/results.html', results=results)"},"subject":{"kind":"string","value":"Create default behaviour for all routers"},"message":{"kind":"string","value":"Create default behaviour for all routers\n"},"lang":{"kind":"string","value":"Python"},"license":{"kind":"string","value":"mit"},"repos":{"kind":"string","value":"mingrammer/pyreportcard,mingrammer/pyreportcard"},"ndiff":{"kind":"string","value":"- from flask import render_template\n+ from flask import flash, g, redirect, render_template, request, url_for\n \n from app import app\n- \n+ from vcs.repository import is_valid_github_repository, parse_url_and_get_repo\n \n @app.route('/')\n def index():\n return render_template('index.html')\n \n \n- @app.route('/report/')\n- def report():\n- pass\n+ @app.route('/about')\n+ def about():\n+ return render_template('about.html')\n \n+ \n+ @app.route('/check', methods=['POST'])\n+ def check():\n+ url = request.form['url']\n+ if not is_valid_github_repository(url):\n+ flash('Given repository url is not valid')\n+ return redirect(url_for('index'))\n+ return redirect(url_for('report', repo_url=url))\n+ \n+ \n+ @app.route('/report/', methods=['GET'])\n+ def report(repo_url):\n+ repo = parse_url_and_get_repo(repo_url)\n+ if repo is None:\n+ flash('Given repository does not exists')\n+ return redirect(url_for('index'))\n+ results = {}\n+ # Analysis processing\n+ return render_template('report/results.html', results=results)"},"instruction":{"kind":"string","value":"Create default behaviour for all routers"},"content":{"kind":"string","value":"## Code Before:\nfrom flask import render_template\n\nfrom app import app\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/report/')\ndef report():\n pass\n\n## Instruction:\nCreate default behaviour for all routers\n## Code After:\nfrom flask import flash, g, redirect, render_template, request, url_for\n\nfrom app import app\nfrom vcs.repository import is_valid_github_repository, parse_url_and_get_repo\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/check', 
methods=['POST'])\ndef check():\n url = request.form['url']\n if not is_valid_github_repository(url):\n flash('Given repository url is not valid')\n return redirect(url_for('index'))\n return redirect(url_for('report', repo_url=url))\n\n\n@app.route('/report/', methods=['GET'])\ndef report(repo_url):\n repo = parse_url_and_get_repo(repo_url)\n if repo is None:\n flash('Given repository does not exists')\n return redirect(url_for('index'))\n results = {}\n # Analysis processing\n return render_template('report/results.html', results=results)"},"fuzzy_diff":{"kind":"string","value":"# ... existing code ... \n\n\nfrom flask import flash, g, redirect, render_template, request, url_for\n\nfrom app import app\nfrom vcs.repository import is_valid_github_repository, parse_url_and_get_repo\n\n@app.route('/')\ndef index():\n\n\n# ... modified code ... \n\n\n return render_template('index.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/check', methods=['POST'])\ndef check():\n url = request.form['url']\n if not is_valid_github_repository(url):\n flash('Given repository url is not valid')\n return redirect(url_for('index'))\n return redirect(url_for('report', repo_url=url))\n\n\n@app.route('/report/', methods=['GET'])\ndef report(repo_url):\n repo = parse_url_and_get_repo(repo_url)\n if repo is None:\n flash('Given repository does not exists')\n return redirect(url_for('index'))\n results = {}\n # Analysis processing\n return render_template('report/results.html', results=results)\n\n\n# ... rest of the code ..."}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":15,"numItemsPerPage":100,"numTotalItems":21467,"offset":1500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODI0NDcwMSwic3ViIjoiL2RhdGFzZXRzL2tzZW5pYXN5Y2gvRWRpdFBhY2tGVC1hcHBseS1mdXp6eS1kaWZmcy1oZXVyaXN0aWNzX2NvbnRleHQtMyIsImV4cCI6MTc1ODI0ODMwMSwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.PSGOvamspqtq8vGOzZIaClO_celDdL5i4ldLxKP6XEJtgfVCGfPC0C6Glgu119YG1Acx0X2hpB3Hd9hg-72tCA","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
commit
stringlengths
40
40
old_file
stringlengths
4
118
new_file
stringlengths
4
118
old_contents
stringlengths
10
2.94k
new_contents
stringlengths
21
3.18k
subject
stringlengths
16
444
message
stringlengths
17
2.63k
lang
stringclasses
1 value
license
stringclasses
13 values
repos
stringlengths
5
43k
ndiff
stringlengths
52
3.32k
instruction
stringlengths
16
444
content
stringlengths
133
4.32k
fuzzy_diff
stringlengths
33
3.23k
f6b7a4ec8aa72acfd93e7f85199b251e91ca4465
cherrypy/test/test_refleaks.py
cherrypy/test/test_refleaks.py
"""Tests for refleaks.""" from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy data = object() from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): success = [] def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() success.append(True) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() self.assertEqual(len(success), ITERATIONS)
"""Tests for refleaks.""" import itertools from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy data = object() from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): success = itertools.count() def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() next(success) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() self.assertEqual(next(success), ITERATIONS)
Use a simple counter rather than appending booleans to a list and counting them.
Use a simple counter rather than appending booleans to a list and counting them.
Python
bsd-3-clause
cherrypy/cheroot,Safihre/cherrypy,cherrypy/cherrypy,cherrypy/cherrypy,Safihre/cherrypy
"""Tests for refleaks.""" + + import itertools from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy data = object() from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): - success = [] + success = itertools.count() def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() - success.append(True) + next(success) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() - self.assertEqual(len(success), ITERATIONS) + self.assertEqual(next(success), ITERATIONS)
Use a simple counter rather than appending booleans to a list and counting them.
## Code Before: """Tests for refleaks.""" from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy data = object() from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): success = [] def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() success.append(True) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() self.assertEqual(len(success), ITERATIONS) ## Instruction: Use a simple counter rather than appending booleans to a list and counting them. ## Code After: """Tests for refleaks.""" import itertools from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy data = object() from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): success = itertools.count() def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() next(success) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() self.assertEqual(next(success), ITERATIONS)
# ... existing code ... """Tests for refleaks.""" import itertools from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading # ... modified code ... cherrypy.tree.mount(Root()) def test_threadlocal_garbage(self): success = itertools.count() def getpage(): host = '%s:%s' % (self.interface(), self.PORT) ... self.assertEqual(body, ntob("Hello world!")) finally: c.close() next(success) ITERATIONS = 25 ts = [] ... for t in ts: t.join() self.assertEqual(next(success), ITERATIONS) # ... rest of the code ...
aaaf8ef7433418f7a195c79674db56e03fc58f10
apps/bplan/models.py
apps/bplan/models.py
from django.db import models from adhocracy4.models.base import TimeStampedModel from adhocracy4.modules import models as module_models from apps.extprojects.models import ExternalProject class Bplan(ExternalProject): office_worker_email = models.EmailField() class AnonymousItem(TimeStampedModel): module = models.ForeignKey(module_models.Module, on_delete=models.CASCADE) @property def project(self): return self.module.project class Meta: abstract = True class Statement(AnonymousItem): name = models.CharField(max_length=255) email = models.EmailField(blank=True) statement = models.TextField(max_length=17500) street_number = models.CharField(max_length=255) postal_code_city = models.CharField(max_length=255)
from django.contrib.auth.models import AnonymousUser from django.db import models from adhocracy4.models.base import TimeStampedModel from adhocracy4.modules import models as module_models from apps.extprojects.models import ExternalProject class Bplan(ExternalProject): office_worker_email = models.EmailField() class AnonymousItem(TimeStampedModel): module = models.ForeignKey(module_models.Module, on_delete=models.CASCADE) @property def project(self): return self.module.project @property def creator(self): return AnonymousUser() @creator.setter def creator(self, value): pass class Meta: abstract = True class Statement(AnonymousItem): name = models.CharField(max_length=255) email = models.EmailField(blank=True) statement = models.TextField(max_length=17500) street_number = models.CharField(max_length=255) postal_code_city = models.CharField(max_length=255)
Add mockup creator property to AnonymousItems
Add mockup creator property to AnonymousItems
Python
agpl-3.0
liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin
+ from django.contrib.auth.models import AnonymousUser from django.db import models from adhocracy4.models.base import TimeStampedModel from adhocracy4.modules import models as module_models from apps.extprojects.models import ExternalProject class Bplan(ExternalProject): office_worker_email = models.EmailField() class AnonymousItem(TimeStampedModel): module = models.ForeignKey(module_models.Module, on_delete=models.CASCADE) @property def project(self): return self.module.project + @property + def creator(self): + return AnonymousUser() + + @creator.setter + def creator(self, value): + pass + class Meta: abstract = True class Statement(AnonymousItem): name = models.CharField(max_length=255) email = models.EmailField(blank=True) statement = models.TextField(max_length=17500) street_number = models.CharField(max_length=255) postal_code_city = models.CharField(max_length=255)
Add mockup creator property to AnonymousItems
## Code Before: from django.db import models from adhocracy4.models.base import TimeStampedModel from adhocracy4.modules import models as module_models from apps.extprojects.models import ExternalProject class Bplan(ExternalProject): office_worker_email = models.EmailField() class AnonymousItem(TimeStampedModel): module = models.ForeignKey(module_models.Module, on_delete=models.CASCADE) @property def project(self): return self.module.project class Meta: abstract = True class Statement(AnonymousItem): name = models.CharField(max_length=255) email = models.EmailField(blank=True) statement = models.TextField(max_length=17500) street_number = models.CharField(max_length=255) postal_code_city = models.CharField(max_length=255) ## Instruction: Add mockup creator property to AnonymousItems ## Code After: from django.contrib.auth.models import AnonymousUser from django.db import models from adhocracy4.models.base import TimeStampedModel from adhocracy4.modules import models as module_models from apps.extprojects.models import ExternalProject class Bplan(ExternalProject): office_worker_email = models.EmailField() class AnonymousItem(TimeStampedModel): module = models.ForeignKey(module_models.Module, on_delete=models.CASCADE) @property def project(self): return self.module.project @property def creator(self): return AnonymousUser() @creator.setter def creator(self, value): pass class Meta: abstract = True class Statement(AnonymousItem): name = models.CharField(max_length=255) email = models.EmailField(blank=True) statement = models.TextField(max_length=17500) street_number = models.CharField(max_length=255) postal_code_city = models.CharField(max_length=255)
// ... existing code ... from django.contrib.auth.models import AnonymousUser from django.db import models from adhocracy4.models.base import TimeStampedModel // ... modified code ... def project(self): return self.module.project @property def creator(self): return AnonymousUser() @creator.setter def creator(self, value): pass class Meta: abstract = True // ... rest of the code ...
07c9b76d63714c431a983f0506ff71f19face3bd
astroquery/alma/tests/setup_package.py
astroquery/alma/tests/setup_package.py
import os def get_package_data(): paths = [os.path.join('data', '*.txt')] return {'astroquery.alma.tests': paths}
import os def get_package_data(): paths = [os.path.join('data', '*.txt'), os.path.join('data', '*.xml')] return {'astroquery.alma.tests': paths}
Include xml datafile for alma tests
Include xml datafile for alma tests
Python
bsd-3-clause
imbasimba/astroquery,imbasimba/astroquery,ceb8/astroquery,ceb8/astroquery
import os def get_package_data(): - paths = [os.path.join('data', '*.txt')] + paths = [os.path.join('data', '*.txt'), os.path.join('data', '*.xml')] return {'astroquery.alma.tests': paths}
Include xml datafile for alma tests
## Code Before: import os def get_package_data(): paths = [os.path.join('data', '*.txt')] return {'astroquery.alma.tests': paths} ## Instruction: Include xml datafile for alma tests ## Code After: import os def get_package_data(): paths = [os.path.join('data', '*.txt'), os.path.join('data', '*.xml')] return {'astroquery.alma.tests': paths}
... def get_package_data(): paths = [os.path.join('data', '*.txt'), os.path.join('data', '*.xml')] return {'astroquery.alma.tests': paths} ...
376fcbd76bb2f0de3c738ac66ac5526b6685d18a
plim/extensions.py
plim/extensions.py
from docutils.core import publish_parts import coffeescript from scss import Scss from stylus import Stylus from .util import as_unicode def rst_to_html(source): # This code was taken from http://wiki.python.org/moin/ReStructuredText # You may also be interested in http://www.tele3.cz/jbar/rest/about.html html = publish_parts(source=source, writer_name='html') return html['html_body'] def coffee_to_js(source): return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source)) def scss_to_css(source): css = Scss().compile(source).strip() return as_unicode('<style>{css}</style>').format(css=css) def stylus_to_css(source): compiler = Stylus(plugins={'nib':{}}) return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
from docutils.core import publish_parts import coffeescript from scss import Scss from stylus import Stylus from .util import as_unicode def rst_to_html(source): # This code was taken from http://wiki.python.org/moin/ReStructuredText # You may also be interested in http://www.tele3.cz/jbar/rest/about.html html = publish_parts(source=source, writer_name='html') return html['html_body'] def coffee_to_js(source): return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source)) def scss_to_css(source): css = Scss().compile(source).strip() return as_unicode('<style>{css}</style>').format(css=css) def stylus_to_css(source): compiler = Stylus() return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
Fix execjs.ProgramError: Error: Cannot find module 'nib' for -stylus
Fix execjs.ProgramError: Error: Cannot find module 'nib' for -stylus
Python
mit
kxxoling/Plim
from docutils.core import publish_parts import coffeescript from scss import Scss from stylus import Stylus from .util import as_unicode def rst_to_html(source): # This code was taken from http://wiki.python.org/moin/ReStructuredText # You may also be interested in http://www.tele3.cz/jbar/rest/about.html html = publish_parts(source=source, writer_name='html') return html['html_body'] def coffee_to_js(source): return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source)) def scss_to_css(source): css = Scss().compile(source).strip() return as_unicode('<style>{css}</style>').format(css=css) def stylus_to_css(source): - compiler = Stylus(plugins={'nib':{}}) + compiler = Stylus() return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
Fix execjs.ProgramError: Error: Cannot find module 'nib' for -stylus
## Code Before: from docutils.core import publish_parts import coffeescript from scss import Scss from stylus import Stylus from .util import as_unicode def rst_to_html(source): # This code was taken from http://wiki.python.org/moin/ReStructuredText # You may also be interested in http://www.tele3.cz/jbar/rest/about.html html = publish_parts(source=source, writer_name='html') return html['html_body'] def coffee_to_js(source): return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source)) def scss_to_css(source): css = Scss().compile(source).strip() return as_unicode('<style>{css}</style>').format(css=css) def stylus_to_css(source): compiler = Stylus(plugins={'nib':{}}) return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip()) ## Instruction: Fix execjs.ProgramError: Error: Cannot find module 'nib' for -stylus ## Code After: from docutils.core import publish_parts import coffeescript from scss import Scss from stylus import Stylus from .util import as_unicode def rst_to_html(source): # This code was taken from http://wiki.python.org/moin/ReStructuredText # You may also be interested in http://www.tele3.cz/jbar/rest/about.html html = publish_parts(source=source, writer_name='html') return html['html_body'] def coffee_to_js(source): return as_unicode('<script>{js}</script>').format(js=coffeescript.compile(source)) def scss_to_css(source): css = Scss().compile(source).strip() return as_unicode('<style>{css}</style>').format(css=css) def stylus_to_css(source): compiler = Stylus() return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip())
... def stylus_to_css(source): compiler = Stylus() return as_unicode('<style>{css}</style>').format(css=compiler.compile(source).strip()) ...
785fcdca3c9bfb908444d3b9339457c616761f2c
tests/flights_to_test.py
tests/flights_to_test.py
import unittest import datetime import json import sys sys.path.append('..') import sabre_dev_studio import sabre_dev_studio.sabre_exceptions as sabre_exceptions ''' requires config.json in the same directory for api authentication { "sabre_client_id": -----, "sabre_client_secret": ----- } ''' class TestBasicInstaflights(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() data = json.loads(raw_data) client_secret = data['sabre_client_secret'] client_id = data['sabre_client_id'] return (client_id, client_secret) def setUp(self): # Read from config self.client_id, self.client_secret = self.read_config() self.sds = sabre_dev_studio.SabreDevStudio() self.sds.set_credentials(self.client_id, self.client_secret) self.sds.authenticate() def test_basic_request(self): city = 'YTO' instaf = self.sds.flights_to(city) self.assertIsNotNone(instaf) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() with self.assertRaises(sabre_exceptions.NotAuthorizedError): resp = sds.flights_to('YTO') if __name__ == '__main__': unittest.main()
import unittest import datetime import json import sys sys.path.append('..') import sabre_dev_studio import sabre_dev_studio.sabre_exceptions as sabre_exceptions ''' requires config.json in the same directory for api authentication { "sabre_client_id": -----, "sabre_client_secret": ----- } ''' class TestBasicFlightsTo(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() data = json.loads(raw_data) client_secret = data['sabre_client_secret'] client_id = data['sabre_client_id'] return (client_id, client_secret) def setUp(self): # Read from config self.client_id, self.client_secret = self.read_config() self.sds = sabre_dev_studio.SabreDevStudio() self.sds.set_credentials(self.client_id, self.client_secret) self.sds.authenticate() def test_basic_request(self): city = 'YTO' flights_to_city = self.sds.flights_to(city) print(flights_to_city) self.assertIsNotNone(flights_to_city) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() with self.assertRaises(sabre_exceptions.NotAuthorizedError): resp = sds.flights_to('YTO') if __name__ == '__main__': unittest.main()
Change instaflights name in flights_to tests
Change instaflights name in flights_to tests
Python
mit
Jamil/sabre_dev_studio
import unittest import datetime import json import sys sys.path.append('..') import sabre_dev_studio import sabre_dev_studio.sabre_exceptions as sabre_exceptions ''' requires config.json in the same directory for api authentication { "sabre_client_id": -----, "sabre_client_secret": ----- } ''' - class TestBasicInstaflights(unittest.TestCase): + class TestBasicFlightsTo(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() data = json.loads(raw_data) client_secret = data['sabre_client_secret'] client_id = data['sabre_client_id'] return (client_id, client_secret) def setUp(self): # Read from config self.client_id, self.client_secret = self.read_config() self.sds = sabre_dev_studio.SabreDevStudio() self.sds.set_credentials(self.client_id, self.client_secret) self.sds.authenticate() def test_basic_request(self): city = 'YTO' - instaf = self.sds.flights_to(city) + flights_to_city = self.sds.flights_to(city) + print(flights_to_city) - self.assertIsNotNone(instaf) + self.assertIsNotNone(flights_to_city) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() with self.assertRaises(sabre_exceptions.NotAuthorizedError): resp = sds.flights_to('YTO') if __name__ == '__main__': unittest.main()
Change instaflights name in flights_to tests
## Code Before: import unittest import datetime import json import sys sys.path.append('..') import sabre_dev_studio import sabre_dev_studio.sabre_exceptions as sabre_exceptions ''' requires config.json in the same directory for api authentication { "sabre_client_id": -----, "sabre_client_secret": ----- } ''' class TestBasicInstaflights(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() data = json.loads(raw_data) client_secret = data['sabre_client_secret'] client_id = data['sabre_client_id'] return (client_id, client_secret) def setUp(self): # Read from config self.client_id, self.client_secret = self.read_config() self.sds = sabre_dev_studio.SabreDevStudio() self.sds.set_credentials(self.client_id, self.client_secret) self.sds.authenticate() def test_basic_request(self): city = 'YTO' instaf = self.sds.flights_to(city) self.assertIsNotNone(instaf) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() with self.assertRaises(sabre_exceptions.NotAuthorizedError): resp = sds.flights_to('YTO') if __name__ == '__main__': unittest.main() ## Instruction: Change instaflights name in flights_to tests ## Code After: import unittest import datetime import json import sys sys.path.append('..') import sabre_dev_studio import sabre_dev_studio.sabre_exceptions as sabre_exceptions ''' requires config.json in the same directory for api authentication { "sabre_client_id": -----, "sabre_client_secret": ----- } ''' class TestBasicFlightsTo(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() data = json.loads(raw_data) client_secret = data['sabre_client_secret'] client_id = data['sabre_client_id'] return (client_id, client_secret) def setUp(self): # Read from config self.client_id, self.client_secret = self.read_config() self.sds = sabre_dev_studio.SabreDevStudio() self.sds.set_credentials(self.client_id, self.client_secret) self.sds.authenticate() def test_basic_request(self): city = 'YTO' flights_to_city = self.sds.flights_to(city) print(flights_to_city) self.assertIsNotNone(flights_to_city) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() with self.assertRaises(sabre_exceptions.NotAuthorizedError): resp = sds.flights_to('YTO') if __name__ == '__main__': unittest.main()
// ... existing code ... } ''' class TestBasicFlightsTo(unittest.TestCase): def read_config(self): raw_data = open('config.json').read() // ... modified code ... def test_basic_request(self): city = 'YTO' flights_to_city = self.sds.flights_to(city) print(flights_to_city) self.assertIsNotNone(flights_to_city) def test_no_authorization(self): sds = sabre_dev_studio.SabreDevStudio() // ... rest of the code ...
a04f3d167011e6d0e50d6a088f5877769fbedaa2
testfixtures/shop_order.py
testfixtures/shop_order.py
from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer from byceps.services.shop.order import service from byceps.services.shop.order.transfer.models import PaymentMethod ANY_ORDER_NUMBER = 'AEC-03-B00074' def create_orderer(user): return Orderer( user.id, user.detail.first_names, user.detail.last_name, user.detail.country, user.detail.zip_code, user.detail.city, user.detail.street) def create_order(shop_id, placed_by, *, order_number=ANY_ORDER_NUMBER, payment_method=PaymentMethod.bank_transfer, shipping_required=False): order = Order( shop_id, order_number, placed_by.id, placed_by.detail.first_names, placed_by.detail.last_name, placed_by.detail.country, placed_by.detail.zip_code, placed_by.detail.city, placed_by.detail.street, payment_method, ) order.shipping_required = shipping_required return order def create_order_item(order, article, quantity): return service._add_article_to_order(order, article, quantity)
from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer from byceps.services.shop.order.transfer.models import PaymentMethod ANY_ORDER_NUMBER = 'AEC-03-B00074' def create_orderer(user): return Orderer( user.id, user.detail.first_names, user.detail.last_name, user.detail.country, user.detail.zip_code, user.detail.city, user.detail.street) def create_order(shop_id, placed_by, *, order_number=ANY_ORDER_NUMBER, payment_method=PaymentMethod.bank_transfer, shipping_required=False): order = Order( shop_id, order_number, placed_by.id, placed_by.detail.first_names, placed_by.detail.last_name, placed_by.detail.country, placed_by.detail.zip_code, placed_by.detail.city, placed_by.detail.street, payment_method, ) order.shipping_required = shipping_required return order
Remove unused test fixture `create_order_item`
Remove unused test fixture `create_order_item`
Python
bsd-3-clause
homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer - from byceps.services.shop.order import service from byceps.services.shop.order.transfer.models import PaymentMethod ANY_ORDER_NUMBER = 'AEC-03-B00074' def create_orderer(user): return Orderer( user.id, user.detail.first_names, user.detail.last_name, user.detail.country, user.detail.zip_code, user.detail.city, user.detail.street) def create_order(shop_id, placed_by, *, order_number=ANY_ORDER_NUMBER, payment_method=PaymentMethod.bank_transfer, shipping_required=False): order = Order( shop_id, order_number, placed_by.id, placed_by.detail.first_names, placed_by.detail.last_name, placed_by.detail.country, placed_by.detail.zip_code, placed_by.detail.city, placed_by.detail.street, payment_method, ) order.shipping_required = shipping_required return order - - def create_order_item(order, article, quantity): - return service._add_article_to_order(order, article, quantity) -
Remove unused test fixture `create_order_item`
## Code Before: from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer from byceps.services.shop.order import service from byceps.services.shop.order.transfer.models import PaymentMethod ANY_ORDER_NUMBER = 'AEC-03-B00074' def create_orderer(user): return Orderer( user.id, user.detail.first_names, user.detail.last_name, user.detail.country, user.detail.zip_code, user.detail.city, user.detail.street) def create_order(shop_id, placed_by, *, order_number=ANY_ORDER_NUMBER, payment_method=PaymentMethod.bank_transfer, shipping_required=False): order = Order( shop_id, order_number, placed_by.id, placed_by.detail.first_names, placed_by.detail.last_name, placed_by.detail.country, placed_by.detail.zip_code, placed_by.detail.city, placed_by.detail.street, payment_method, ) order.shipping_required = shipping_required return order def create_order_item(order, article, quantity): return service._add_article_to_order(order, article, quantity) ## Instruction: Remove unused test fixture `create_order_item` ## Code After: from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer from byceps.services.shop.order.transfer.models import PaymentMethod ANY_ORDER_NUMBER = 'AEC-03-B00074' def create_orderer(user): return Orderer( user.id, user.detail.first_names, user.detail.last_name, user.detail.country, user.detail.zip_code, user.detail.city, user.detail.street) def create_order(shop_id, placed_by, *, order_number=ANY_ORDER_NUMBER, payment_method=PaymentMethod.bank_transfer, shipping_required=False): order = Order( shop_id, order_number, placed_by.id, placed_by.detail.first_names, placed_by.detail.last_name, placed_by.detail.country, placed_by.detail.zip_code, placed_by.detail.city, placed_by.detail.street, payment_method, ) order.shipping_required = shipping_required return order
// ... existing code ... from byceps.services.shop.order.models.order import Order from byceps.services.shop.order.models.orderer import Orderer from byceps.services.shop.order.transfer.models import PaymentMethod // ... modified code ... order.shipping_required = shipping_required return order // ... rest of the code ...
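The `create_order` signature in this record uses a bare `*`, so `order_number`, `payment_method`, and `shipping_required` can only be passed by keyword. A small runnable sketch of that behaviour in isolation:

```python
def create_order(shop_id, placed_by, *, order_number='AEC-03-B00074',
                 shipping_required=False):
    # Everything after the bare `*` is keyword-only.
    return (shop_id, placed_by, order_number, shipping_required)

print(create_order(1, 'alice', shipping_required=True))   # works
try:
    create_order(1, 'alice', 'X-1')                       # positional
except TypeError as exc:
    print(exc)  # takes 2 positional arguments but 3 were given
```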
f4f5852944d1fd1b9e96a70cb4496ee6e1e66dc0
genome_designer/main/celery_util.py
genome_designer/main/celery_util.py
from errno import errorcode from celery.task.control import inspect CELERY_ERROR_KEY = 'ERROR' def get_celery_worker_status(): """Checks whether celery is running and reports the error if not. Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ try: insp = inspect() d = insp.stats() if not d: d = { CELERY_ERROR_KEY: 'No running Celery workers were found.' } except IOError as e: msg = "Error connecting to the backend: " + str(e) if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED': msg += ' Check that the RabbitMQ server is running.' d = { CELERY_ERROR_KEY: msg } except ImportError as e: d = { CELERY_ERROR_KEY: str(e)} return d
from errno import errorcode from celery.task.control import inspect from django.conf import settings CELERY_ERROR_KEY = 'ERROR' def get_celery_worker_status(): """Checks whether celery is running and reports the error if not. Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ if settings.BROKER_BACKEND == 'memory': # We are testing with in-memory celery. Celery is effectively running. return {} try: insp = inspect() d = insp.stats() if not d: d = { CELERY_ERROR_KEY: 'No running Celery workers were found.' } except IOError as e: msg = "Error connecting to the backend: " + str(e) if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED': msg += ' Check that the RabbitMQ server is running.' d = { CELERY_ERROR_KEY: msg } except ImportError as e: d = { CELERY_ERROR_KEY: str(e)} return d
Fix tests: Allow for celery not to be running when doing in-memory celery for tests.
Fix tests: Allow for celery not to be running when doing in-memory celery for tests.
Python
mit
churchlab/millstone,churchlab/millstone,churchlab/millstone,churchlab/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone,woodymit/millstone,woodymit/millstone_accidental_source,woodymit/millstone,woodymit/millstone
from errno import errorcode from celery.task.control import inspect + from django.conf import settings CELERY_ERROR_KEY = 'ERROR' def get_celery_worker_status(): """Checks whether celery is running and reports the error if not. Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ + if settings.BROKER_BACKEND == 'memory': + # We are testing with in-memory celery. Celery is effectively running. + return {} + try: insp = inspect() d = insp.stats() if not d: d = { CELERY_ERROR_KEY: 'No running Celery workers were found.' } except IOError as e: msg = "Error connecting to the backend: " + str(e) if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED': msg += ' Check that the RabbitMQ server is running.' d = { CELERY_ERROR_KEY: msg } except ImportError as e: d = { CELERY_ERROR_KEY: str(e)} return d
Fix tests: Allow for celery not to be running when doing in-memory celery for tests.
## Code Before: from errno import errorcode from celery.task.control import inspect CELERY_ERROR_KEY = 'ERROR' def get_celery_worker_status(): """Checks whether celery is running and reports the error if not. Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ try: insp = inspect() d = insp.stats() if not d: d = { CELERY_ERROR_KEY: 'No running Celery workers were found.' } except IOError as e: msg = "Error connecting to the backend: " + str(e) if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED': msg += ' Check that the RabbitMQ server is running.' d = { CELERY_ERROR_KEY: msg } except ImportError as e: d = { CELERY_ERROR_KEY: str(e)} return d ## Instruction: Fix tests: Allow for celery not to be running when doing in-memory celery for tests. ## Code After: from errno import errorcode from celery.task.control import inspect from django.conf import settings CELERY_ERROR_KEY = 'ERROR' def get_celery_worker_status(): """Checks whether celery is running and reports the error if not. Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ if settings.BROKER_BACKEND == 'memory': # We are testing with in-memory celery. Celery is effectively running. return {} try: insp = inspect() d = insp.stats() if not d: d = { CELERY_ERROR_KEY: 'No running Celery workers were found.' } except IOError as e: msg = "Error connecting to the backend: " + str(e) if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED': msg += ' Check that the RabbitMQ server is running.' d = { CELERY_ERROR_KEY: msg } except ImportError as e: d = { CELERY_ERROR_KEY: str(e)} return d
// ... existing code ... from errno import errorcode from celery.task.control import inspect from django.conf import settings CELERY_ERROR_KEY = 'ERROR' // ... modified code ... Source: http://stackoverflow.com/questions/8506914/detect-whether-celery-is-available-running """ if settings.BROKER_BACKEND == 'memory': # We are testing with in-memory celery. Celery is effectively running. return {} try: insp = inspect() d = insp.stats() // ... rest of the code ...
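The new guard only fires if the test settings actually select the in-memory broker. Below is a sketch of matching Django settings for the Celery 3.x era that the `BROKER_BACKEND` name implies; treat the module and the exact flag set as assumptions, since the record does not show the project's test settings:

```python
# settings_test.py -- hypothetical test settings for this project
BROKER_BACKEND = 'memory'   # in-memory transport; get_celery_worker_status()
                            # short-circuits when it sees this value
CELERY_ALWAYS_EAGER = True  # run tasks synchronously inside the test process
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True  # let task errors fail the test
```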
5dcdfa510e62d754bce6270286e42a76b37c23c4
inpassing/worker/util.py
inpassing/worker/util.py
from datetime import datetime, timezone DATE_FMT = '%Y-%m-%d' def date_to_str(day): return day.strftime(DATE_FMT) def str_to_date(s): return datetime.strptime(s, DATE_FMT).replace(tzinfo=timezone.utc)
from datetime import datetime, timezone DATE_FMT = '%Y-%m-%d' def date_to_str(day): return day.strftime(DATE_FMT) def str_to_date(s, tz=None): ret = datetime.strptime(s, DATE_FMT) if tz: return tz.localize(ret) else: return ret
Support use of local timezones when parsing date strings
Support use of local timezones when parsing date strings
Python
mit
lukesanantonio/inpassing-backend,lukesanantonio/inpassing-backend
from datetime import datetime, timezone DATE_FMT = '%Y-%m-%d' def date_to_str(day): return day.strftime(DATE_FMT) - def str_to_date(s): + def str_to_date(s, tz=None): - return datetime.strptime(s, DATE_FMT).replace(tzinfo=timezone.utc) + ret = datetime.strptime(s, DATE_FMT) + if tz: + return tz.localize(ret) + else: + return ret
Support use of local timezones when parsing date strings
## Code Before: from datetime import datetime, timezone DATE_FMT = '%Y-%m-%d' def date_to_str(day): return day.strftime(DATE_FMT) def str_to_date(s): return datetime.strptime(s, DATE_FMT).replace(tzinfo=timezone.utc) ## Instruction: Support use of local timezones when parsing date strings ## Code After: from datetime import datetime, timezone DATE_FMT = '%Y-%m-%d' def date_to_str(day): return day.strftime(DATE_FMT) def str_to_date(s, tz=None): ret = datetime.strptime(s, DATE_FMT) if tz: return tz.localize(ret) else: return ret
... return day.strftime(DATE_FMT) def str_to_date(s, tz=None): ret = datetime.strptime(s, DATE_FMT) if tz: return tz.localize(ret) else: return ret ...
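`tz.localize(...)` implies a pytz-style timezone object; the standard library's `datetime.timezone` has no `localize` method. A runnable sketch under that assumption, showing both the naive default and a pytz-aware result:

```python
from datetime import datetime

import pytz  # assumed dependency, implied by the tz.localize() call

DATE_FMT = '%Y-%m-%d'

def str_to_date(s, tz=None):
    ret = datetime.strptime(s, DATE_FMT)
    if tz:
        # pytz timezones must be attached via localize(); replace(tzinfo=...)
        # would pick the zone's raw LMT offset and mishandle DST.
        return tz.localize(ret)
    return ret

print(str_to_date('2017-03-01'))                               # naive
print(str_to_date('2017-03-01', pytz.timezone('US/Eastern')))  # aware
```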
70808dfd53c5a5760a13252a72caf229793e8225
crawl.py
crawl.py
import urllib2; from bs4 import BeautifulSoup;
import urllib.parse; import urllib.request; from bs4 import BeautifulSoup; def searchLink(search): BASE_URL = "http://www.990.ro/" key = urllib.parse.urlencode({'kw': search}).encode('ascii'); re = urllib.request.Request(BASE_URL + 'functions/search3/live_search_using_jquery_ajax/search.php', key); re_link = urllib.request.urlopen(re); soup = BeautifulSoup(re_link.read(), "lxml"); ref = soup.find_all('a'); names = soup.find_all('div', id="rest"); if(ref != []): print("Search returned:") i = 1; for name in names: print(str(i) + ". " + name.get_text()); i+=1; select = int(input("\nPlease select the corresponding number: ")); return BASE_URL + ref[select - 1].get('href'); else: print("Nothing found!"); return ''; movie = input("search: "); print(searchLink(movie));
Add search method to find the movies/series home url
Add search method to find the movies/series home url
Python
mit
raztechs/py-video-crawler
- import urllib2; + import urllib.parse; + import urllib.request; from bs4 import BeautifulSoup; + + def searchLink(search): + + BASE_URL = "http://www.990.ro/" + key = urllib.parse.urlencode({'kw': search}).encode('ascii'); + + re = urllib.request.Request(BASE_URL + 'functions/search3/live_search_using_jquery_ajax/search.php', key); + re_link = urllib.request.urlopen(re); + soup = BeautifulSoup(re_link.read(), "lxml"); + + ref = soup.find_all('a'); + names = soup.find_all('div', id="rest"); + + if(ref != []): + print("Search returned:") + i = 1; + + for name in names: + print(str(i) + ". " + name.get_text()); + i+=1; + + select = int(input("\nPlease select the corresponding number: ")); + + return BASE_URL + ref[select - 1].get('href'); + + else: + print("Nothing found!"); + return ''; + + movie = input("search: "); + print(searchLink(movie)); +
Add search method to find the movies/series home url
## Code Before: import urllib2; from bs4 import BeautifulSoup; ## Instruction: Add search method to find the movies/series home url ## Code After: import urllib.parse; import urllib.request; from bs4 import BeautifulSoup; def searchLink(search): BASE_URL = "http://www.990.ro/" key = urllib.parse.urlencode({'kw': search}).encode('ascii'); re = urllib.request.Request(BASE_URL + 'functions/search3/live_search_using_jquery_ajax/search.php', key); re_link = urllib.request.urlopen(re); soup = BeautifulSoup(re_link.read(), "lxml"); ref = soup.find_all('a'); names = soup.find_all('div', id="rest"); if(ref != []): print("Search returned:") i = 1; for name in names: print(str(i) + ". " + name.get_text()); i+=1; select = int(input("\nPlease select the corresponding number: ")); return BASE_URL + ref[select - 1].get('href'); else: print("Nothing found!"); return ''; movie = input("search: "); print(searchLink(movie));
// ... existing code ... import urllib.parse; import urllib.request; from bs4 import BeautifulSoup; def searchLink(search): BASE_URL = "http://www.990.ro/" key = urllib.parse.urlencode({'kw': search}).encode('ascii'); re = urllib.request.Request(BASE_URL + 'functions/search3/live_search_using_jquery_ajax/search.php', key); re_link = urllib.request.urlopen(re); soup = BeautifulSoup(re_link.read(), "lxml"); ref = soup.find_all('a'); names = soup.find_all('div', id="rest"); if(ref != []): print("Search returned:") i = 1; for name in names: print(str(i) + ". " + name.get_text()); i+=1; select = int(input("\nPlease select the corresponding number: ")); return BASE_URL + ref[select - 1].get('href'); else: print("Nothing found!"); return ''; movie = input("search: "); print(searchLink(movie)); // ... rest of the code ...
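The search call becomes a POST because a bytes body is handed to `urllib.request.Request`. A minimal sketch of just that pattern (the URL below is a placeholder, not the record's endpoint):

```python
import urllib.parse
import urllib.request

def post_form(url, fields):
    # urlencode() builds an application/x-www-form-urlencoded body;
    # Request with a data argument defaults to the POST method.
    body = urllib.parse.urlencode(fields).encode('ascii')
    req = urllib.request.Request(url, body)
    with urllib.request.urlopen(req) as resp:
        return resp.read()

# Hypothetical call; the record posts {'kw': search} to 990.ro's handler.
# html = post_form('https://example.com/search', {'kw': 'movie'})
```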
ceeb64c9e46a74f95178be88566fba3d7f080fa1
mica/stats/tests/test_acq_stats.py
mica/stats/tests/test_acq_stats.py
from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210)
import tempfile import os from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210) def test_make_acq_stats(): """ Save the acq stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) acq_stats.table_file = fn obsid = 20001 obsid_info, acq, star_info, catalog, temp = acq_stats.calc_stats(obsid) t = acq_stats.table_acq_stats(obsid_info, acq, star_info, catalog, temp) acq_stats._save_acq_stats(t) os.unlink(fn)
Add a test that makes a new acq stats database
Add a test that makes a new acq stats database
Python
bsd-3-clause
sot/mica,sot/mica
+ import tempfile + import os + from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210) + + def test_make_acq_stats(): + """ + Save the acq stats for one obsid into a newly-created table + """ + # Get a temporary file, but then delete it, because _save_acq_stats will only + # make a new table if the supplied file doesn't exist + fh, fn = tempfile.mkstemp(suffix='.h5') + os.unlink(fn) + acq_stats.table_file = fn + obsid = 20001 + obsid_info, acq, star_info, catalog, temp = acq_stats.calc_stats(obsid) + t = acq_stats.table_acq_stats(obsid_info, acq, star_info, catalog, temp) + acq_stats._save_acq_stats(t) + os.unlink(fn) +
Add a test that makes a new acq stats database
## Code Before: from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210) ## Instruction: Add a test that makes a new acq stats database ## Code After: import tempfile import os from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210) def test_make_acq_stats(): """ Save the acq stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) acq_stats.table_file = fn obsid = 20001 obsid_info, acq, star_info, catalog, temp = acq_stats.calc_stats(obsid) t = acq_stats.table_acq_stats(obsid_info, acq, star_info, catalog, temp) acq_stats._save_acq_stats(t) os.unlink(fn)
# ... existing code ... import tempfile import os from .. import acq_stats def test_calc_stats(): acq_stats.calc_stats(17210) def test_make_acq_stats(): """ Save the acq stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) acq_stats.table_file = fn obsid = 20001 obsid_info, acq, star_info, catalog, temp = acq_stats.calc_stats(obsid) t = acq_stats.table_acq_stats(obsid_info, acq, star_info, catalog, temp) acq_stats._save_acq_stats(t) os.unlink(fn) # ... rest of the code ...
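`tempfile.mkstemp` returns an already-open OS-level descriptor along with the path; the test above discards `fh` without closing it, which leaks one descriptor per run. A sketch of the same delete-then-recreate trick with the descriptor closed first:

```python
import os
import tempfile

def fresh_h5_path():
    # mkstemp() returns (fd, absolute path); close the fd so it does not
    # leak, then unlink so downstream code sees a non-existent file and
    # creates a brand-new table there.
    fd, path = tempfile.mkstemp(suffix='.h5')
    os.close(fd)
    os.unlink(path)
    return path

print(fresh_h5_path())
```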
1c14d45ba620118401728c56e5ef3a189f9b4145
samples/fire.py
samples/fire.py
from asciimatics.renderers import FigletText, Fire from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.effects import Print from asciimatics.exceptions import ResizeScreenError from pyfiglet import Figlet import sys def demo(screen): scenes = [] effects = [ Print(screen, Fire(screen.height, 80, Figlet(font="banner", width=200).renderText("ASCIIMATICS"), 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=3, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, 600)) screen.play(scenes, stop_on_resize=True) if __name__ == "__main__": while True: try: Screen.wrapper(demo) sys.exit(0) except ResizeScreenError: pass
from asciimatics.renderers import FigletText, Fire from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.effects import Print from asciimatics.exceptions import ResizeScreenError from pyfiglet import Figlet import sys def demo(screen): scenes = [] text = Figlet(font="banner", width=200).renderText("ASCIIMATICS") width = max([len(x) for x in text.split("\n")]) effects = [ Print(screen, Fire(screen.height, 80, text, 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=(screen.width - width) // 2 + 1, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, 600)) screen.play(scenes, stop_on_resize=True) if __name__ == "__main__": while True: try: Screen.wrapper(demo) sys.exit(0) except ResizeScreenError: pass
Fix shadow for wide screens.
Fix shadow for wide screens.
Python
apache-2.0
peterbrittain/asciimatics,peterbrittain/asciimatics
from asciimatics.renderers import FigletText, Fire from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.effects import Print from asciimatics.exceptions import ResizeScreenError from pyfiglet import Figlet import sys def demo(screen): scenes = [] + text = Figlet(font="banner", width=200).renderText("ASCIIMATICS") + width = max([len(x) for x in text.split("\n")]) + effects = [ Print(screen, - Fire(screen.height, 80, + Fire(screen.height, 80, text, 100), - Figlet(font="banner", width=200).renderText("ASCIIMATICS"), - 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), - screen.height - 9, x=3, + screen.height - 9, x=(screen.width - width) // 2 + 1, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, 600)) screen.play(scenes, stop_on_resize=True) if __name__ == "__main__": while True: try: Screen.wrapper(demo) sys.exit(0) except ResizeScreenError: pass
Fix shadow for wide screens.
## Code Before: from asciimatics.renderers import FigletText, Fire from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.effects import Print from asciimatics.exceptions import ResizeScreenError from pyfiglet import Figlet import sys def demo(screen): scenes = [] effects = [ Print(screen, Fire(screen.height, 80, Figlet(font="banner", width=200).renderText("ASCIIMATICS"), 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=3, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, 600)) screen.play(scenes, stop_on_resize=True) if __name__ == "__main__": while True: try: Screen.wrapper(demo) sys.exit(0) except ResizeScreenError: pass ## Instruction: Fix shadow for wide screens. ## Code After: from asciimatics.renderers import FigletText, Fire from asciimatics.scene import Scene from asciimatics.screen import Screen from asciimatics.effects import Print from asciimatics.exceptions import ResizeScreenError from pyfiglet import Figlet import sys def demo(screen): scenes = [] text = Figlet(font="banner", width=200).renderText("ASCIIMATICS") width = max([len(x) for x in text.split("\n")]) effects = [ Print(screen, Fire(screen.height, 80, text, 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=(screen.width - width) // 2 + 1, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, 600)) screen.play(scenes, stop_on_resize=True) if __name__ == "__main__": while True: try: Screen.wrapper(demo) sys.exit(0) except ResizeScreenError: pass
// ... existing code ... def demo(screen): scenes = [] text = Figlet(font="banner", width=200).renderText("ASCIIMATICS") width = max([len(x) for x in text.split("\n")]) effects = [ Print(screen, Fire(screen.height, 80, text, 100), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=(screen.width - width) // 2 + 1, colour=Screen.COLOUR_BLACK, speed=1), Print(screen, // ... rest of the code ...
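The fix measures the widest rendered line and centres the banner with `x = (screen.width - width) // 2 + 1`; the `+ 1` appears to nudge the black copy one cell off-centre so it reads as a drop shadow behind the white copy. A worked sketch of the arithmetic with stand-in numbers:

```python
text = "####\n######\n##"   # stand-in for the multi-line figlet output
width = max(len(line) for line in text.split("\n"))

screen_width = 80           # hypothetical terminal width
x = (screen_width - width) // 2 + 1
print(width, x)             # 6 38: one cell right of the exact centre (37)
```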
82756e5314c2768bb3acf03cf542929d23b73f82
bot/logger/message_sender/synchronized.py
bot/logger/message_sender/synchronized.py
import threading from bot.logger.message_sender import MessageSender, IntermediateMessageSender class SynchronizedMessageSender(IntermediateMessageSender): """ Thread-safe message sender. Wrap your `MessageSender` with this class and its :func:`send` function will be called in a synchronized way, only by one thread at the same time. """ def __init__(self, sender: MessageSender): super().__init__(sender) self.lock = threading.Lock() def send(self, text): with self.lock: self.sender.send(text)
import threading from bot.logger.message_sender import MessageSender, IntermediateMessageSender class SynchronizedMessageSender(IntermediateMessageSender): """ Thread-safe message sender. Wrap your `MessageSender` with this class and its :func:`send` function will be called in a synchronized way, only by one thread at the same time. """ def __init__(self, sender: MessageSender): super().__init__(sender) # Using a reentrant lock to play it safe in case the send function somehow invokes this send function again # maybe because a send triggers another send on the same message sender. # Note that if this send throws an exception the lock is released when dealing with it from outside, # so this is not a problem. # But if the exception is handled inside this send call, the lock is still held. self.lock = threading.RLock() def send(self, text): with self.lock: self.sender.send(text)
Use reentrant lock on SynchronizedMessageSender
Use reentrant lock on SynchronizedMessageSender
Python
agpl-3.0
alvarogzp/telegram-bot,alvarogzp/telegram-bot
import threading from bot.logger.message_sender import MessageSender, IntermediateMessageSender class SynchronizedMessageSender(IntermediateMessageSender): """ Thread-safe message sender. Wrap your `MessageSender` with this class and its :func:`send` function will be called in a synchronized way, only by one thread at the same time. """ def __init__(self, sender: MessageSender): super().__init__(sender) + # Using a reentrant lock to play it safe in case the send function somehow invokes this send function again + # maybe because a send triggers another send on the same message sender. + # Note that if this send throws an exception the lock is released when dealing with it from outside, + # so this is not a problem. + # But if the exception is handled inside this send call, the lock is still held. - self.lock = threading.Lock() + self.lock = threading.RLock() def send(self, text): with self.lock: self.sender.send(text)
Use reentrant lock on SynchronizedMessageSender
## Code Before: import threading from bot.logger.message_sender import MessageSender, IntermediateMessageSender class SynchronizedMessageSender(IntermediateMessageSender): """ Thread-safe message sender. Wrap your `MessageSender` with this class and its :func:`send` function will be called in a synchronized way, only by one thread at the same time. """ def __init__(self, sender: MessageSender): super().__init__(sender) self.lock = threading.Lock() def send(self, text): with self.lock: self.sender.send(text) ## Instruction: Use reentrant lock on SynchronizedMessageSender ## Code After: import threading from bot.logger.message_sender import MessageSender, IntermediateMessageSender class SynchronizedMessageSender(IntermediateMessageSender): """ Thread-safe message sender. Wrap your `MessageSender` with this class and its :func:`send` function will be called in a synchronized way, only by one thread at the same time. """ def __init__(self, sender: MessageSender): super().__init__(sender) # Using a reentrant lock to play it safe in case the send function somehow invokes this send function again # maybe because a send triggers another send on the same message sender. # Note that if this send throws an exception the lock is released when dealing with it from outside, # so this is not a problem. # But if the exception is handled inside this send call, the lock is still held. self.lock = threading.RLock() def send(self, text): with self.lock: self.sender.send(text)
# ... existing code ... def __init__(self, sender: MessageSender): super().__init__(sender) # Using a reentrant lock to play it safe in case the send function somehow invokes this send function again # maybe because a send triggers another send on the same message sender. # Note that if this send throws an exception the lock is released when dealing with it from outside, # so this is not a problem. # But if the exception is handled inside this send call, the lock is still held. self.lock = threading.RLock() def send(self, text): with self.lock: # ... rest of the code ...
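A runnable sketch of why the reentrant lock matters: the same thread may re-acquire an `RLock` it already holds, while a second acquire on a plain `Lock` would block forever (probed non-blockingly here so the demo cannot deadlock):

```python
import threading

plain = threading.Lock()
reentrant = threading.RLock()

plain.acquire()
print(plain.acquire(blocking=False))      # False: a blocking call would hang

reentrant.acquire()
print(reentrant.acquire(blocking=False))  # True: same thread may re-enter
reentrant.release()
reentrant.release()  # RLock needs one release per acquire
plain.release()
```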
4e40575147fd9af02c0e0a380e4d35f6c5d8f67a
polling_stations/apps/data_collection/management/commands/import_breckland.py
polling_stations/apps/data_collection/management/commands/import_breckland.py
from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000143' addresses_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' stations_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' elections = [ 'local.norfolk.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t'
from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000143' addresses_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' stations_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' elections = [ 'local.norfolk.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t' def station_record_to_dict(self, record): """ File supplied contained obviously inaccurate points remove and fall back to geocoding """ if record.pollingplaceid in ['5151', '5370', '5418', '5319']: record = record._replace(pollingplaceeasting = '0') record = record._replace(pollingplacenorthing = '0') return super().station_record_to_dict(record)
Discard dodgy points in Breckland
Discard dodgy points in Breckland. These are clearly very wrong; use geocoding instead.
Python
bsd-3-clause
chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000143' addresses_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' stations_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' elections = [ 'local.norfolk.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t' + def station_record_to_dict(self, record): + + """ + File supplied contained obviously inaccurate points + remove and fall back to geocoding + """ + if record.pollingplaceid in ['5151', '5370', '5418', '5319']: + record = record._replace(pollingplaceeasting = '0') + record = record._replace(pollingplacenorthing = '0') + + return super().station_record_to_dict(record) +
Discard dodgy points in Breckland
## Code Before: from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000143' addresses_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' stations_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' elections = [ 'local.norfolk.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t' ## Instruction: Discard dodgy points in Breckland ## Code After: from data_collection.management.commands import BaseXpressWebLookupCsvImporter class Command(BaseXpressWebLookupCsvImporter): council_id = 'E07000143' addresses_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' stations_name = 'May 2017/BrecklandPropertyPostCodePollingStationWebLookup-2017-02-20.TSV' elections = [ 'local.norfolk.2017-05-04', 'parl.2017-06-08' ] csv_delimiter = '\t' def station_record_to_dict(self, record): """ File supplied contained obviously inaccurate points remove and fall back to geocoding """ if record.pollingplaceid in ['5151', '5370', '5418', '5319']: record = record._replace(pollingplaceeasting = '0') record = record._replace(pollingplacenorthing = '0') return super().station_record_to_dict(record)
... 'parl.2017-06-08' ] csv_delimiter = '\t' def station_record_to_dict(self, record): """ File supplied contained obviously inaccurate points remove and fall back to geocoding """ if record.pollingplaceid in ['5151', '5370', '5418', '5319']: record = record._replace(pollingplaceeasting = '0') record = record._replace(pollingplacenorthing = '0') return super().station_record_to_dict(record) ...
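The importer leans on `namedtuple._replace`, which returns a new tuple with the named fields swapped rather than mutating the row. A standalone sketch (the `Row` type below is a stand-in for the importer's parsed CSV record):

```python
from collections import namedtuple

Row = namedtuple('Row', ['pollingplaceid', 'pollingplaceeasting',
                         'pollingplacenorthing'])

record = Row('5151', '623456', '291234')
if record.pollingplaceid in {'5151', '5370', '5418', '5319'}:
    # _replace returns a *new* namedtuple and accepts several fields at
    # once; zeroed coordinates make the importer fall back to geocoding.
    record = record._replace(pollingplaceeasting='0',
                             pollingplacenorthing='0')
print(record)
```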
45c400e02fbeb5b455e27fef81e47e45f274eaec
core/forms.py
core/forms.py
from django import forms class GameForm(forms.Form): amount = forms.IntegerField() def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) for name, field in self.fields.items(): if isinstance(field, forms.IntegerField): self.fields[name].widget.input_type = "number" if field.required: self.fields[name].widget.attrs["required"] = ""
from django import forms class GameForm(forms.Form): amount = forms.IntegerField(initial=100) def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) for name, field in self.fields.items(): if isinstance(field, forms.IntegerField): self.fields[name].widget.input_type = "number" if field.required: self.fields[name].widget.attrs["required"] = ""
Add a default bet amount.
Add a default bet amount.
Python
bsd-2-clause
stephenmcd/gamblor,stephenmcd/gamblor
from django import forms class GameForm(forms.Form): - amount = forms.IntegerField() + amount = forms.IntegerField(initial=100) def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) for name, field in self.fields.items(): if isinstance(field, forms.IntegerField): self.fields[name].widget.input_type = "number" if field.required: self.fields[name].widget.attrs["required"] = ""
Add a default bet amount.
## Code Before: from django import forms class GameForm(forms.Form): amount = forms.IntegerField() def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) for name, field in self.fields.items(): if isinstance(field, forms.IntegerField): self.fields[name].widget.input_type = "number" if field.required: self.fields[name].widget.attrs["required"] = "" ## Instruction: Add a default bet amount. ## Code After: from django import forms class GameForm(forms.Form): amount = forms.IntegerField(initial=100) def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) for name, field in self.fields.items(): if isinstance(field, forms.IntegerField): self.fields[name].widget.input_type = "number" if field.required: self.fields[name].widget.attrs["required"] = ""
# ... existing code ... class GameForm(forms.Form): amount = forms.IntegerField(initial=100) def __init__(self, *args, **kwargs): super(GameForm, self).__init__(*args, **kwargs) # ... rest of the code ...
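`initial=100` only prefills the widget of an unbound form; it is not a fallback when submitted data omits the field. A small sketch of that distinction, assuming a recent Django and using `settings.configure()` so forms work outside a project:

```python
import django
from django.conf import settings

settings.configure()  # bare-bones settings, enough for standalone forms
django.setup()

from django import forms

class GameForm(forms.Form):
    amount = forms.IntegerField(initial=100)

print(GameForm()['amount'].initial)  # 100: what an empty form renders
print(GameForm(data={}).is_valid())  # False: initial is not submitted data
```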
46b8a4d0668c764df85f1e8a94672d81dd112beb
maps/api/views.py
maps/api/views.py
from django.http import HttpResponse def list_question_sets(request): return HttpResponse('Lol, udachi')
import json from django.http import HttpResponse from maps.models import QuestionSet def list_question_sets(request): objects = QuestionSet.objects.all() items = [] for obj in objects: items.append({ 'title': obj.title, 'max_duration': obj.max_duration.seconds, 'creator': { 'full_name': obj.creator.get_full_name() } }) return HttpResponse(json.dumps(items))
Add API method for question sets list
Add API method for question sets list
Python
mit
sevazhidkov/greenland,sevazhidkov/greenland
+ import json from django.http import HttpResponse + from maps.models import QuestionSet def list_question_sets(request): - return HttpResponse('Lol, udachi') + objects = QuestionSet.objects.all() + items = [] + for obj in objects: + items.append({ + 'title': obj.title, + 'max_duration': obj.max_duration.seconds, + 'creator': { + 'full_name': obj.creator.get_full_name() + } + }) + return HttpResponse(json.dumps(items))
Add API method for question sets list
## Code Before: from django.http import HttpResponse def list_question_sets(request): return HttpResponse('Lol, udachi') ## Instruction: Add API method for question sets list ## Code After: import json from django.http import HttpResponse from maps.models import QuestionSet def list_question_sets(request): objects = QuestionSet.objects.all() items = [] for obj in objects: items.append({ 'title': obj.title, 'max_duration': obj.max_duration.seconds, 'creator': { 'full_name': obj.creator.get_full_name() } }) return HttpResponse(json.dumps(items))
... import json from django.http import HttpResponse from maps.models import QuestionSet def list_question_sets(request): objects = QuestionSet.objects.all() items = [] for obj in objects: items.append({ 'title': obj.title, 'max_duration': obj.max_duration.seconds, 'creator': { 'full_name': obj.creator.get_full_name() } }) return HttpResponse(json.dumps(items)) ...
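Inside a Django project the same view could also lean on `JsonResponse`, which sets the JSON content type and serializes in one step; `safe=False` is required because the payload is a list rather than a dict. A sketch of that variant, not the record's actual code:

```python
from django.http import JsonResponse

from maps.models import QuestionSet

def list_question_sets(request):
    items = [
        {
            'title': qs.title,
            'max_duration': qs.max_duration.seconds,
            'creator': {'full_name': qs.creator.get_full_name()},
        }
        for qs in QuestionSet.objects.all()
    ]
    # safe=False permits a top-level list; by default JsonResponse only
    # accepts dicts as a guard against old JSON-hijacking attacks.
    return JsonResponse(items, safe=False)
```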
8284a8e61ed6c4e6b3402c55d2247f7e468a6872
tests/test_integrations/test_get_a_token.py
tests/test_integrations/test_get_a_token.py
import os import unittest from dotenv import load_dotenv from auth0plus.oauth import get_token load_dotenv('.env') class TestGetAToken(unittest.TestCase): def setUp(self): """ Get a non-interactive client secret """ self.domain = os.getenv('DOMAIN') self.client_id = os.getenv('CLIENT_ID') self.secret_id = os.getenv('CLIENT_SECRET') def test_get_a_token(self): """ Test getting a 24 hour token from the oauth token endpoint """ token = get_token(self.domain, self.client_id, self.secret_id) assert token['access_token']
import os import unittest from dotenv import load_dotenv from auth0plus.oauth import get_token load_dotenv('.env') skip = os.getenv('SKIP_INTEGRATION_TESTS') == '1' # assumed definition: the skipIf reason string implies this flag class TestGetAToken(unittest.TestCase): @unittest.skipIf(skip, 'SKIP_INTEGRATION_TESTS==1') def setUp(self): """ Get a non-interactive client secret """ self.domain = os.getenv('DOMAIN') self.client_id = os.getenv('CLIENT_ID') self.secret_id = os.getenv('CLIENT_SECRET') def test_get_a_token(self): """ Test getting a 24 hour token from the oauth token endpoint """ token = get_token(self.domain, self.client_id, self.secret_id) assert token['access_token']
Add unittest skip for CI
Add unittest skip for CI
Python
isc
bretth/auth0plus
import os import unittest from dotenv import load_dotenv from auth0plus.oauth import get_token load_dotenv('.env') + skip = os.getenv('SKIP_INTEGRATION_TESTS') == '1' # assumed definition: the skipIf reason string implies this flag class TestGetAToken(unittest.TestCase): + @unittest.skipIf(skip, 'SKIP_INTEGRATION_TESTS==1') def setUp(self): """ Get a non-interactive client secret """ self.domain = os.getenv('DOMAIN') self.client_id = os.getenv('CLIENT_ID') self.secret_id = os.getenv('CLIENT_SECRET') def test_get_a_token(self): """ Test getting a 24 hour token from the oauth token endpoint """ token = get_token(self.domain, self.client_id, self.secret_id) assert token['access_token']
Add unittest skip for CI
## Code Before: import os import unittest from dotenv import load_dotenv from auth0plus.oauth import get_token load_dotenv('.env') class TestGetAToken(unittest.TestCase): def setUp(self): """ Get a non-interactive client secret """ self.domain = os.getenv('DOMAIN') self.client_id = os.getenv('CLIENT_ID') self.secret_id = os.getenv('CLIENT_SECRET') def test_get_a_token(self): """ Test getting a 24 hour token from the oauth token endpoint """ token = get_token(self.domain, self.client_id, self.secret_id) assert token['access_token'] ## Instruction: Add unittest skip for CI ## Code After: import os import unittest from dotenv import load_dotenv from auth0plus.oauth import get_token load_dotenv('.env') skip = os.getenv('SKIP_INTEGRATION_TESTS') == '1' # assumed definition: the skipIf reason string implies this flag class TestGetAToken(unittest.TestCase): @unittest.skipIf(skip, 'SKIP_INTEGRATION_TESTS==1') def setUp(self): """ Get a non-interactive client secret """ self.domain = os.getenv('DOMAIN') self.client_id = os.getenv('CLIENT_ID') self.secret_id = os.getenv('CLIENT_SECRET') def test_get_a_token(self): """ Test getting a 24 hour token from the oauth token endpoint """ token = get_token(self.domain, self.client_id, self.secret_id) assert token['access_token']
# ... existing code ... skip = os.getenv('SKIP_INTEGRATION_TESTS') == '1' # assumed definition: the skipIf reason string implies this flag class TestGetAToken(unittest.TestCase): @unittest.skipIf(skip, 'SKIP_INTEGRATION_TESTS==1') def setUp(self): """ Get a non-interactive client secret # ... rest of the code ...
745568d54b705cf767142911556c7d87a0397919
lfs/shipping/migrations/0002_auto_20170216_0739.py
lfs/shipping/migrations/0002_auto_20170216_0739.py
from __future__ import unicode_literals from django.db import migrations def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.NetPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.GrossPriceCalculator" shipping_method.save() class Migration(migrations.Migration): dependencies = [ ('shipping', '0001_initial'), ] operations = [ migrations.RunPython(update_price_calculator), ]
from __future__ import unicode_literals from django.db import migrations def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.NetShippingMethodPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.GrossShippingMethodPriceCalculator" shipping_method.save() class Migration(migrations.Migration): dependencies = [ ('shipping', '0001_initial'), ] operations = [ migrations.RunPython(update_price_calculator), ]
Fix price calculator class names
Fix price calculator class names
Python
bsd-3-clause
diefenbach/django-lfs,diefenbach/django-lfs,diefenbach/django-lfs
from __future__ import unicode_literals from django.db import migrations def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): - shipping_method.price_calculator = "lfs.shipping.calculator.NetPriceCalculator" + shipping_method.price_calculator = "lfs.shipping.calculator.NetShippingMethodPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): - shipping_method.price_calculator = "lfs.shipping.calculator.GrossPriceCalculator" + shipping_method.price_calculator = "lfs.shipping.calculator.GrossShippingMethodPriceCalculator" shipping_method.save() class Migration(migrations.Migration): dependencies = [ ('shipping', '0001_initial'), ] operations = [ migrations.RunPython(update_price_calculator), ]
Fix price calculator class names
## Code Before: from __future__ import unicode_literals from django.db import migrations def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.NetPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.GrossPriceCalculator" shipping_method.save() class Migration(migrations.Migration): dependencies = [ ('shipping', '0001_initial'), ] operations = [ migrations.RunPython(update_price_calculator), ] ## Instruction: Fix price calculator class names ## Code After: from __future__ import unicode_literals from django.db import migrations def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.NetShippingMethodPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.GrossShippingMethodPriceCalculator" shipping_method.save() class Migration(migrations.Migration): dependencies = [ ('shipping', '0001_initial'), ] operations = [ migrations.RunPython(update_price_calculator), ]
// ... existing code ... def update_price_calculator(apps, schema_editor): ShippingMethod = apps.get_model("shipping", "ShippingMethod") for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.NetShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.NetShippingMethodPriceCalculator" shipping_method.save() for shipping_method in ShippingMethod.objects.filter(price_calculator="lfs.shipping.GrossShippingMethodPriceCalculator"): shipping_method.price_calculator = "lfs.shipping.calculator.GrossShippingMethodPriceCalculator" shipping_method.save() // ... rest of the code ...
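For a straight value rename like this, the data migration could also issue one `UPDATE` per calculator via `queryset.update()` instead of a per-row `save()`. A sketch of that variant, using the same historical-model access:

```python
def update_price_calculator(apps, schema_editor):
    ShippingMethod = apps.get_model('shipping', 'ShippingMethod')
    renames = {
        'lfs.shipping.NetShippingMethodPriceCalculator':
            'lfs.shipping.calculator.NetShippingMethodPriceCalculator',
        'lfs.shipping.GrossShippingMethodPriceCalculator':
            'lfs.shipping.calculator.GrossShippingMethodPriceCalculator',
    }
    for old, new in renames.items():
        # update() runs a single UPDATE statement per entry and skips
        # model save() logic, which historical models do not carry anyway.
        ShippingMethod.objects.filter(price_calculator=old).update(
            price_calculator=new)
```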
feb88aa30b362e02671d51d8b3e03a7194d99646
kobra/urls.py
kobra/urls.py
from django.conf.urls import include, url from django.contrib import admin from .views import web_client_view urlpatterns = [ # url(r'^', include('kobra.api.v1.urls', namespace='legacy')), url(r'^api/v1/', include('kobra.api.v1.urls', namespace='v1')), url(r'^admin/', include(admin.site.urls)), # Matches everything and therefore must come last. url(r'^', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) ]
from django.conf.urls import include, url from django.contrib import admin from .views import web_client_view urlpatterns = [ # url(r'^', include('kobra.api.v1.urls', namespace='legacy')), url(r'^api/v1/', include('kobra.api.v1.urls', namespace='v1')), url(r'^admin/', include(admin.site.urls)), # Matches everything* and therefore must come last. # *everything except /static/... since this breaks the static file serving. url(r'^(?!static/)', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) ]
Fix for broken static file serving
Fix for broken static file serving
Python
mit
karservice/kobra,karservice/kobra,karservice/kobra,karservice/kobra
from django.conf.urls import include, url from django.contrib import admin from .views import web_client_view urlpatterns = [ # url(r'^', include('kobra.api.v1.urls', namespace='legacy')), url(r'^api/v1/', include('kobra.api.v1.urls', namespace='v1')), url(r'^admin/', include(admin.site.urls)), - # Matches everything and therefore must come last. + # Matches everything* and therefore must come last. + # *everything except /static/... since this breaks the static file serving. - url(r'^', include([ + url(r'^(?!static/)', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) ]
Fix for broken static file serving
## Code Before: from django.conf.urls import include, url from django.contrib import admin from .views import web_client_view urlpatterns = [ # url(r'^', include('kobra.api.v1.urls', namespace='legacy')), url(r'^api/v1/', include('kobra.api.v1.urls', namespace='v1')), url(r'^admin/', include(admin.site.urls)), # Matches everything and therefore must come last. url(r'^', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) ] ## Instruction: Fix for broken static file serving ## Code After: from django.conf.urls import include, url from django.contrib import admin from .views import web_client_view urlpatterns = [ # url(r'^', include('kobra.api.v1.urls', namespace='legacy')), url(r'^api/v1/', include('kobra.api.v1.urls', namespace='v1')), url(r'^admin/', include(admin.site.urls)), # Matches everything* and therefore must come last. # *everything except /static/... since this breaks the static file serving. url(r'^(?!static/)', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) ]
# ... existing code ... url(r'^admin/', include(admin.site.urls)), # Matches everything* and therefore must come last. # *everything except /static/... since this breaks the static file serving. url(r'^(?!static/)', include([ url(r'^$', web_client_view, name='home'), url(r'^.*/$', web_client_view) ], namespace='web-client')) # ... rest of the code ...
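`(?!static/)` is a zero-width negative lookahead: the catch-all only matches when the path does not continue with `static/`, so static assets fall through to the file-serving handler. A runnable sketch of just the regex (Django matches URL patterns against the path with its leading slash stripped):

```python
import re

catch_all = re.compile(r'^(?!static/)')

for path in ('', 'events/', 'static/css/app.css'):
    print(repr(path), bool(catch_all.match(path)))
# '' True
# 'events/' True
# 'static/css/app.css' False
```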
d3caf80485da78c8eb050ff4d9e33a2ee6c8feda
tests/rietveld/test_event_handler.py
tests/rietveld/test_event_handler.py
from __future__ import absolute_import, print_function import unittest from qtpy.QtWidgets import QApplication from addie.rietveld import event_handler class RietveldEventHandlerTests(unittest.TestCase): def setUp(self): self.main_window = QApplication([]) ''' def tearDown(self): self.main_window.quit() ''' def test_evt_change_gss_mode_exception(self): """Test we can extract a bank id from bank workspace name""" f = event_handler.evt_change_gss_mode self.assertRaises(NotImplementedError, f, None) if __name__ == '__main__': unittest.main() # pragma: no cover
from __future__ import absolute_import, print_function import pytest from addie.rietveld import event_handler @pytest.fixture def rietveld_event_handler(qtbot): return event_handler def test_evt_change_gss_mode_exception(qtbot, rietveld_event_handler): """Test we can extract a bank id from bank workspace name""" with pytest.raises(NotImplementedError) as e: rietveld_event_handler.evt_change_gss_mode(None)
Refactor rietveld.event_handler test to use pytest-qt
Refactor rietveld.event_handler test to use pytest-qt
Python
mit
neutrons/FastGR,neutrons/FastGR,neutrons/FastGR
from __future__ import absolute_import, print_function - import unittest + import pytest - from qtpy.QtWidgets import QApplication from addie.rietveld import event_handler + @pytest.fixture + def rietveld_event_handler(qtbot): + return event_handler - class RietveldEventHandlerTests(unittest.TestCase): - def setUp(self): - self.main_window = QApplication([]) - - ''' - def tearDown(self): - self.main_window.quit() - ''' - - def test_evt_change_gss_mode_exception(self): - """Test we can extract a bank id from bank workspace name""" - f = event_handler.evt_change_gss_mode - self.assertRaises(NotImplementedError, f, None) - if __name__ == '__main__': - unittest.main() # pragma: no cover + def test_evt_change_gss_mode_exception(qtbot, rietveld_event_handler): + """Test we can extract a bank id from bank workspace name""" + with pytest.raises(NotImplementedError) as e: + rietveld_event_handler.evt_change_gss_mode(None)
Refactor rietveld.event_handler test to use pytest-qt
## Code Before: from __future__ import absolute_import, print_function import unittest from qtpy.QtWidgets import QApplication from addie.rietveld import event_handler class RietveldEventHandlerTests(unittest.TestCase): def setUp(self): self.main_window = QApplication([]) ''' def tearDown(self): self.main_window.quit() ''' def test_evt_change_gss_mode_exception(self): """Test we can extract a bank id from bank workspace name""" f = event_handler.evt_change_gss_mode self.assertRaises(NotImplementedError, f, None) if __name__ == '__main__': unittest.main() # pragma: no cover ## Instruction: Refactor rietveld.event_handler test to use pytest-qt ## Code After: from __future__ import absolute_import, print_function import pytest from addie.rietveld import event_handler @pytest.fixture def rietveld_event_handler(qtbot): return event_handler def test_evt_change_gss_mode_exception(qtbot, rietveld_event_handler): """Test we can extract a bank id from bank workspace name""" with pytest.raises(NotImplementedError) as e: rietveld_event_handler.evt_change_gss_mode(None)
# ... existing code ... from __future__ import absolute_import, print_function import pytest from addie.rietveld import event_handler @pytest.fixture def rietveld_event_handler(qtbot): return event_handler def test_evt_change_gss_mode_exception(qtbot, rietveld_event_handler): """Test we can extract a bank id from bank workspace name""" with pytest.raises(NotImplementedError) as e: rietveld_event_handler.evt_change_gss_mode(None) # ... rest of the code ...
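`pytest.raises` as a context manager passes only when the block raises the named exception, and hands the exception back on `.value` for further checks. A self-contained sketch (the handler below is a stand-in that raises, not the addie implementation):

```python
import pytest

def evt_change_gss_mode(event):
    # Stand-in for the real handler, which raises NotImplementedError.
    raise NotImplementedError('GSS mode switching is not implemented')

def test_mode_switch_not_implemented():
    with pytest.raises(NotImplementedError) as excinfo:
        evt_change_gss_mode(None)
    assert 'not implemented' in str(excinfo.value)
```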
46f3067650001454ed99351cc5569813a378dcec
mopidy_jukebox/frontend.py
mopidy_jukebox/frontend.py
import pykka from mopidy import core class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core def track_playback_ended(self, tl_track, time_position): # Remove old votes pass def track_playback_started(self, tl_track): pass
import pykka from mopidy import core from models import Vote class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core core.tracklist.set_consume(True) def track_playback_ended(self, tl_track, time_position): # Remove old votes Vote.delete().where(Vote.track_uri == tl_track.track.uri).execute() def track_playback_started(self, tl_track): pass
Delete votes when track is over.
Delete votes when track is over.
Python
mit
qurben/mopidy-jukebox,qurben/mopidy-jukebox,qurben/mopidy-jukebox
import pykka from mopidy import core + + from models import Vote class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core + core.tracklist.set_consume(True) def track_playback_ended(self, tl_track, time_position): # Remove old votes - pass + Vote.delete().where(Vote.track_uri == tl_track.track.uri).execute() def track_playback_started(self, tl_track): pass
Delete votes when track is over.
## Code Before: import pykka from mopidy import core class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core def track_playback_ended(self, tl_track, time_position): # Remove old votes pass def track_playback_started(self, tl_track): pass ## Instruction: Delete votes when track is over. ## Code After: import pykka from mopidy import core from models import Vote class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core core.tracklist.set_consume(True) def track_playback_ended(self, tl_track, time_position): # Remove old votes Vote.delete().where(Vote.track_uri == tl_track.track.uri).execute() def track_playback_started(self, tl_track): pass
# ... existing code ... import pykka from mopidy import core from models import Vote class JukeboxFrontend(pykka.ThreadingActor, core.CoreListener): # ... modified code ... def __init__(self, config, core): super(JukeboxFrontend, self).__init__() self.core = core core.tracklist.set_consume(True) def track_playback_ended(self, tl_track, time_position): # Remove old votes Vote.delete().where(Vote.track_uri == tl_track.track.uri).execute() def track_playback_started(self, tl_track): pass # ... rest of the code ...
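The `Vote.delete().where(...).execute()` chain is peewee's bulk-delete API. The project's models module is not shown in this record, so the sketch below assumes a minimal peewee `Vote` model:

```python
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(':memory:')

class Vote(Model):
    track_uri = CharField()

    class Meta:
        database = db

db.create_tables([Vote])
Vote.create(track_uri='local:track:a.mp3')
Vote.create(track_uri='local:track:b.mp3')

# Runs one DELETE ... WHERE and returns the number of rows removed.
removed = Vote.delete().where(Vote.track_uri == 'local:track:a.mp3').execute()
print(removed, Vote.select().count())  # 1 1
```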
1a5583fdba626059e5481e6099b14b8988316dfe
server/superdesk/locators/__init__.py
server/superdesk/locators/__init__.py
import json import os def _load_json(file_path): """ Reads JSON string from the file located in file_path. :param file_path: path of the file having JSON string. :return: JSON Object """ with open(file_path, 'r') as f: return json.load(f) _dir_name = os.path.dirname(os.path.realpath(__file__)) _locators_file_path = os.path.join(_dir_name, 'data', 'locators.json') locators = _load_json(_locators_file_path)
import json import os def _load_json(file_path): """ Reads JSON string from the file located in file_path. :param file_path: path of the file having JSON string. :return: JSON Object """ with open(file_path, 'r', encoding='utf-8') as f: return json.load(f) _dir_name = os.path.dirname(os.path.realpath(__file__)) _locators_file_path = os.path.join(_dir_name, 'data', 'locators.json') locators = _load_json(_locators_file_path)
Fix locators reading on ubuntu
Fix locators reading on ubuntu
Python
agpl-3.0
thnkloud9/superdesk,superdesk/superdesk,marwoodandrew/superdesk-aap,ancafarcas/superdesk,ioanpocol/superdesk-ntb,gbbr/superdesk,liveblog/superdesk,plamut/superdesk,pavlovicnemanja92/superdesk,akintolga/superdesk,pavlovicnemanja92/superdesk,pavlovicnemanja/superdesk,verifiedpixel/superdesk,amagdas/superdesk,akintolga/superdesk-aap,petrjasek/superdesk,akintolga/superdesk,amagdas/superdesk,darconny/superdesk,plamut/superdesk,mdhaman/superdesk,ioanpocol/superdesk-ntb,marwoodandrew/superdesk,mugurrus/superdesk,plamut/superdesk,Aca-jov/superdesk,gbbr/superdesk,sivakuna-aap/superdesk,Aca-jov/superdesk,marwoodandrew/superdesk,mdhaman/superdesk-aap,petrjasek/superdesk-ntb,mdhaman/superdesk,ioanpocol/superdesk,hlmnrmr/superdesk,plamut/superdesk,verifiedpixel/superdesk,marwoodandrew/superdesk,liveblog/superdesk,superdesk/superdesk,pavlovicnemanja/superdesk,petrjasek/superdesk-ntb,petrjasek/superdesk-ntb,mdhaman/superdesk-aap,ancafarcas/superdesk,mdhaman/superdesk-aap,sivakuna-aap/superdesk,hlmnrmr/superdesk,akintolga/superdesk-aap,marwoodandrew/superdesk-aap,petrjasek/superdesk,superdesk/superdesk-aap,pavlovicnemanja92/superdesk,darconny/superdesk,liveblog/superdesk,superdesk/superdesk,verifiedpixel/superdesk,amagdas/superdesk,petrjasek/superdesk,superdesk/superdesk-ntb,Aca-jov/superdesk,superdesk/superdesk,pavlovicnemanja92/superdesk,akintolga/superdesk,marwoodandrew/superdesk,akintolga/superdesk-aap,fritzSF/superdesk,superdesk/superdesk-aap,superdesk/superdesk-ntb,superdesk/superdesk-ntb,hlmnrmr/superdesk,ioanpocol/superdesk,ioanpocol/superdesk-ntb,fritzSF/superdesk,superdesk/superdesk-ntb,sjunaid/superdesk,pavlovicnemanja/superdesk,sivakuna-aap/superdesk,liveblog/superdesk,amagdas/superdesk,thnkloud9/superdesk,akintolga/superdesk,sjunaid/superdesk,marwoodandrew/superdesk-aap,superdesk/superdesk-aap,sivakuna-aap/superdesk,mdhaman/superdesk,pavlovicnemanja/superdesk,akintolga/superdesk,fritzSF/superdesk,sjunaid/superdesk,verifiedpixel/superdesk,ioanpocol/superdesk,plamut/superdesk,petrjasek/superdesk,mugurrus/superdesk,mdhaman/superdesk-aap,ancafarcas/superdesk,akintolga/superdesk-aap,darconny/superdesk,petrjasek/superdesk-ntb,amagdas/superdesk,sivakuna-aap/superdesk,thnkloud9/superdesk,marwoodandrew/superdesk,verifiedpixel/superdesk,fritzSF/superdesk,marwoodandrew/superdesk-aap,pavlovicnemanja92/superdesk,fritzSF/superdesk,liveblog/superdesk,gbbr/superdesk,superdesk/superdesk-aap,mugurrus/superdesk
import json import os def _load_json(file_path): """ Reads JSON string from the file located in file_path. :param file_path: path of the file having JSON string. :return: JSON Object """ - with open(file_path, 'r') as f: + with open(file_path, 'r', encoding='utf-8') as f: return json.load(f) _dir_name = os.path.dirname(os.path.realpath(__file__)) _locators_file_path = os.path.join(_dir_name, 'data', 'locators.json') locators = _load_json(_locators_file_path)
Fix locators reading on ubuntu
## Code Before:
import json
import os


def _load_json(file_path):
    """
    Reads JSON string from the file located in file_path.

    :param file_path: path of the file having JSON string.
    :return: JSON Object
    """
    with open(file_path, 'r') as f:
        return json.load(f)


_dir_name = os.path.dirname(os.path.realpath(__file__))
_locators_file_path = os.path.join(_dir_name, 'data', 'locators.json')
locators = _load_json(_locators_file_path)

## Instruction: Fix locators reading on ubuntu

## Code After:
import json
import os


def _load_json(file_path):
    """
    Reads JSON string from the file located in file_path.

    :param file_path: path of the file having JSON string.
    :return: JSON Object
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)


_dir_name = os.path.dirname(os.path.realpath(__file__))
_locators_file_path = os.path.join(_dir_name, 'data', 'locators.json')
locators = _load_json(_locators_file_path)
... :param file_path: path of the file having JSON string. :return: JSON Object """ with open(file_path, 'r', encoding='utf-8') as f: return json.load(f) ...
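Note: the Ubuntu failure comes down to locale-dependent defaults. Without an explicit encoding, Python 3's `open()` uses `locale.getpreferredencoding(False)`, which on a server with a minimal locale (e.g. `LANG=C`) can be ASCII rather than UTF-8, so non-ASCII locator strings raise `UnicodeDecodeError`. A minimal sketch of the difference (the locators path here is illustrative):

```python
import locale

# Whatever the process locale dictates -- 'UTF-8' on most desktops,
# 'ANSI_X3.4-1968' (ASCII) under LANG=C, as on a bare Ubuntu server.
print(locale.getpreferredencoding(False))

# Pinning the encoding makes the read behave the same everywhere.
with open('data/locators.json', encoding='utf-8') as f:
    text = f.read()
```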
96199b0d6dfea835d6bb23bc87060e5732ef4094
server/lib/python/cartodb_services/cartodb_services/mapzen/matrix_client.py
server/lib/python/cartodb_services/cartodb_services/mapzen/matrix_client.py
import requests import json class MatrixClient: ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many' def __init__(self, matrix_key): self._matrix_key = matrix_key """Get distances and times to a set of locations. See https://mapzen.com/documentation/matrix/api-reference/ Args: locations Array of {lat: y, lon: x} costing Costing model to use Returns: A dict with one_to_many, units and locations """ def one_to_many(self, locations, costing): request_params = { 'json': json.dumps({'locations': locations}), 'costing': costing, 'api_key': self._matrix_key } response = requests.get(self.ONE_TO_MANY_URL, params=request_params) return response.json()
import requests import json class MatrixClient: """ A minimal client for Mapzen Time-Distance Matrix Service Example: client = MatrixClient('your_api_key') locations = [{"lat":40.744014,"lon":-73.990508},{"lat":40.739735,"lon":-73.979713},{"lat":40.752522,"lon":-73.985015},{"lat":40.750117,"lon":-73.983704},{"lat":40.750552,"lon":-73.993519}] costing = 'pedestrian' print client.one_to_many(locations, costing) """ ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many' def __init__(self, matrix_key): self._matrix_key = matrix_key """Get distances and times to a set of locations. See https://mapzen.com/documentation/matrix/api-reference/ Args: locations Array of {lat: y, lon: x} costing Costing model to use Returns: A dict with one_to_many, units and locations """ def one_to_many(self, locations, costing): request_params = { 'json': json.dumps({'locations': locations}), 'costing': costing, 'api_key': self._matrix_key } response = requests.get(self.ONE_TO_MANY_URL, params=request_params) return response.json()
Add example to code doc
Add example to code doc
Python
bsd-3-clause
CartoDB/geocoder-api,CartoDB/geocoder-api,CartoDB/geocoder-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/geocoder-api,CartoDB/dataservices-api
import requests import json class MatrixClient: + + """ + A minimal client for Mapzen Time-Distance Matrix Service + + Example: + + client = MatrixClient('your_api_key') + locations = [{"lat":40.744014,"lon":-73.990508},{"lat":40.739735,"lon":-73.979713},{"lat":40.752522,"lon":-73.985015},{"lat":40.750117,"lon":-73.983704},{"lat":40.750552,"lon":-73.993519}] + costing = 'pedestrian' + + print client.one_to_many(locations, costing) + """ ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many' def __init__(self, matrix_key): self._matrix_key = matrix_key """Get distances and times to a set of locations. See https://mapzen.com/documentation/matrix/api-reference/ Args: locations Array of {lat: y, lon: x} costing Costing model to use Returns: A dict with one_to_many, units and locations - """ + """ def one_to_many(self, locations, costing): request_params = { 'json': json.dumps({'locations': locations}), 'costing': costing, 'api_key': self._matrix_key } response = requests.get(self.ONE_TO_MANY_URL, params=request_params) return response.json()
Add example to code doc
## Code Before:
import requests
import json


class MatrixClient:

    ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many'

    def __init__(self, matrix_key):
        self._matrix_key = matrix_key

    """Get distances and times to a set of locations.

    See https://mapzen.com/documentation/matrix/api-reference/

    Args:
        locations Array of {lat: y, lon: x}
        costing Costing model to use

    Returns:
        A dict with one_to_many, units and locations
    """
    def one_to_many(self, locations, costing):
        request_params = {
            'json': json.dumps({'locations': locations}),
            'costing': costing,
            'api_key': self._matrix_key
        }
        response = requests.get(self.ONE_TO_MANY_URL, params=request_params)
        return response.json()

## Instruction: Add example to code doc

## Code After:
import requests
import json


class MatrixClient:

    """
    A minimal client for Mapzen Time-Distance Matrix Service

    Example:

        client = MatrixClient('your_api_key')
        locations = [{"lat":40.744014,"lon":-73.990508},{"lat":40.739735,"lon":-73.979713},{"lat":40.752522,"lon":-73.985015},{"lat":40.750117,"lon":-73.983704},{"lat":40.750552,"lon":-73.993519}]
        costing = 'pedestrian'

        print client.one_to_many(locations, costing)
    """

    ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many'

    def __init__(self, matrix_key):
        self._matrix_key = matrix_key

    """Get distances and times to a set of locations.

    See https://mapzen.com/documentation/matrix/api-reference/

    Args:
        locations Array of {lat: y, lon: x}
        costing Costing model to use

    Returns:
        A dict with one_to_many, units and locations
    """
    def one_to_many(self, locations, costing):
        request_params = {
            'json': json.dumps({'locations': locations}),
            'costing': costing,
            'api_key': self._matrix_key
        }
        response = requests.get(self.ONE_TO_MANY_URL, params=request_params)
        return response.json()
... import json class MatrixClient: """ A minimal client for Mapzen Time-Distance Matrix Service Example: client = MatrixClient('your_api_key') locations = [{"lat":40.744014,"lon":-73.990508},{"lat":40.739735,"lon":-73.979713},{"lat":40.752522,"lon":-73.985015},{"lat":40.750117,"lon":-73.983704},{"lat":40.750552,"lon":-73.993519}] costing = 'pedestrian' print client.one_to_many(locations, costing) """ ONE_TO_MANY_URL = 'https://matrix.mapzen.com/one_to_many' ... Returns: A dict with one_to_many, units and locations """ def one_to_many(self, locations, costing): request_params = { 'json': json.dumps({'locations': locations}), ...
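Note: the docstring example added by this commit is Python 2 (`print client.one_to_many(...)`). A Python 3 rendering of the same call would look like the sketch below; the key is a placeholder, and since Mapzen has since shut down, this is illustrative only:

```python
client = MatrixClient('your_api_key')  # hypothetical key
locations = [
    {"lat": 40.744014, "lon": -73.990508},
    {"lat": 40.739735, "lon": -73.979713},
]
result = client.one_to_many(locations, costing='pedestrian')
print(result)  # dict with 'one_to_many', 'units' and 'locations'
```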
ad21c9255f6246944cd032ad50082c0aca46fcb3
neurokernel/tools/mpi.py
neurokernel/tools/mpi.py
from mpi4py import MPI import twiggy class MPIOutput(twiggy.outputs.Output): """ Output messages to a file via MPI I/O. """ def __init__(self, name, format, comm, mode=MPI.MODE_CREATE | MPI.MODE_WRONLY, close_atexit=True): self.filename = name self._format = format if format is not None else self._noop_format self.comm = comm self.mode = mode super(MPIOutput, self).__init__(format, close_atexit) def _open(self): self.file = MPI.File.Open(self.comm, self.filename, self.mode) def _close(self): self.file.Close() def _write(self, x): self.file.Iwrite_shared(x)
from mpi4py import MPI import twiggy class MPIOutput(twiggy.outputs.Output): """ Output messages to a file via MPI I/O. """ def __init__(self, name, format, comm, mode=MPI.MODE_CREATE | MPI.MODE_WRONLY, close_atexit=True): self.filename = name self._format = format if format is not None else self._noop_format self.comm = comm self.mode = mode super(MPIOutput, self).__init__(format, close_atexit) def _open(self): self.file = MPI.File.Open(self.comm, self.filename, self.mode) def _close(self): self.file.Close() def _write(self, x): self.file.Iwrite_shared(x) # This seems to be necessary to prevent some log lines from being lost: self.file.Sync()
Call MPIOutput.file.Sync() in MPIOutput.file._write() to prevent log lines from intermittently being lost.
Call MPIOutput.file.Sync() in MPIOutput.file._write() to prevent log lines from intermittently being lost.
Python
bsd-3-clause
cerrno/neurokernel
from mpi4py import MPI import twiggy class MPIOutput(twiggy.outputs.Output): """ Output messages to a file via MPI I/O. """ def __init__(self, name, format, comm, mode=MPI.MODE_CREATE | MPI.MODE_WRONLY, close_atexit=True): self.filename = name self._format = format if format is not None else self._noop_format self.comm = comm self.mode = mode super(MPIOutput, self).__init__(format, close_atexit) def _open(self): self.file = MPI.File.Open(self.comm, self.filename, self.mode) def _close(self): self.file.Close() def _write(self, x): self.file.Iwrite_shared(x) + # This seems to be necessary to prevent some log lines from being lost: + self.file.Sync()
Call MPIOutput.file.Sync() in MPIOutput.file._write() to prevent log lines from intermittently being lost.
## Code Before:
from mpi4py import MPI
import twiggy


class MPIOutput(twiggy.outputs.Output):
    """
    Output messages to a file via MPI I/O.
    """

    def __init__(self, name, format, comm, mode=MPI.MODE_CREATE | MPI.MODE_WRONLY, close_atexit=True):
        self.filename = name
        self._format = format if format is not None else self._noop_format
        self.comm = comm
        self.mode = mode
        super(MPIOutput, self).__init__(format, close_atexit)

    def _open(self):
        self.file = MPI.File.Open(self.comm, self.filename, self.mode)

    def _close(self):
        self.file.Close()

    def _write(self, x):
        self.file.Iwrite_shared(x)

## Instruction: Call MPIOutput.file.Sync() in MPIOutput.file._write() to prevent log lines from intermittently being lost.

## Code After:
from mpi4py import MPI
import twiggy


class MPIOutput(twiggy.outputs.Output):
    """
    Output messages to a file via MPI I/O.
    """

    def __init__(self, name, format, comm, mode=MPI.MODE_CREATE | MPI.MODE_WRONLY, close_atexit=True):
        self.filename = name
        self._format = format if format is not None else self._noop_format
        self.comm = comm
        self.mode = mode
        super(MPIOutput, self).__init__(format, close_atexit)

    def _open(self):
        self.file = MPI.File.Open(self.comm, self.filename, self.mode)

    def _close(self):
        self.file.Close()

    def _write(self, x):
        self.file.Iwrite_shared(x)

        # This seems to be necessary to prevent some log lines from being lost:
        self.file.Sync()
... def _write(self, x): self.file.Iwrite_shared(x) # This seems to be necessary to prevent some log lines from being lost: self.file.Sync() ...
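Note: the fix works because `Iwrite_shared` is a nonblocking write -- it returns a request immediately, and nothing guarantees the bytes reach storage before the process moves on. `Sync()` forces them out. A standalone mpi4py sketch of the same pattern, outside twiggy (run under mpiexec; the filename is made up):

```python
from mpi4py import MPI

comm = MPI.COMM_WORLD
fh = MPI.File.Open(comm, 'ranks.log',
                   MPI.MODE_CREATE | MPI.MODE_WRONLY)
# Nonblocking shared-pointer write: returns a Request, so the data may
# still be in flight afterwards.
fh.Iwrite_shared(b'hello from rank %d\n' % comm.Get_rank())
fh.Sync()   # flush to storage -- the line the commit adds
fh.Close()
```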
55b878bae84fea91bf210e3b30a726877990732e
config.py
config.py
import os # # This is the configuration file of the application # # Please make sure you don't store here any secret information and use environment # variables # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME') SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD') SQLALCHEMY_POOL_RECYCLE = 60 SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf' # available languages LANGUAGES = { 'en': 'English', 'he': 'עברית', }
import os # # This is the configuration file of the application # # Please make sure you don't store here any secret information and use environment # variables # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') SQLALCHEMY_TRACK_MODIFICATIONS = True SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME') SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD') SQLALCHEMY_POOL_RECYCLE = 60 SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf' # available languages LANGUAGES = { 'en': 'English', 'he': 'עברית', }
Set SQLALCHEMY_TRACK_MODIFICATIONS = True to avoid warnings
Set SQLALCHEMY_TRACK_MODIFICATIONS = True to avoid warnings
Python
bsd-3-clause
boazin/anyway,yosinv/anyway,hasadna/anyway,yosinv/anyway,hasadna/anyway,boazin/anyway,hasadna/anyway,hasadna/anyway,boazin/anyway,yosinv/anyway
import os # # This is the configuration file of the application # # Please make sure you don't store here any secret information and use environment # variables # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') + SQLALCHEMY_TRACK_MODIFICATIONS = True SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME') SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD') SQLALCHEMY_POOL_RECYCLE = 60 SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf' # available languages LANGUAGES = { 'en': 'English', 'he': 'עברית', } +
Set SQLALCHEMY_TRACK_MODIFICATIONS = True to avoid warnings
## Code Before:
import os

#
# This is the configuration file of the application
#
# Please make sure you don't store here any secret information and use environment
# variables
#

SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')

SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME')
SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')

SQLALCHEMY_POOL_RECYCLE = 60

SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf'

# available languages
LANGUAGES = {
    'en': 'English',
    'he': 'עברית',
}

## Instruction: Set SQLALCHEMY_TRACK_MODIFICATIONS = True to avoid warnings

## Code After:
import os

#
# This is the configuration file of the application
#
# Please make sure you don't store here any secret information and use environment
# variables
#

SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True

SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME')
SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')

SQLALCHEMY_POOL_RECYCLE = 60

SECRET_KEY = 'aiosdjsaodjoidjioewnioewfnoeijfoisdjf'

# available languages
LANGUAGES = {
    'en': 'English',
    'he': 'עברית',
}
// ... existing code ... SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') SQLALCHEMY_TRACK_MODIFICATIONS = True SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME') SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD') SQLALCHEMY_POOL_RECYCLE = 60 // ... rest of the code ...
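Note: Flask-SQLAlchemy warns when `SQLALCHEMY_TRACK_MODIFICATIONS` is left unset; setting it explicitly silences the warning either way (True keeps the modification-tracking event system, False drops its overhead). A minimal sketch of how a Flask app would consume this module:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
# Flask copies every UPPERCASE name from the module into app.config.
app.config.from_object('config')
db = SQLAlchemy(app)  # no SQLALCHEMY_TRACK_MODIFICATIONS warning now
```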
417415283d87654b066c11d807516d3cd5b5bf3d
tests/test_probabilistic_interleave_speed.py
tests/test_probabilistic_interleave_speed.py
import interleaving as il import numpy as np import pytest np.random.seed(0) from .test_methods import TestMethods class TestProbabilisticInterleaveSpeed(TestMethods): def test_interleave(self): r1 = list(range(100)) r2 = list(range(100, 200)) for i in range(1000): method = il.Probabilistic([r1, r2]) ranking = method.interleave() print(list(ranking))
import interleaving as il import numpy as np import pytest np.random.seed(0) from .test_methods import TestMethods class TestProbabilisticInterleaveSpeed(TestMethods): def test_interleave(self): r1 = list(range(100)) r2 = list(range(50, 150)) r3 = list(range(100, 200)) r4 = list(range(150, 250)) for i in range(1000): method = il.Probabilistic([r1, r2, r3, r4]) ranking = method.interleave() method.evaluate(ranking, [0, 1, 2])
Add tests for measuring the speed of probabilistic interleaving
Add tests for measuring the speed of probabilistic interleaving
Python
mit
mpkato/interleaving
import interleaving as il import numpy as np import pytest np.random.seed(0) from .test_methods import TestMethods class TestProbabilisticInterleaveSpeed(TestMethods): def test_interleave(self): r1 = list(range(100)) + r2 = list(range(50, 150)) - r2 = list(range(100, 200)) + r3 = list(range(100, 200)) + r4 = list(range(150, 250)) for i in range(1000): - method = il.Probabilistic([r1, r2]) + method = il.Probabilistic([r1, r2, r3, r4]) ranking = method.interleave() - print(list(ranking)) + method.evaluate(ranking, [0, 1, 2])
Add tests for measuring the speed of probabilistic interleaving
## Code Before:
import interleaving as il
import numpy as np
import pytest
np.random.seed(0)
from .test_methods import TestMethods


class TestProbabilisticInterleaveSpeed(TestMethods):

    def test_interleave(self):
        r1 = list(range(100))
        r2 = list(range(100, 200))
        for i in range(1000):
            method = il.Probabilistic([r1, r2])
            ranking = method.interleave()
            print(list(ranking))

## Instruction: Add tests for measuring the speed of probabilistic interleaving

## Code After:
import interleaving as il
import numpy as np
import pytest
np.random.seed(0)
from .test_methods import TestMethods


class TestProbabilisticInterleaveSpeed(TestMethods):

    def test_interleave(self):
        r1 = list(range(100))
        r2 = list(range(50, 150))
        r3 = list(range(100, 200))
        r4 = list(range(150, 250))
        for i in range(1000):
            method = il.Probabilistic([r1, r2, r3, r4])
            ranking = method.interleave()
            method.evaluate(ranking, [0, 1, 2])
... def test_interleave(self): r1 = list(range(100)) r2 = list(range(50, 150)) r3 = list(range(100, 200)) r4 = list(range(150, 250)) for i in range(1000): method = il.Probabilistic([r1, r2, r3, r4]) ranking = method.interleave() method.evaluate(ranking, [0, 1, 2]) ...
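Note: the test exercises speed but records no numbers; pytest's `--durations` flag reports the slowest tests, or the loop can be timed directly. A rough sketch, with r1 through r4 as defined in the test:

```python
import time

iters = 1000
start = time.perf_counter()
for i in range(iters):
    method = il.Probabilistic([r1, r2, r3, r4])
    ranking = method.interleave()
    method.evaluate(ranking, [0, 1, 2])
elapsed = time.perf_counter() - start
print('total: %.2f s, per iteration: %.2f ms'
      % (elapsed, elapsed / iters * 1000))
```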
2bc249dc4996c0cccfe61a3d8bf1658fa987e7cf
costcocr/writers/csv.py
costcocr/writers/csv.py
def csv(): def Receipt(meta, body, variables): output = [] def add(s) : output.append(s) if "store" in meta: add("# Store: {}".format(meta["store"])) if "date" in meta: add("# Date: {}".format(meta["date"])) if "location" in meta: add("# Location: {}".format(meta["location"])) add(body) return "\n".join(output) def ItemList(s): return s def ItemListSep(): return "\n" ## Could also be used to collapse the fields, or exclude some. def Item(name, cost, discount, tax): return "{}, {}, {}, {}".format(name, cost, discount, tax) return { "Receipt" : Receipt, "ItemList" : ItemList, "ItemListSep" : ItemListSep, "Item" : Item, }
def Receipt(meta, body, variables): output = [] def add(s) : output.append(s) if "store" in meta: add("# Store: {}".format(meta["store"])) if "date" in meta: add("# Date: {}".format(meta["date"])) if "location" in meta: add("# Location: {}".format(meta["location"])) add(body) return "\n".join(output) def ItemList(s): return s def ItemListSep(): return "\n" ## Could also be used to collapse the fields, or exclude some. def Item(name, cost, discount, tax): return "{}, {}, {}, {}".format(name, cost, discount, tax)
Convert CSV writer to a module definition.
Convert CSV writer to a module definition. Use __import__ to import it as a dictionary.
Python
bsd-3-clause
rdodesigns/costcocr
+ def Receipt(meta, body, variables): + output = [] - def csv(): + def add(s) : output.append(s) - def Receipt(meta, body, variables): - output = [] + if "store" in meta: + add("# Store: {}".format(meta["store"])) + if "date" in meta: + add("# Date: {}".format(meta["date"])) + if "location" in meta: + add("# Location: {}".format(meta["location"])) - def add(s) : output.append(s) + add(body) + return "\n".join(output) - if "store" in meta: - add("# Store: {}".format(meta["store"])) - if "date" in meta: - add("# Date: {}".format(meta["date"])) - if "location" in meta: - add("# Location: {}".format(meta["location"])) - add(body) + def ItemList(s): return s + def ItemListSep(): return "\n" - return "\n".join(output) + ## Could also be used to collapse the fields, or exclude some. + def Item(name, cost, discount, tax): + return "{}, {}, {}, {}".format(name, cost, discount, tax) - def ItemList(s): return s - def ItemListSep(): return "\n" - - ## Could also be used to collapse the fields, or exclude some. - def Item(name, cost, discount, tax): - return "{}, {}, {}, {}".format(name, cost, discount, tax) - - return { - "Receipt" : Receipt, - "ItemList" : ItemList, - "ItemListSep" : ItemListSep, - "Item" : Item, - } - -
Convert CSV writer to a module definition.
## Code Before:
def csv():

    def Receipt(meta, body, variables):
        output = []

        def add(s) : output.append(s)

        if "store" in meta:
            add("# Store: {}".format(meta["store"]))
        if "date" in meta:
            add("# Date: {}".format(meta["date"]))
        if "location" in meta:
            add("# Location: {}".format(meta["location"]))
        add(body)

        return "\n".join(output)

    def ItemList(s): return s
    def ItemListSep(): return "\n"

    ## Could also be used to collapse the fields, or exclude some.
    def Item(name, cost, discount, tax):
        return "{}, {}, {}, {}".format(name, cost, discount, tax)

    return {
        "Receipt" : Receipt,
        "ItemList" : ItemList,
        "ItemListSep" : ItemListSep,
        "Item" : Item,
    }

## Instruction: Convert CSV writer to a module definition.

## Code After:
def Receipt(meta, body, variables):
    output = []

    def add(s) : output.append(s)

    if "store" in meta:
        add("# Store: {}".format(meta["store"]))
    if "date" in meta:
        add("# Date: {}".format(meta["date"]))
    if "location" in meta:
        add("# Location: {}".format(meta["location"]))
    add(body)

    return "\n".join(output)


def ItemList(s): return s
def ItemListSep(): return "\n"

## Could also be used to collapse the fields, or exclude some.
def Item(name, cost, discount, tax):
    return "{}, {}, {}, {}".format(name, cost, discount, tax)
# ... existing code ... def Receipt(meta, body, variables): output = [] def add(s) : output.append(s) if "store" in meta: add("# Store: {}".format(meta["store"])) if "date" in meta: add("# Date: {}".format(meta["date"])) if "location" in meta: add("# Location: {}".format(meta["location"])) add(body) return "\n".join(output) def ItemList(s): return s def ItemListSep(): return "\n" ## Could also be used to collapse the fields, or exclude some. def Item(name, cost, discount, tax): return "{}, {}, {}, {}".format(name, cost, discount, tax) # ... rest of the code ...
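Note: the full commit message says the writer is now loaded with `__import__` "as a dictionary". One plausible reading, sketched with importlib (the module path is assumed):

```python
import importlib

mod = importlib.import_module('costcocr.writers.csv')  # assumed path
# vars(mod) maps names to objects, recovering the dict the old csv()
# factory returned -- private names filtered out.
writer = {name: obj for name, obj in vars(mod).items()
          if not name.startswith('_')}
print(writer['Item']('Milk', '3.49', '0.00', 'A'))
```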
adee7a2530d22d1242f89cddc84795efd1d02653
imagesift/cms_plugins.py
imagesift/cms_plugins.py
import datetime from django.utils.translation import ugettext_lazy as _ from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from .models import GalleryPlugin class ImagesiftPlugin(CMSPluginBase): model = GalleryPlugin name = _('Imagesift Plugin') render_template = "imagesift_plugin.html" def date_digest(self, images): """ return a list of unique dates, for all the images passed """ dates = {} for i in images: dates.setdefault(i.overrideable_date().date(), None) return sorted(dates.keys()) def render(self, context, instance, placeholder): url = context['request'].get_full_path() date = context['request'].GET.get('date') limit = instance.thumbnail_limit qs = instance.get_images_queryset() if limit: qs = qs[:limit] filtered = False if date: date = datetime.datetime.strptime(date, "%Y-%m-%d").date() qs = list(qs) qs = [i for i in qs if i.overrideable_date().date() == date] filtered = _('The set of images is filtered to %s' % unicode(date)) context.update({ 'dates': [d.isoformat() for d in self.date_digest(qs)], 'filtered':filtered, 'images': qs, 'instance': instance, 'placeholder': placeholder, 'url':url, }) return context plugin_pool.register_plugin(ImagesiftPlugin)
import datetime from django.utils.translation import ugettext_lazy as _ from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from .models import GalleryPlugin class ImagesiftPlugin(CMSPluginBase): model = GalleryPlugin name = _('Imagesift Plugin') render_template = "imagesift_plugin.html" def date_digest(self, images): """ return a list of unique dates, for all the images passed """ dates = {} for i in images: dates.setdefault(i.overrideable_date().date(), None) return sorted(dates.keys()) def render(self, context, instance, placeholder): url = context['request'].get_full_path() date = context['request'].GET.get('date') limit = instance.thumbnail_limit qs = instance.get_images_queryset() # there's no way around listing, sorry. qs = list(qs) filtered = False if date: date = datetime.datetime.strptime(date, "%Y-%m-%d").date() qs = [i for i in qs if i.overrideable_date().date() == date] filtered = _('The set of images is filtered to %s' % unicode(date)) # sort before limit qs.sort(key=lambda i: i.overrideable_date()) if limit: qs = qs[:limit] context.update({ 'dates': [d.isoformat() for d in self.date_digest(qs)], 'filtered':filtered, 'images': qs, 'instance': instance, 'placeholder': placeholder, 'url':url, }) return context plugin_pool.register_plugin(ImagesiftPlugin)
Sort returned images by date, taking into account overrides
Sort returned images by date, taking into account overrides
Python
bsd-3-clause
topiaruss/cmsplugin-imagesift,topiaruss/cmsplugin-imagesift,topiaruss/cmsplugin-imagesift
import datetime from django.utils.translation import ugettext_lazy as _ from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from .models import GalleryPlugin class ImagesiftPlugin(CMSPluginBase): model = GalleryPlugin name = _('Imagesift Plugin') render_template = "imagesift_plugin.html" def date_digest(self, images): """ return a list of unique dates, for all the images passed """ dates = {} for i in images: dates.setdefault(i.overrideable_date().date(), None) return sorted(dates.keys()) def render(self, context, instance, placeholder): url = context['request'].get_full_path() date = context['request'].GET.get('date') limit = instance.thumbnail_limit qs = instance.get_images_queryset() - if limit: - qs = qs[:limit] + # there's no way around listing, sorry. + qs = list(qs) + filtered = False if date: date = datetime.datetime.strptime(date, "%Y-%m-%d").date() - qs = list(qs) qs = [i for i in qs if i.overrideable_date().date() == date] filtered = _('The set of images is filtered to %s' % unicode(date)) + + # sort before limit + qs.sort(key=lambda i: i.overrideable_date()) + + if limit: + qs = qs[:limit] context.update({ 'dates': [d.isoformat() for d in self.date_digest(qs)], 'filtered':filtered, 'images': qs, 'instance': instance, 'placeholder': placeholder, 'url':url, }) return context plugin_pool.register_plugin(ImagesiftPlugin)
Sort returned images by date, taking into account overrides
## Code Before:
import datetime

from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool

from .models import GalleryPlugin


class ImagesiftPlugin(CMSPluginBase):
    model = GalleryPlugin
    name = _('Imagesift Plugin')
    render_template = "imagesift_plugin.html"

    def date_digest(self, images):
        """
        return a list of unique dates, for all the images passed
        """
        dates = {}
        for i in images:
            dates.setdefault(i.overrideable_date().date(), None)
        return sorted(dates.keys())

    def render(self, context, instance, placeholder):
        url = context['request'].get_full_path()
        date = context['request'].GET.get('date')
        limit = instance.thumbnail_limit
        qs = instance.get_images_queryset()
        if limit:
            qs = qs[:limit]
        filtered = False
        if date:
            date = datetime.datetime.strptime(date, "%Y-%m-%d").date()
            qs = list(qs)
            qs = [i for i in qs if i.overrideable_date().date() == date]
            filtered = _('The set of images is filtered to %s' % unicode(date))
        context.update({
            'dates': [d.isoformat() for d in self.date_digest(qs)],
            'filtered':filtered,
            'images': qs,
            'instance': instance,
            'placeholder': placeholder,
            'url':url,
        })
        return context

plugin_pool.register_plugin(ImagesiftPlugin)

## Instruction: Sort returned images by date, taking into account overrides

## Code After:
import datetime

from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool

from .models import GalleryPlugin


class ImagesiftPlugin(CMSPluginBase):
    model = GalleryPlugin
    name = _('Imagesift Plugin')
    render_template = "imagesift_plugin.html"

    def date_digest(self, images):
        """
        return a list of unique dates, for all the images passed
        """
        dates = {}
        for i in images:
            dates.setdefault(i.overrideable_date().date(), None)
        return sorted(dates.keys())

    def render(self, context, instance, placeholder):
        url = context['request'].get_full_path()
        date = context['request'].GET.get('date')
        limit = instance.thumbnail_limit
        qs = instance.get_images_queryset()

        # there's no way around listing, sorry.
        qs = list(qs)

        filtered = False
        if date:
            date = datetime.datetime.strptime(date, "%Y-%m-%d").date()
            qs = [i for i in qs if i.overrideable_date().date() == date]
            filtered = _('The set of images is filtered to %s' % unicode(date))

        # sort before limit
        qs.sort(key=lambda i: i.overrideable_date())

        if limit:
            qs = qs[:limit]

        context.update({
            'dates': [d.isoformat() for d in self.date_digest(qs)],
            'filtered':filtered,
            'images': qs,
            'instance': instance,
            'placeholder': placeholder,
            'url':url,
        })
        return context

plugin_pool.register_plugin(ImagesiftPlugin)
# ... existing code ... date = context['request'].GET.get('date') limit = instance.thumbnail_limit qs = instance.get_images_queryset() # there's no way around listing, sorry. qs = list(qs) filtered = False if date: date = datetime.datetime.strptime(date, "%Y-%m-%d").date() qs = [i for i in qs if i.overrideable_date().date() == date] filtered = _('The set of images is filtered to %s' % unicode(date)) # sort before limit qs.sort(key=lambda i: i.overrideable_date()) if limit: qs = qs[:limit] context.update({ 'dates': [d.isoformat() for d in self.date_digest(qs)], # ... rest of the code ...
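Note: the reordering matters because slicing first would truncate the queryset in database order and only then sort the survivors; sorting first keeps the earliest-dated images overall. The same effect on plain numbers:

```python
items = [3, 1, 4, 1, 5, 9, 2, 6]

limit_then_sort = sorted(items[:4])   # [1, 1, 3, 4] -- arbitrary subset
sort_then_limit = sorted(items)[:4]   # [1, 1, 2, 3] -- four smallest
```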
3f7ccf17528b91b0b1145ad81c3f5aad68085aa5
varify/variants/translators.py
varify/variants/translators.py
from avocado.query.translators import Translator, registry from modeltree.tree import trees class AllowNullsTranslator(Translator): """For data sources that only apply to SNPs, this translator ensures only SNPs are filtered down and not other types of variants. """ def translate(self, field, roperator, rvalue, tree, **kwargs): output = super(AllowNullsTranslator, self).translate( field, roperator, rvalue, tree, **kwargs) # Create a null condition for this field null_condition = trees[tree].query_condition( field.field, 'isnull', True) # Allow the null condition output['query_modifiers']['condition'] |= null_condition return output registry.register(AllowNullsTranslator, 'Allow Nulls')
from avocado.query.translators import Translator, registry from modeltree.tree import trees class AllowNullsTranslator(Translator): """For data sources that only apply to SNPs, this translator ensures only SNPs are filtered down and not other types of variants. """ def translate(self, field, roperator, rvalue, tree, **kwargs): output = super(AllowNullsTranslator, self).translate( field, roperator, rvalue, tree, **kwargs) # We are excluding nulls in the case of range, gt, and gte operators. # If we did not do this, then null values would be included all the # time which would be confusing, especially when they are included # for both lt and gt queries as it appears nulls are simultaneously # 0 and infinity. if roperator not in ('range', 'gt', 'gte'): # Create a null condition for this field null_condition = trees[tree].query_condition( field.field, 'isnull', True) # Allow the null condition output['query_modifiers']['condition'] |= null_condition return output registry.register(AllowNullsTranslator, 'Allow Nulls')
Exclude nulls in translator when operator is range, gt, gte
Exclude nulls in translator when operator is range, gt, gte Previously, nulls were included in all cases, making it appear that null was both 0 and infinity. Now, null is effectively treated as 0. Signed-off-by: Don Naegely <[email protected]>
Python
bsd-2-clause
chop-dbhi/varify,chop-dbhi/varify,chop-dbhi/varify,chop-dbhi/varify
from avocado.query.translators import Translator, registry from modeltree.tree import trees class AllowNullsTranslator(Translator): """For data sources that only apply to SNPs, this translator ensures only SNPs are filtered down and not other types of variants. """ def translate(self, field, roperator, rvalue, tree, **kwargs): output = super(AllowNullsTranslator, self).translate( field, roperator, rvalue, tree, **kwargs) + + # We are excluding nulls in the case of range, gt, and gte operators. + # If we did not do this, then null values would be included all the + # time which would be confusing, especially when they are included + # for both lt and gt queries as it appears nulls are simultaneously + # 0 and infinity. + if roperator not in ('range', 'gt', 'gte'): - # Create a null condition for this field + # Create a null condition for this field - null_condition = trees[tree].query_condition( + null_condition = trees[tree].query_condition( - field.field, 'isnull', True) + field.field, 'isnull', True) - # Allow the null condition + # Allow the null condition - output['query_modifiers']['condition'] |= null_condition + output['query_modifiers']['condition'] |= null_condition + return output registry.register(AllowNullsTranslator, 'Allow Nulls')
Exclude nulls in translator when operator is range, gt, gte
## Code Before:
from avocado.query.translators import Translator, registry
from modeltree.tree import trees


class AllowNullsTranslator(Translator):
    """For data sources that only apply to SNPs, this translator ensures
    only SNPs are filtered down and not other types of variants.
    """
    def translate(self, field, roperator, rvalue, tree, **kwargs):
        output = super(AllowNullsTranslator, self).translate(
            field, roperator, rvalue, tree, **kwargs)

        # Create a null condition for this field
        null_condition = trees[tree].query_condition(
            field.field, 'isnull', True)

        # Allow the null condition
        output['query_modifiers']['condition'] |= null_condition

        return output


registry.register(AllowNullsTranslator, 'Allow Nulls')

## Instruction: Exclude nulls in translator when operator is range, gt, gte

## Code After:
from avocado.query.translators import Translator, registry
from modeltree.tree import trees


class AllowNullsTranslator(Translator):
    """For data sources that only apply to SNPs, this translator ensures
    only SNPs are filtered down and not other types of variants.
    """
    def translate(self, field, roperator, rvalue, tree, **kwargs):
        output = super(AllowNullsTranslator, self).translate(
            field, roperator, rvalue, tree, **kwargs)

        # We are excluding nulls in the case of range, gt, and gte operators.
        # If we did not do this, then null values would be included all the
        # time which would be confusing, especially when they are included
        # for both lt and gt queries as it appears nulls are simultaneously
        # 0 and infinity.
        if roperator not in ('range', 'gt', 'gte'):
            # Create a null condition for this field
            null_condition = trees[tree].query_condition(
                field.field, 'isnull', True)

            # Allow the null condition
            output['query_modifiers']['condition'] |= null_condition

        return output


registry.register(AllowNullsTranslator, 'Allow Nulls')
# ... existing code ... def translate(self, field, roperator, rvalue, tree, **kwargs): output = super(AllowNullsTranslator, self).translate( field, roperator, rvalue, tree, **kwargs) # We are excluding nulls in the case of range, gt, and gte operators. # If we did not do this, then null values would be included all the # time which would be confusing, especially when they are included # for both lt and gt queries as it appears nulls are simultaneously # 0 and infinity. if roperator not in ('range', 'gt', 'gte'): # Create a null condition for this field null_condition = trees[tree].query_condition( field.field, 'isnull', True) # Allow the null condition output['query_modifiers']['condition'] |= null_condition return output # ... rest of the code ...
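Note: restated in plain Python, the semantics after the change are that a null passes every filter except range/gt/gte, so it behaves like a zero rather than like a value that is simultaneously smaller and larger than everything. A toy predicate pair:

```python
values = [0.2, None, 0.9]

def gt(v, bound):
    # range/gt/gte: nulls never match
    return v is not None and v > bound

def lt(v, bound):
    # other operators still allow the null through
    return v is None or v < bound

print([v for v in values if gt(v, 0.5)])  # [0.9]
print([v for v in values if lt(v, 0.5)])  # [0.2, None]
```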
355b1a91edf2dfcff66c2a02e034977f65d0690c
influxdb/dataframe_client.py
influxdb/dataframe_client.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals __all__ = ['DataFrameClient'] try: import pandas del pandas except ImportError as err: from .client import InfluxDBClient class DataFrameClient(InfluxDBClient): def __init__(self, *a, **kw): raise ImportError("DataFrameClient requires Pandas " "which couldn't be imported: %s" % err) else: from ._dataframe_client import DataFrameClient
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals __all__ = ['DataFrameClient'] try: import pandas del pandas except ImportError as err: from .client import InfluxDBClient class DataFrameClient(InfluxDBClient): err = err def __init__(self, *a, **kw): raise ImportError("DataFrameClient requires Pandas " "which couldn't be imported: %s" % self.err) else: from ._dataframe_client import DataFrameClient
Fix DataFrameClient import error on python3.5
Fix DataFrameClient import error on python3.5
Python
mit
BenHewins/influxdb-python,omki2005/influxdb-python,omki2005/influxdb-python,Asimmetric/influxdb-python,tzonghao/influxdb-python,influxdata/influxdb-python,Asimmetric/influxdb-python,tzonghao/influxdb-python,BenHewins/influxdb-python,influxdb/influxdb-python,influxdata/influxdb-python,influxdb/influxdb-python
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals __all__ = ['DataFrameClient'] try: import pandas del pandas except ImportError as err: from .client import InfluxDBClient class DataFrameClient(InfluxDBClient): + err = err + def __init__(self, *a, **kw): raise ImportError("DataFrameClient requires Pandas " - "which couldn't be imported: %s" % err) + "which couldn't be imported: %s" % self.err) else: from ._dataframe_client import DataFrameClient
Fix DataFrameClient import error on python3.5
## Code Before:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

__all__ = ['DataFrameClient']


try:
    import pandas
    del pandas
except ImportError as err:
    from .client import InfluxDBClient

    class DataFrameClient(InfluxDBClient):
        def __init__(self, *a, **kw):
            raise ImportError("DataFrameClient requires Pandas "
                              "which couldn't be imported: %s" % err)
else:
    from ._dataframe_client import DataFrameClient

## Instruction: Fix DataFrameClient import error on python3.5

## Code After:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

__all__ = ['DataFrameClient']


try:
    import pandas
    del pandas
except ImportError as err:
    from .client import InfluxDBClient

    class DataFrameClient(InfluxDBClient):
        err = err

        def __init__(self, *a, **kw):
            raise ImportError("DataFrameClient requires Pandas "
                              "which couldn't be imported: %s" % self.err)
else:
    from ._dataframe_client import DataFrameClient
... from .client import InfluxDBClient class DataFrameClient(InfluxDBClient): err = err def __init__(self, *a, **kw): raise ImportError("DataFrameClient requires Pandas " "which couldn't be imported: %s" % self.err) else: from ._dataframe_client import DataFrameClient ...
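Note: the underlying Python 3 behaviour (PEP 3110) is that the name bound by `except ... as err` is deleted when the except block ends, so the original closure raised NameError at call time on 3.x. Binding it to a class attribute inside the block captures the exception while the name still exists. A minimal reproduction:

```python
try:
    import _no_such_module_  # stand-in for a missing pandas
except ImportError as err:
    saved = err            # still bound here, so this works

print(saved)               # fine: the rebinding survives
# print(err)               # NameError on Python 3: `err` was unbound
```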
3daa15b0ccb3fc4891daf55724cbeaa705f923e5
scripts/clio_daemon.py
scripts/clio_daemon.py
import logging import simpledaemon class clio_daemon(simpledaemon.Daemon): default_conf = 'clio_daemon.conf' section = 'clio' def run(self): import eventlet from clio.store import app logger = logging.getLogger() if logger.handlers: [app.logger.addHandler(h) for h in logger.handlers] app.logger.setLevel(logger.level) eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app) if __name__ == '__main__': clio_daemon().main()
import simpledaemon class clio_daemon(simpledaemon.Daemon): default_conf = 'clio_daemon.conf' section = 'clio' def run(self): import eventlet from clio.store import app eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app) if __name__ == '__main__': clio_daemon().main()
Revert "output flask logging into simpledaemon's log file."
Revert "output flask logging into simpledaemon's log file." This is completely superfluous - logging does this already automatically. This reverts commit 18091efef351ecddb1d29ee7d01d0a7fb567a7b7.
Python
apache-2.0
geodelic/clio,geodelic/clio
- - import logging import simpledaemon class clio_daemon(simpledaemon.Daemon): default_conf = 'clio_daemon.conf' section = 'clio' def run(self): import eventlet from clio.store import app - logger = logging.getLogger() - if logger.handlers: - [app.logger.addHandler(h) for h in logger.handlers] - app.logger.setLevel(logger.level) eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app) if __name__ == '__main__': clio_daemon().main()
Revert "output flask logging into simpledaemon's log file."
## Code Before:
import logging

import simpledaemon


class clio_daemon(simpledaemon.Daemon):
    default_conf = 'clio_daemon.conf'
    section = 'clio'

    def run(self):
        import eventlet
        from clio.store import app
        logger = logging.getLogger()
        if logger.handlers:
            [app.logger.addHandler(h) for h in logger.handlers]
            app.logger.setLevel(logger.level)
        eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app)

if __name__ == '__main__':
    clio_daemon().main()

## Instruction: Revert "output flask logging into simpledaemon's log file."

## Code After:
import simpledaemon


class clio_daemon(simpledaemon.Daemon):
    default_conf = 'clio_daemon.conf'
    section = 'clio'

    def run(self):
        import eventlet
        from clio.store import app
        eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app)

if __name__ == '__main__':
    clio_daemon().main()
# ... existing code ... import simpledaemon # ... modified code ... def run(self): import eventlet from clio.store import app eventlet.serve(eventlet.listen((app.config['HOST'], app.config['PORT']), backlog=2048), app) if __name__ == '__main__': # ... rest of the code ...
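Note: the revert leans on standard logging behaviour; records from a named logger propagate up to the root logger's handlers by default, so copying the root handlers onto app.logger is redundant at best and risks emitting each line twice. A sketch of the propagation (file name illustrative):

```python
import logging

logging.basicConfig(filename='daemon.log', level=logging.INFO)

# No handlers attached to this logger, yet the record still reaches
# daemon.log via propagation to the root logger.
logging.getLogger('clio').info('handled by the root handler')
```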
a02739581d6c9dbde900c226d121b4fb889b4e2d
window.py
window.py
from PySide import QtGui from editor import Editor class MainWindow(QtGui.QMainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) editor = Editor() self.setCentralWidget(editor) self.setWindowTitle("RST Previewer") self.showMaximized()
from PySide import QtGui, QtCore from editor import Editor class MainWindow(QtGui.QMainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) splitter = QtGui.QSplitter(QtCore.Qt.Horizontal) treeview = QtGui.QTreeView() editor = Editor() self.setCentralWidget(splitter) splitter.addWidget(treeview) splitter.addWidget(editor) self.setWindowTitle("RST Previewer") self.showMaximized()
Add splitter with treeview/editor split.
Add splitter with treeview/editor split.
Python
bsd-3-clause
audreyr/sphinx-gui,techdragon/sphinx-gui,audreyr/sphinx-gui,techdragon/sphinx-gui
- from PySide import QtGui + from PySide import QtGui, QtCore from editor import Editor class MainWindow(QtGui.QMainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) + splitter = QtGui.QSplitter(QtCore.Qt.Horizontal) + treeview = QtGui.QTreeView() editor = Editor() - self.setCentralWidget(editor) + self.setCentralWidget(splitter) + + splitter.addWidget(treeview) + splitter.addWidget(editor) + self.setWindowTitle("RST Previewer") self.showMaximized()
Add splitter with treeview/editor split.
## Code Before:
from PySide import QtGui

from editor import Editor


class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)

        editor = Editor()
        self.setCentralWidget(editor)
        self.setWindowTitle("RST Previewer")
        self.showMaximized()

## Instruction: Add splitter with treeview/editor split.

## Code After:
from PySide import QtGui, QtCore

from editor import Editor


class MainWindow(QtGui.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)

        splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
        treeview = QtGui.QTreeView()
        editor = Editor()
        self.setCentralWidget(splitter)

        splitter.addWidget(treeview)
        splitter.addWidget(editor)

        self.setWindowTitle("RST Previewer")
        self.showMaximized()
... from PySide import QtGui, QtCore from editor import Editor ... class MainWindow(QtGui.QMainWindow): def __init__(self, parent=None): super(MainWindow, self).__init__(parent) splitter = QtGui.QSplitter(QtCore.Qt.Horizontal) treeview = QtGui.QTreeView() editor = Editor() self.setCentralWidget(splitter) splitter.addWidget(treeview) splitter.addWidget(editor) self.setWindowTitle("RST Previewer") self.showMaximized() ...
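Note: a sketch of the entry point that would drive this window, assuming the file above is window.py:

```python
import sys

from PySide import QtGui

from window import MainWindow

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    win = MainWindow()          # __init__ already calls showMaximized()
    sys.exit(app.exec_())
```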
23d5d0e0e77dc0b0816df51a8a1e42bc4069112b
rst2pdf/style2yaml.py
rst2pdf/style2yaml.py
import argparse import json import yaml from rst2pdf.dumpstyle import fixstyle from rst2pdf.rson import loads as rloads def main(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( 'paths', metavar='PATH', nargs='+', help='An RSON-formatted file to convert.', ) args = parser.parse_args() for path in args.paths: # read rson from a file with open(path, 'rb') as fh: style_data = fixstyle(rloads(fh.read())) # output the style as json, then parse that json_style = json.dumps(style_data) reparsed_style = json.loads(json_style) yaml_style = yaml.dump(reparsed_style, default_flow_style=None) print(yaml_style) if __name__ == '__main__': main()
import argparse import json import os import yaml from rst2pdf.dumpstyle import fixstyle from rst2pdf.rson import loads as rloads def main(): # set up the command, optional --save parameter, and a list of paths parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( '--save', action='store_true', help='Save .yaml version of the file (rather than output to stdout)', ) parser.add_argument( 'paths', metavar='PATH', nargs='+', help='An RSON-formatted file to convert.', ) args = parser.parse_args() # loop over the files for path in args.paths: # read rson from a file with open(path, 'rb') as fh: style_data = fixstyle(rloads(fh.read())) # output the style as json (already supported), then parse that json_style = json.dumps(style_data) reparsed_style = json.loads(json_style) yaml_style = yaml.dump(reparsed_style, default_flow_style=None) # output the yaml or save to a file if args.save: new_path = '.'.join((os.path.splitext(path)[0], 'yaml')) if os.path.exists(new_path): print("File " + new_path + " exists, cannot overwrite") else: print("Creating file " + new_path) with open(new_path, 'w') as file: file.write(yaml_style) else: print(yaml_style) if __name__ == '__main__': main()
Add save functionality to the conversion script
Add save functionality to the conversion script
Python
mit
rst2pdf/rst2pdf,rst2pdf/rst2pdf
import argparse import json + import os import yaml from rst2pdf.dumpstyle import fixstyle from rst2pdf.rson import loads as rloads def main(): + # set up the command, optional --save parameter, and a list of paths parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + '--save', + action='store_true', + help='Save .yaml version of the file (rather than output to stdout)', ) parser.add_argument( 'paths', metavar='PATH', nargs='+', help='An RSON-formatted file to convert.', ) args = parser.parse_args() + + # loop over the files for path in args.paths: # read rson from a file with open(path, 'rb') as fh: style_data = fixstyle(rloads(fh.read())) - # output the style as json, then parse that + # output the style as json (already supported), then parse that json_style = json.dumps(style_data) reparsed_style = json.loads(json_style) yaml_style = yaml.dump(reparsed_style, default_flow_style=None) + + # output the yaml or save to a file + if args.save: + new_path = '.'.join((os.path.splitext(path)[0], 'yaml')) + + if os.path.exists(new_path): + print("File " + new_path + " exists, cannot overwrite") + else: + print("Creating file " + new_path) + with open(new_path, 'w') as file: + file.write(yaml_style) + else: - print(yaml_style) + print(yaml_style) if __name__ == '__main__': main()
Add save functionality to the conversion script
## Code Before:
import argparse
import json

import yaml

from rst2pdf.dumpstyle import fixstyle
from rst2pdf.rson import loads as rloads


def main():
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        'paths',
        metavar='PATH',
        nargs='+',
        help='An RSON-formatted file to convert.',
    )
    args = parser.parse_args()
    for path in args.paths:
        # read rson from a file
        with open(path, 'rb') as fh:
            style_data = fixstyle(rloads(fh.read()))

        # output the style as json, then parse that
        json_style = json.dumps(style_data)
        reparsed_style = json.loads(json_style)
        yaml_style = yaml.dump(reparsed_style, default_flow_style=None)
        print(yaml_style)


if __name__ == '__main__':
    main()

## Instruction: Add save functionality to the conversion script

## Code After:
import argparse
import json
import os

import yaml

from rst2pdf.dumpstyle import fixstyle
from rst2pdf.rson import loads as rloads


def main():
    # set up the command, optional --save parameter, and a list of paths
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        '--save',
        action='store_true',
        help='Save .yaml version of the file (rather than output to stdout)',
    )
    parser.add_argument(
        'paths',
        metavar='PATH',
        nargs='+',
        help='An RSON-formatted file to convert.',
    )
    args = parser.parse_args()

    # loop over the files
    for path in args.paths:
        # read rson from a file
        with open(path, 'rb') as fh:
            style_data = fixstyle(rloads(fh.read()))

        # output the style as json (already supported), then parse that
        json_style = json.dumps(style_data)
        reparsed_style = json.loads(json_style)
        yaml_style = yaml.dump(reparsed_style, default_flow_style=None)

        # output the yaml or save to a file
        if args.save:
            new_path = '.'.join((os.path.splitext(path)[0], 'yaml'))

            if os.path.exists(new_path):
                print("File " + new_path + " exists, cannot overwrite")
            else:
                print("Creating file " + new_path)
                with open(new_path, 'w') as file:
                    file.write(yaml_style)
        else:
            print(yaml_style)


if __name__ == '__main__':
    main()
... import argparse import json import os import yaml ... def main(): # set up the command, optional --save parameter, and a list of paths parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( '--save', action='store_true', help='Save .yaml version of the file (rather than output to stdout)', ) parser.add_argument( 'paths', ... help='An RSON-formatted file to convert.', ) args = parser.parse_args() # loop over the files for path in args.paths: # read rson from a file with open(path, 'rb') as fh: style_data = fixstyle(rloads(fh.read())) # output the style as json (already supported), then parse that json_style = json.dumps(style_data) reparsed_style = json.loads(json_style) yaml_style = yaml.dump(reparsed_style, default_flow_style=None) # output the yaml or save to a file if args.save: new_path = '.'.join((os.path.splitext(path)[0], 'yaml')) if os.path.exists(new_path): print("File " + new_path + " exists, cannot overwrite") else: print("Creating file " + new_path) with open(new_path, 'w') as file: file.write(yaml_style) else: print(yaml_style) if __name__ == '__main__': ...
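Note: driving main() directly shows the two modes; the paths below are illustrative:

```python
import sys

# Print YAML to stdout:
sys.argv = ['style2yaml', 'styles/default.rson']
main()

# Write styles/default.yaml alongside the input, refusing to overwrite:
sys.argv = ['style2yaml', '--save', 'styles/default.rson']
main()
```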
e9df15b0f084ed9e026a5de129b109a3c546f99c
src/libeeyore/parse_tree_to_cpp.py
src/libeeyore/parse_tree_to_cpp.py
import builtins from cpp.cpprenderer import EeyCppRenderer from environment import EeyEnvironment from values import * def parse_tree_string_to_values( string ): return eval( string ) def non_empty_line( ln ): return ( ln.strip() != "" ) def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ): env = EeyEnvironment( EeyCppRenderer() ) builtins.add_builtins( self ) values = ( parse_tree_string_to_values( ln ) for ln in filter( non_empty_line, parse_tree_in_fl ) ) cpp_out_fl.write( env.render_exe( values ) )
from itertools import imap import builtins from cpp.cpprenderer import EeyCppRenderer from environment import EeyEnvironment from functionvalues import * from languagevalues import * from values import * def parse_tree_string_to_values( string ): return eval( string ) def remove_comments( ln ): i = ln.find( "#" ) if i != -1: return ln[:i] else: return ln def non_empty_line( ln ): return ( ln.strip() != "" ) def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ): env = EeyEnvironment( EeyCppRenderer() ) builtins.add_builtins( env ) values = ( parse_tree_string_to_values( ln ) for ln in filter( non_empty_line, imap( remove_comments, parse_tree_in_fl ) ) ) cpp_out_fl.write( env.render_exe( values ) )
Handle comments in parse tree.
Handle comments in parse tree.
Python
mit
andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper
+ from itertools import imap import builtins from cpp.cpprenderer import EeyCppRenderer from environment import EeyEnvironment + from functionvalues import * + from languagevalues import * from values import * def parse_tree_string_to_values( string ): return eval( string ) + + def remove_comments( ln ): + i = ln.find( "#" ) + if i != -1: + return ln[:i] + else: + return ln def non_empty_line( ln ): return ( ln.strip() != "" ) def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ): env = EeyEnvironment( EeyCppRenderer() ) - builtins.add_builtins( self ) + builtins.add_builtins( env ) values = ( parse_tree_string_to_values( ln ) for ln in - filter( non_empty_line, parse_tree_in_fl ) ) + filter( non_empty_line, imap( remove_comments, parse_tree_in_fl ) ) ) cpp_out_fl.write( env.render_exe( values ) )
Handle comments in parse tree.
## Code Before:
import builtins

from cpp.cpprenderer import EeyCppRenderer
from environment import EeyEnvironment
from values import *


def parse_tree_string_to_values( string ):
    return eval( string )

def non_empty_line( ln ):
    return ( ln.strip() != "" )

def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ):
    env = EeyEnvironment( EeyCppRenderer() )
    builtins.add_builtins( self )

    values = ( parse_tree_string_to_values( ln ) for ln in
        filter( non_empty_line, parse_tree_in_fl ) )

    cpp_out_fl.write( env.render_exe( values ) )

## Instruction: Handle comments in parse tree.

## Code After:
from itertools import imap

import builtins

from cpp.cpprenderer import EeyCppRenderer
from environment import EeyEnvironment
from functionvalues import *
from languagevalues import *
from values import *


def parse_tree_string_to_values( string ):
    return eval( string )

def remove_comments( ln ):
    i = ln.find( "#" )
    if i != -1:
        return ln[:i]
    else:
        return ln

def non_empty_line( ln ):
    return ( ln.strip() != "" )

def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ):
    env = EeyEnvironment( EeyCppRenderer() )
    builtins.add_builtins( env )

    values = ( parse_tree_string_to_values( ln ) for ln in
        filter( non_empty_line, imap( remove_comments, parse_tree_in_fl ) ) )

    cpp_out_fl.write( env.render_exe( values ) )
# ... existing code ... from itertools import imap import builtins from cpp.cpprenderer import EeyCppRenderer from environment import EeyEnvironment from functionvalues import * from languagevalues import * from values import * def parse_tree_string_to_values( string ): return eval( string ) def remove_comments( ln ): i = ln.find( "#" ) if i != -1: return ln[:i] else: return ln def non_empty_line( ln ): return ( ln.strip() != "" ) # ... modified code ... def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ): env = EeyEnvironment( EeyCppRenderer() ) builtins.add_builtins( env ) values = ( parse_tree_string_to_values( ln ) for ln in filter( non_empty_line, imap( remove_comments, parse_tree_in_fl ) ) ) cpp_out_fl.write( env.render_exe( values ) ) # ... rest of the code ...
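Note: a quick check of the two filters working together. The EeyInt/EeyPlus names are illustrative stand-ins for parse-tree constructors, and `imap` is the Python 2 lazy map the module imports:

```python
lines = [
    'EeyInt(1)  # a literal\n',
    '# a whole-line comment\n',
    '\n',
    'EeyPlus(EeyInt(1), EeyInt(2))\n',
]

kept = filter(non_empty_line, map(remove_comments, lines))
print(list(kept))
# ['EeyInt(1)  ', 'EeyPlus(EeyInt(1), EeyInt(2))\n']
```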
c1b797b74098fd6f7ea480f7f1bf496d5f52bdc7
signac/__init__.py
signac/__init__.py
from __future__ import absolute_import from . import common from . import contrib from . import db from . import gui from .common import errors from .contrib import Project from .contrib import get_project from .contrib import init_project from .contrib import fetch from .contrib import export_one from .contrib import export from .contrib import export_to_mirror from .contrib import export_pymongo from .contrib import fetch_one # deprecated from .contrib import filesystems as fs from .db import get_database __version__ = '0.5.0' __all__ = ['__version__', 'common', 'contrib', 'db', 'gui', 'errors', 'Project', 'get_project', 'init_project', 'get_database', 'fetch', 'fetch_one', 'export_one', 'export', 'export_to_mirror', 'export_pymongo', 'fs' ]
from __future__ import absolute_import from . import contrib from . import db from . import gui from .contrib import Project from .contrib import get_project from .contrib import init_project from .contrib import fetch from .contrib import export_one from .contrib import export from .contrib import export_to_mirror from .contrib import export_pymongo from .contrib import fetch_one # deprecated from .contrib import filesystems as fs from .db import get_database __version__ = '0.5.0' __all__ = ['__version__', 'contrib', 'db', 'gui', 'Project', 'get_project', 'init_project', 'get_database', 'fetch', 'fetch_one', 'export_one', 'export', 'export_to_mirror', 'export_pymongo', 'fs' ]
Remove common and errors from root namespace.
Remove common and errors from root namespace.
Python
bsd-3-clause
csadorf/signac,csadorf/signac
## Code Before:
from __future__ import absolute_import
from . import common
from . import contrib
from . import db
from . import gui
from .common import errors
from .contrib import Project
from .contrib import get_project
from .contrib import init_project
from .contrib import fetch
from .contrib import export_one
from .contrib import export
from .contrib import export_to_mirror
from .contrib import export_pymongo
from .contrib import fetch_one  # deprecated
from .contrib import filesystems as fs
from .db import get_database

__version__ = '0.5.0'

__all__ = ['__version__', 'common', 'contrib', 'db', 'gui',
           'errors', 'Project', 'get_project', 'init_project',
           'get_database', 'fetch', 'fetch_one',
           'export_one', 'export', 'export_to_mirror', 'export_pymongo',
           'fs']

## Instruction:
Remove common and errors from root namespace.

## Code After:
from __future__ import absolute_import
from . import contrib
from . import db
from . import gui
from .contrib import Project
from .contrib import get_project
from .contrib import init_project
from .contrib import fetch
from .contrib import export_one
from .contrib import export
from .contrib import export_to_mirror
from .contrib import export_pymongo
from .contrib import fetch_one  # deprecated
from .contrib import filesystems as fs
from .db import get_database

__version__ = '0.5.0'

__all__ = ['__version__', 'contrib', 'db', 'gui',
           'Project', 'get_project', 'init_project',
           'get_database', 'fetch', 'fetch_one',
           'export_one', 'export', 'export_to_mirror', 'export_pymongo',
           'fs']

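The signac change above works because `__all__` is the contract for `from signac import *`: dropping 'common' and 'errors' from the list, together with the imports themselves, removes them from the root namespace. A minimal, self-contained sketch of that mechanism (the module name `demo` is illustrative, not part of signac):

import sys
import types

# Build a throwaway module whose __all__ exports only one of its two names.
demo = types.ModuleType("demo")
exec("__all__ = ['public']\npublic = 1\nprivate = 2", demo.__dict__)
sys.modules["demo"] = demo

ns = {}
exec("from demo import *", ns)
assert "public" in ns        # exported via __all__
assert "private" not in ns   # hidden from star-imports

print(sorted(k for k in ns if not k.startswith("__")))  # ['public']
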
db59332e3d522c68c3eeef77fe4393fe137e5059
inspectors/registration/util.py
Fix logic bug for API result
Python
bsd-3-clause
codeforamerica/mdc-inspectors,codeforamerica/mdc-inspectors,codeforamerica/mdc-inspectors
## Code Before:
import requests

API_URL = 'https://opendata.miamidade.gov/resource/vvjq-pfmc.json'

def is_valid_permit(id):
    # checks if the ID is a valid Miami-Dade Permit or Process Number
    API = API_URL + '?$where=permit_number=%27' + id + '%27%20or%20process_number=%27' + id + '%27'
    response = requests.get(API)
    json_result = response.json()
    return json_result is not None

## Instruction:
Fix logic bug for API result

## Code After:
import requests

API_URL = 'https://opendata.miamidade.gov/resource/vvjq-pfmc.json'

def is_valid_permit(id):
    # checks if the ID is a valid Miami-Dade Permit or Process Number
    API = API_URL + '?$where=permit_number=%27' + id + '%27%20or%20process_number=%27' + id + '%27'
    response = requests.get(API)
    json_result = response.json()
    return json_result

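The fix above is easy to misread: `response.json()` on this endpoint yields a JSON list, and an empty list is still `is not None`, so the old check reported every ID as valid. The new `return json_result` relies on list truthiness instead. A standalone sketch of the distinction (no network access; the values are illustrative):

# What the API effectively returns for "no matching permit":
no_matches = []
print(no_matches is not None)   # True  -- the old check wrongly passes
print(bool(no_matches))         # False -- the new check correctly fails

# A hypothetical non-empty result stays truthy either way.
matches = [{"permit_number": "2015000000"}]
print(bool(matches))            # True
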
7261c5f96d52dba7f8a12716994e753910ceda64
tests/context_managers.py
Add finally blocks to ensure tests always clean up
Python
mit
caleb531/youversion-suggest,caleb531/youversion-suggest
## Code Before:
from contextlib import contextmanager
from io import BytesIO
import sys

import mock_modules
import yv_suggest.shared as yvs

@contextmanager
def redirect_stdout():
    """temporarily redirect stdout to new output stream"""
    original_stdout = sys.stdout
    out = BytesIO()
    try:
        sys.stdout = out
        yield out
    finally:
        sys.stdout = original_stdout

@contextmanager
def preserve_prefs():
    """safely retrieve and restore preferences"""
    original_prefs = yvs.get_prefs()
    yield original_prefs.copy()
    yvs.update_prefs(original_prefs)

@contextmanager
def preserve_recent_refs():
    """safely retrieve and restore list of recent references"""
    original_recent_refs = yvs.get_recent_refs()
    yield original_recent_refs[:]
    yvs.update_recent_refs(original_recent_refs)

@contextmanager
def mock_webbrowser(yvs):
    mock = mock_modules.WebbrowserMock()
    original_webbrowser = yvs.webbrowser
    yvs.webbrowser = mock
    yield mock
    yvs.webbrowser = original_webbrowser

## Instruction:
Add finally blocks to ensure tests always clean up

## Code After:
from contextlib import contextmanager
from io import BytesIO
import sys

import mock_modules
import yv_suggest.shared as yvs

@contextmanager
def redirect_stdout():
    """temporarily redirect stdout to new output stream"""
    original_stdout = sys.stdout
    out = BytesIO()
    try:
        sys.stdout = out
        yield out
    finally:
        sys.stdout = original_stdout

@contextmanager
def preserve_prefs():
    """safely retrieve and restore preferences"""
    original_prefs = yvs.get_prefs()
    try:
        yield original_prefs.copy()
    finally:
        yvs.update_prefs(original_prefs)

@contextmanager
def preserve_recent_refs():
    """safely retrieve and restore list of recent references"""
    original_recent_refs = yvs.get_recent_refs()
    try:
        yield original_recent_refs[:]
    finally:
        yvs.update_recent_refs(original_recent_refs)

@contextmanager
def mock_webbrowser(yvs):
    mock = mock_modules.WebbrowserMock()
    original_webbrowser = yvs.webbrowser
    yvs.webbrowser = mock
    try:
        yield mock
    finally:
        yvs.webbrowser = original_webbrowser

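The `finally` blocks above matter because `@contextmanager` re-raises an exception from the `with` body at the `yield` statement; without `try/finally`, the restore code after the `yield` never runs and test state leaks. A self-contained sketch of the failure mode (the names here are illustrative, not the project's API):

from contextlib import contextmanager

state = {"prefs": "original"}

@contextmanager
def fragile():
    state["prefs"] = "modified"
    yield  # an exception in the with-body is raised here...
    state["prefs"] = "original"  # ...so this line is skipped

@contextmanager
def robust():
    state["prefs"] = "modified"
    try:
        yield
    finally:
        state["prefs"] = "original"  # runs even on failure

for cm in (fragile, robust):
    state["prefs"] = "original"
    try:
        with cm():
            raise RuntimeError("simulated test failure")
    except RuntimeError:
        pass
    print(cm.__name__, "->", state["prefs"])
# fragile -> modified   (cleanup skipped)
# robust -> original    (cleanup ran)
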
93d2e33795e240407ab7e18aec67514124ff6713
app/__init__.py
Change postman testing environment to development
Python
mit
paulupendo/CP-2-Bucketlist-Application
## Code Before:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config

app = Flask(__name__)

def EnvironmentName(environ):
    app.config.from_object(app_config[environ])

EnvironmentName('TestingConfig')
databases = SQLAlchemy(app)

from app.v1 import bucketlist

## Instruction:
Change postman testing environment to development

## Code After:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config

app = Flask(__name__)

def EnvironmentName(environ):
    app.config.from_object(app_config[environ])

EnvironmentName('DevelopmentEnviron')
databases = SQLAlchemy(app)

from app.v1 import bucketlist

3199b523a67f9c241950992a07fe38d2bbee07dc
seedlibrary/migrations/0003_extendedview_fix.py
Update migration file for namechange
Python
mit
RockinRobin/seednetwork,RockinRobin/seednetwork,RockinRobin/seednetwork
## Code Before:
from __future__ import unicode_literals

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('seedlibrary', '0002_auto_20170219_2058'),
    ]

    operations = [
        migrations.RenameField(
            model_name='extendedview',
            old_name='external_field',
            new_name='external_url',
        ),
        migrations.AddField(
            model_name='extendedview',
            name='grain_subcategory',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]

## Instruction:
Update migration file for namechange

## Code After:
from __future__ import unicode_literals

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('seedlibrary', '0002_add_extendedview'),
    ]

    operations = [
        migrations.RenameField(
            model_name='extendedview',
            old_name='external_field',
            new_name='external_url',
        ),
        migrations.AddField(
            model_name='extendedview',
            name='grain_subcategory',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]

30c875e1ba1dec3bcbd22850cd703198bcc5a1fb
peeringdb/migrations/0013_auto_20201207_2233.py
Remove PeeringDB sync records on migrate
Python
apache-2.0
respawner/peering-manager,respawner/peering-manager,respawner/peering-manager,respawner/peering-manager
## Code Before:
from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ("peeringdb", "0012_peerrecord_visible"),
    ]

    def flush_peeringdb_tables(apps, schema_editor):
        apps.get_model("peeringdb", "Contact").objects.all().delete()
        apps.get_model("peeringdb", "Network").objects.all().delete()
        apps.get_model("peeringdb", "NetworkIXLAN").objects.all().delete()
        apps.get_model("peeringdb", "PeerRecord").objects.all().delete()

    operations = [migrations.RunPython(flush_peeringdb_tables)]

## Instruction:
Remove PeeringDB sync records on migrate

## Code After:
from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ("peeringdb", "0012_peerrecord_visible"),
    ]

    def flush_peeringdb_tables(apps, schema_editor):
        apps.get_model("peeringdb", "Contact").objects.all().delete()
        apps.get_model("peeringdb", "Network").objects.all().delete()
        apps.get_model("peeringdb", "NetworkIXLAN").objects.all().delete()
        apps.get_model("peeringdb", "PeerRecord").objects.all().delete()
        apps.get_model("peeringdb", "Synchronization").objects.all().delete()

    operations = [migrations.RunPython(flush_peeringdb_tables)]

3b3a8dc6aa0b38cfbb68105eb5ef31e8e73ff3a4
gcm_flask/application/models.py
Update user who sent the message
Python
apache-2.0
BarcampBangalore/Barcamp-Bangalore-Android-App,BarcampBangalore/Barcamp-Bangalore-Android-App,rajeefmk/Barcamp-Bangalore-Android-App,rajeefmk/Barcamp-Bangalore-Android-App,BarcampBangalore/Barcamp-Bangalore-Android-App,rajeefmk/Barcamp-Bangalore-Android-App,BarcampBangalore/Barcamp-Bangalore-Android-App,rajeefmk/Barcamp-Bangalore-Android-App
## Code Before:
from google.appengine.ext import db

class ExampleModel(db.Model):
    """Example Model"""
    example_name = db.StringProperty(required=True)
    example_description = db.TextProperty(required=True)
    added_by = db.UserProperty()
    timestamp = db.DateTimeProperty(auto_now_add=True)

class RegIDModel(db.Model):
    """Regl IDs Model"""
    regID = db.StringProperty(required=True)

class MessagesModel(db.Model):
    """Model for storing messages sent"""
    message = db.StringProperty(required=True)
    messagetype = db.StringProperty(required=True)
    added_by = db.UserProperty()
    sent_at = db.DateTimeProperty(auto_now_add=True)

## Instruction:
Update user who sent the message

## Code After:
from google.appengine.ext import db

class ExampleModel(db.Model):
    """Example Model"""
    example_name = db.StringProperty(required=True)
    example_description = db.TextProperty(required=True)
    added_by = db.UserProperty()
    timestamp = db.DateTimeProperty(auto_now_add=True)

class RegIDModel(db.Model):
    """Regl IDs Model"""
    regID = db.StringProperty(required=True)

class MessagesModel(db.Model):
    """Model for storing messages sent"""
    message = db.StringProperty(required=True)
    messagetype = db.StringProperty(required=True)
    added_by = db.UserProperty(auto_current_user=True)
    sent_at = db.DateTimeProperty(auto_now_add=True)

c67a468d9b02e396c184305dc7b1bbb97982cf7b
python/testData/debug/test_multithread.py
Fix tests: add sleep to the main thread in order to stop in the child threads on the slow IronPython.
Python
apache-2.0
FHannes/intellij-community,ThiagoGarciaAlves/intellij-community,allotria/intellij-community,apixandru/intellij-community,asedunov/intellij-community,da1z/intellij-community,fitermay/intellij-community,hurricup/intellij-community,ibinti/intellij-community,idea4bsd/idea4bsd,lucafavatella/intellij-community,mglukhikh/intellij-community,michaelgallacher/intellij-community,retomerz/intellij-community,salguarnieri/intellij-community,semonte/intellij-community,signed/intellij-community,suncycheng/intellij-community,vvv1559/intellij-community,xfournet/intellij-community,youdonghai/intellij-community
## Code Before:
try:
    import thread
except :
    import _thread as thread
import threading

def bar(y):
    z = 100 + y
    print("Z=%d"%z)

t = None

def foo(x):
    global t
    y = x + 1
    print("Y=%d"%y)
    t = threading.Thread(target=bar, args=(y,))
    t.start()

id = thread.start_new_thread(foo, (1,))

while True:
    pass

## Instruction:
Fix tests: add sleep to the main thread in order to stop in the child threads on the slow IronPython.

## Code After:
try:
    import thread
except :
    import _thread as thread
import threading
from time import sleep

def bar(y):
    z = 100 + y
    print("Z=%d"%z)

t = None

def foo(x):
    global t
    y = x + 1
    print("Y=%d"%y)
    t = threading.Thread(target=bar, args=(y,))
    t.start()

id = thread.start_new_thread(foo, (1,))

while True:
    sleep(1)

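The `pass`-to-`sleep(1)` swap above replaces a busy-wait that pins a CPU core and can starve child threads on slow interpreters. When a fixture only needs to keep the main thread alive until workers finish, an event-based wait is a common alternative; a minimal sketch of that pattern (not the test file above, just the general idea):

import threading
import time

done = threading.Event()

def worker():
    time.sleep(0.1)  # stand-in for real work
    done.set()

threading.Thread(target=worker).start()

# Blocks without spinning, unlike `while True: pass`.
done.wait()
print("worker finished")
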
35c643aef0cb6b194e62cd5f2fcf7df98bf46870
django_lightweight_queue/management/commands/queue_deduplicate.py
Improve output when no deduplication happened
Python
bsd-3-clause
thread/django-lightweight-queue,thread/django-lightweight-queue
## Code Before:
from django.core.management.base import BaseCommand, CommandError

from ...utils import get_backend

class Command(BaseCommand):
    help = "Command to deduplicate tasks in a redis-backed queue"

    def add_arguments(self, parser):
        parser.add_argument(
            'queue',
            action='store',
            help="The queue to deduplicate",
        )

    def handle(self, queue, **options):
        backend = get_backend(queue)

        if not hasattr(backend, 'deduplicate'):
            raise CommandError(
                "Configured backend '%s.%s' doesn't support deduplication" % (
                    type(backend).__module__,
                    type(backend).__name__,
                ),
            )

        original_size, new_size = backend.deduplicate(queue)

        self.stdout.write(
            "Deduplication reduced the queue from %d jobs to %d job(s)" % (
                original_size,
                new_size,
            ),
        )

## Instruction:
Improve output when no deduplication happened

## Code After:
from django.core.management.base import BaseCommand, CommandError

from ...utils import get_backend

class Command(BaseCommand):
    help = "Command to deduplicate tasks in a redis-backed queue"

    def add_arguments(self, parser):
        parser.add_argument(
            'queue',
            action='store',
            help="The queue to deduplicate",
        )

    def handle(self, queue, **options):
        backend = get_backend(queue)

        if not hasattr(backend, 'deduplicate'):
            raise CommandError(
                "Configured backend '%s.%s' doesn't support deduplication" % (
                    type(backend).__module__,
                    type(backend).__name__,
                ),
            )

        original_size, new_size = backend.deduplicate(queue)

        if original_size == new_size:
            self.stdout.write(
                "No duplicate jobs detected (queue length remains %d)" % (
                    original_size,
                ),
            )
        else:
            self.stdout.write(
                "Deduplication reduced the queue from %d jobs to %d job(s)" % (
                    original_size,
                    new_size,
                ),
            )

6761d8230d59031ad5183615f68a71e51f5f0309
elasticmock/__init__.py
Allow ignored params to Elasticsearch
Python
mit
vrcmarcos/elasticmock
## Code Before:
from functools import wraps

from mock import patch

from elasticmock.fake_elasticsearch import FakeElasticsearch

ELASTIC_INSTANCES = {}

def _get_elasticmock(hosts=None):
    elastic_key = 'localhost:9200' if hosts is None else '{0}:{1}'.format(hosts[0].get('host'), hosts[0].get('port'))

    if elastic_key in ELASTIC_INSTANCES:
        connection = ELASTIC_INSTANCES.get(elastic_key)
    else:
        connection = FakeElasticsearch()
        ELASTIC_INSTANCES[elastic_key] = connection
    return connection

def elasticmock(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        ELASTIC_INSTANCES.clear()
        with patch('elasticsearch.Elasticsearch', _get_elasticmock):
            result = f(*args, **kwargs)
        return result
    return decorated

## Instruction:
Allow ignored params to Elasticsearch

## Code After:
from functools import wraps

from mock import patch

from elasticmock.fake_elasticsearch import FakeElasticsearch

ELASTIC_INSTANCES = {}

def _get_elasticmock(hosts=None, *args, **kwargs):
    elastic_key = 'localhost:9200' if hosts is None else '{0}:{1}'.format(hosts[0].get('host'), hosts[0].get('port'))

    if elastic_key in ELASTIC_INSTANCES:
        connection = ELASTIC_INSTANCES.get(elastic_key)
    else:
        connection = FakeElasticsearch()
        ELASTIC_INSTANCES[elastic_key] = connection
    return connection

def elasticmock(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        ELASTIC_INSTANCES.clear()
        with patch('elasticsearch.Elasticsearch', _get_elasticmock):
            result = f(*args, **kwargs)
        return result
    return decorated

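The `*args, **kwargs` widening above is a general trick when patching a constructor: callers of the real `Elasticsearch(...)` may pass extra options (timeouts, auth, and so on), and a strict-signature replacement raises `TypeError`. A standalone illustration (the fake functions here are hypothetical, not elasticmock's API):

def strict_fake(hosts=None):
    return "fake connection"

def tolerant_fake(hosts=None, *args, **kwargs):
    return "fake connection"  # extra options are accepted and ignored

hosts = [{"host": "localhost", "port": 9200}]

try:
    strict_fake(hosts, timeout=30)
except TypeError as exc:
    print("strict replacement fails:", exc)

print(tolerant_fake(hosts, timeout=30))  # works
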
d3c7ae5389f2fd90ae35d87f87e4f7dd01572f4a
numpy/f2py/__init__.py
Add diagnose to f2py package. This makes the tests a bit easier to fix.
ENH: Add diagnose to f2py package. This makes the tests a bit easier to fix.
Python
bsd-3-clause
AustereCuriosity/numpy,Anwesh43/numpy,BMJHayward/numpy,BabeNovelty/numpy,CMartelLML/numpy,ChanderG/numpy,ChristopherHogan/numpy,ContinuumIO/numpy,Dapid/numpy,ESSS/numpy,Eric89GXL/numpy,GaZ3ll3/numpy,GrimDerp/numpy,KaelChen/numpy,Linkid/numpy,MSeifert04/numpy,MaPePeR/numpy,MichaelAquilina/numpy,NextThought/pypy-numpy,SiccarPoint/numpy,Srisai85/numpy,SunghanKim/numpy,ViralLeadership/numpy,WarrenWeckesser/numpy,WillieMaddox/numpy,Yusa95/numpy,abalkin/numpy,ahaldane/numpy,ajdawson/numpy,andsor/numpy,anntzer/numpy,argriffing/numpy,astrofrog/numpy,b-carter/numpy,behzadnouri/numpy,bertrand-l/numpy,bmorris3/numpy,brandon-rhodes/numpy,bringingheavendown/numpy,charris/numpy,chatcannon/numpy,chiffa/numpy,cjermain/numpy,cowlicks/numpy,dato-code/numpy,dch312/numpy,ddasilva/numpy,dimasad/numpy,drasmuss/numpy,dwf/numpy,dwillmer/numpy,ekalosak/numpy,embray/numpy,empeeu/numpy,endolith/numpy,ewmoore/numpy,felipebetancur/numpy,gfyoung/numpy,githubmlai/numpy,gmcastil/numpy,grlee77/numpy,groutr/numpy,hainm/numpy,has2k1/numpy,immerrr/numpy,jakirkham/numpy,jankoslavic/numpy,joferkington/numpy,jonathanunderwood/numpy,jorisvandenbossche/numpy,jschueller/numpy,kirillzhuravlev/numpy,kiwifb/numpy,larsmans/numpy,leifdenby/numpy,madphysicist/numpy,maniteja123/numpy,mathdd/numpy,matthew-brett/numpy,mattip/numpy,mhvk/numpy,mindw/numpy,mingwpy/numpy,moreati/numpy,mortada/numpy,musically-ut/numpy,mwiebe/numpy,naritta/numpy,nbeaver/numpy,nguyentu1602/numpy,njase/numpy,numpy/numpy,numpy/numpy-refactor,ogrisel/numpy,pbrod/numpy,pdebuyl/numpy,pelson/numpy,pizzathief/numpy,pyparallel/numpy,rajathkumarmp/numpy,rgommers/numpy,rherault-insa/numpy,rhythmsosad/numpy,rmcgibbo/numpy,rudimeier/numpy,seberg/numpy,shoyer/numpy,sigma-random/numpy,simongibbons/numpy,sinhrks/numpy,skwbc/numpy,skymanaditya1/numpy,solarjoe/numpy,sonnyhu/numpy,ssanderson/numpy,stefanv/numpy,stuarteberg/numpy,tacaswell/numpy,tdsmith/numpy,trankmichael/numpy,tynn/numpy,utke1/numpy,yiakwy/numpy
## Code Before:
__all__ = ['run_main','compile','f2py_testing']

import os
import sys
import commands

from info import __doc__
import f2py2e

run_main = f2py2e.run_main
main = f2py2e.main
import f2py_testing

def compile(source,
            modulename = 'untitled',
            extra_args = '',
            verbose = 1,
            source_fn = None
            ):
    ''' Build extension module from processing source with f2py.
    Read the source of this function for more information.
    '''
    from numpy.distutils.exec_command import exec_command
    import tempfile
    if source_fn is None:
        fname = os.path.join(tempfile.mktemp()+'.f')
    else:
        fname = source_fn

    f = open(fname,'w')
    f.write(source)
    f.close()

    args = ' -c -m %s %s %s'%(modulename,fname,extra_args)
    c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args)
    s,o = exec_command(c)
    if source_fn is None:
        try:
            os.remove(fname)
        except OSError:
            pass
    return s

## Instruction:
Add diagnose to f2py package. This makes the tests a bit easier to fix.

## Code After:
__all__ = ['run_main','compile','f2py_testing']

import os
import sys
import commands

import f2py2e
import f2py_testing
import diagnose

from info import __doc__

run_main = f2py2e.run_main
main = f2py2e.main

def compile(source,
            modulename = 'untitled',
            extra_args = '',
            verbose = 1,
            source_fn = None
            ):
    ''' Build extension module from processing source with f2py.
    Read the source of this function for more information.
    '''
    from numpy.distutils.exec_command import exec_command
    import tempfile
    if source_fn is None:
        fname = os.path.join(tempfile.mktemp()+'.f')
    else:
        fname = source_fn

    f = open(fname,'w')
    f.write(source)
    f.close()

    args = ' -c -m %s %s %s'%(modulename,fname,extra_args)
    c = '%s -c "import numpy.f2py as f2py2e;f2py2e.main()" %s' %(sys.executable,args)
    s,o = exec_command(c)
    if source_fn is None:
        try:
            os.remove(fname)
        except OSError:
            pass
    return s

7bb93bfdf2b75ba8df0983d058854a1d00d75c16
geotrek/feedback/tests/test_commands.py
Test dry run mode in erase_mail
Python
bsd-2-clause
makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin
from io import StringIO from django.core.management import call_command from django.test import TestCase from django.utils import timezone from geotrek.feedback.models import Report from geotrek.feedback.factories import ReportFactory class TestRemoveEmailsOlders(TestCase): """Test command erase_emails, if older emails are removed""" def setUp(self): # Create two reports - self.old_report = ReportFactory() + self.old_report = ReportFactory(email="[email protected]") - self.recent_report = ReportFactory() + self.recent_report = ReportFactory(email="[email protected]") # Modify date_insert for old_report one_year_one_day = timezone.timedelta(days=370) self.old_report.date_insert = timezone.now() - one_year_one_day self.old_report.save() def test_erase_old_emails(self): output = StringIO() call_command('erase_emails', stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "") - self.assertEqual(old_report.__str__(), "Anonymized report") + self.assertEqual(old_report.__str__(), "Anonymous report") + def test_dry_run_command(self): + """Test if dry_run mode keeps emails""" + output = StringIO() + call_command('erase_emails', dry_run=True, stdout=output) + old_report = Report.objects.get(id=self.old_report.id) + self.assertEqual(old_report.email, "[email protected]") +
## Code Before: from io import StringIO from django.core.management import call_command from django.test import TestCase from django.utils import timezone from geotrek.feedback.models import Report from geotrek.feedback.factories import ReportFactory class TestRemoveEmailsOlders(TestCase): """Test command erase_emails, if older emails are removed""" def setUp(self): # Create two reports self.old_report = ReportFactory() self.recent_report = ReportFactory() # Modify date_insert for old_report one_year_one_day = timezone.timedelta(days=370) self.old_report.date_insert = timezone.now() - one_year_one_day self.old_report.save() def test_erase_old_emails(self): output = StringIO() call_command('erase_emails', stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "") self.assertEqual(old_report.__str__(), "Anonymized report") ## Instruction: Test dry run mode in erase_mail ## Code After: from io import StringIO from django.core.management import call_command from django.test import TestCase from django.utils import timezone from geotrek.feedback.models import Report from geotrek.feedback.factories import ReportFactory class TestRemoveEmailsOlders(TestCase): """Test command erase_emails, if older emails are removed""" def setUp(self): # Create two reports self.old_report = ReportFactory(email="[email protected]") self.recent_report = ReportFactory(email="[email protected]") # Modify date_insert for old_report one_year_one_day = timezone.timedelta(days=370) self.old_report.date_insert = timezone.now() - one_year_one_day self.old_report.save() def test_erase_old_emails(self): output = StringIO() call_command('erase_emails', stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "") self.assertEqual(old_report.__str__(), "Anonymous report") def test_dry_run_command(self): """Test if dry_run mode keeps emails""" output = StringIO() call_command('erase_emails', dry_run=True, stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "[email protected]")
// ... existing code ... def setUp(self): # Create two reports self.old_report = ReportFactory(email="[email protected]") self.recent_report = ReportFactory(email="[email protected]") # Modify date_insert for old_report one_year_one_day = timezone.timedelta(days=370) // ... modified code ... call_command('erase_emails', stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "") self.assertEqual(old_report.__str__(), "Anonymous report") def test_dry_run_command(self): """Test if dry_run mode keeps emails""" output = StringIO() call_command('erase_emails', dry_run=True, stdout=output) old_report = Report.objects.get(id=self.old_report.id) self.assertEqual(old_report.email, "[email protected]") // ... rest of the code ...
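The tests above drive an erase_emails management command with a dry_run option, but the command itself is not part of this record. Below is a minimal sketch of how such a command is typically wired, assuming a Report model with email and date_insert fields and a one-year cutoff; the real Geotrek implementation may differ.

from datetime import timedelta

from django.core.management.base import BaseCommand
from django.utils import timezone

from geotrek.feedback.models import Report  # import path taken from the tests above


class Command(BaseCommand):
    help = "Erase email addresses from reports older than one year"

    def add_arguments(self, parser):
        # call_command('erase_emails', dry_run=True) maps onto this flag
        parser.add_argument('--dry-run', action='store_true', dest='dry_run')

    def handle(self, *args, **options):
        cutoff = timezone.now() - timedelta(days=365)
        old_reports = Report.objects.filter(date_insert__lt=cutoff).exclude(email="")
        self.stdout.write("{0} report(s) to anonymize".format(old_reports.count()))
        if not options['dry_run']:
            old_reports.update(email="")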
517c0f2b1f8e6616cc63ec0c3990dcff2922f0e6
pinax/invitations/admin.py
from django.contrib import admin
from django.contrib.auth import get_user_model

from .models import InvitationStat, JoinInvitation

User = get_user_model()


class InvitationStatAdmin(admin.ModelAdmin):
    raw_id_fields = ["user"]
    readonly_fields = ["invites_sent", "invites_accepted"]
    list_display = [
        "user",
        "invites_sent",
        "invites_accepted",
        "invites_allocated",
        "invites_remaining",
        "can_send"
    ]
    list_filter = ["invites_sent", "invites_accepted"]


admin.site.register(
    JoinInvitation,
    list_display=["from_user", "to_user", "sent", "status", "to_user_email"],
    list_filter=["sent", "status"],
    search_fields=["from_user__{}".format(User.USERNAME_FIELD)]
)
admin.site.register(InvitationStat, InvitationStatAdmin)
from django.contrib import admin
from django.contrib.auth import get_user_model

from .models import InvitationStat, JoinInvitation

User = get_user_model()


class InvitationStatAdmin(admin.ModelAdmin):
    raw_id_fields = ["user"]
    readonly_fields = ["invites_sent", "invites_accepted"]
    list_display = [
        "user",
        "invites_sent",
        "invites_accepted",
        "invites_allocated",
        "invites_remaining",
        "can_send"
    ]
    list_filter = ["invites_sent", "invites_accepted"]


admin.site.register(
    JoinInvitation,
    list_display=["from_user", "to_user", "sent", "status", "to_user_email"],
    list_filter=["sent", "status"],
    search_fields=[f"from_user__{User.USERNAME_FIELD}"]
)
admin.site.register(InvitationStat, InvitationStatAdmin)
Use f-strings in place of `str.format()`
Python
unknown
pinax/pinax-invitations,eldarion/kaleo
from django.contrib import admin from django.contrib.auth import get_user_model from .models import InvitationStat, JoinInvitation User = get_user_model() class InvitationStatAdmin(admin.ModelAdmin): raw_id_fields = ["user"] readonly_fields = ["invites_sent", "invites_accepted"] list_display = [ "user", "invites_sent", "invites_accepted", "invites_allocated", "invites_remaining", "can_send" ] list_filter = ["invites_sent", "invites_accepted"] admin.site.register( JoinInvitation, list_display=["from_user", "to_user", "sent", "status", "to_user_email"], list_filter=["sent", "status"], - search_fields=["from_user__{}".format(User.USERNAME_FIELD)] + search_fields=[f"from_user__{User.USERNAME_FIELD}"] ) admin.site.register(InvitationStat, InvitationStatAdmin)
## Code Before: from django.contrib import admin from django.contrib.auth import get_user_model from .models import InvitationStat, JoinInvitation User = get_user_model() class InvitationStatAdmin(admin.ModelAdmin): raw_id_fields = ["user"] readonly_fields = ["invites_sent", "invites_accepted"] list_display = [ "user", "invites_sent", "invites_accepted", "invites_allocated", "invites_remaining", "can_send" ] list_filter = ["invites_sent", "invites_accepted"] admin.site.register( JoinInvitation, list_display=["from_user", "to_user", "sent", "status", "to_user_email"], list_filter=["sent", "status"], search_fields=["from_user__{}".format(User.USERNAME_FIELD)] ) admin.site.register(InvitationStat, InvitationStatAdmin) ## Instruction: Use f-strings in place of `str.format()` ## Code After: from django.contrib import admin from django.contrib.auth import get_user_model from .models import InvitationStat, JoinInvitation User = get_user_model() class InvitationStatAdmin(admin.ModelAdmin): raw_id_fields = ["user"] readonly_fields = ["invites_sent", "invites_accepted"] list_display = [ "user", "invites_sent", "invites_accepted", "invites_allocated", "invites_remaining", "can_send" ] list_filter = ["invites_sent", "invites_accepted"] admin.site.register( JoinInvitation, list_display=["from_user", "to_user", "sent", "status", "to_user_email"], list_filter=["sent", "status"], search_fields=[f"from_user__{User.USERNAME_FIELD}"] ) admin.site.register(InvitationStat, InvitationStatAdmin)
... JoinInvitation, list_display=["from_user", "to_user", "sent", "status", "to_user_email"], list_filter=["sent", "status"], search_fields=[f"from_user__{User.USERNAME_FIELD}"] ) admin.site.register(InvitationStat, InvitationStatAdmin) ...
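For readers skimming the diff: an f-string embeds the expression in place and requires Python 3.6+, which is the real prerequisite of this change. A standalone comparison, with username_field standing in for User.USERNAME_FIELD:

username_field = "username"  # stand-in for User.USERNAME_FIELD

old_style = ["from_user__{}".format(username_field)]
new_style = [f"from_user__{username_field}"]

# both build the same admin lookup path
assert old_style == new_style == ["from_user__username"]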
440593615adca029b11575e604d251c7b68942b4
api/licenses/serializers.py
from rest_framework import serializers as ser

from api.base.serializers import (
    JSONAPISerializer, LinksField, IDField, TypeField
)
from api.base.utils import absolute_reverse


class LicenseSerializer(JSONAPISerializer):
    filterable_fields = frozenset([
        'name',
        'id',
    ])
    non_anonymized_fields = ['type']

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    name = ser.CharField(required=True, help_text='License name')
    text = ser.CharField(required=True, help_text='Full text of the license')
    required_fields = ser.ListField(source='properties', read_only=True,
                                    help_text='Fields required for this license (provided to help front-end validators)')

    links = LinksField({'self': 'get_absolute_url'})

    class Meta:
        type_ = 'licenses'

    def get_absolute_url(self, obj):
        return absolute_reverse('licenses:license-detail', kwargs={
            'license_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
from rest_framework import serializers as ser

from api.base.serializers import (
    JSONAPISerializer, LinksField, IDField, TypeField
)
from api.base.utils import absolute_reverse


class LicenseSerializer(JSONAPISerializer):
    filterable_fields = frozenset([
        'name',
        'id',
    ])
    non_anonymized_fields = ['type']

    id = IDField(source='_id', read_only=True)
    type = TypeField()
    name = ser.CharField(required=True, help_text='License name')
    text = ser.CharField(required=True, help_text='Full text of the license')
    url = ser.URLField(required=False, help_text='URL for the license')
    required_fields = ser.ListField(source='properties', read_only=True,
                                    help_text='Fields required for this license (provided to help front-end validators)')

    links = LinksField({'self': 'get_absolute_url'})

    class Meta:
        type_ = 'licenses'

    def get_absolute_url(self, obj):
        return absolute_reverse('licenses:license-detail', kwargs={
            'license_id': obj._id,
            'version': self.context['request'].parser_context['kwargs']['version']
        })
Add url to the license api serializer
Python
apache-2.0
felliott/osf.io,baylee-d/osf.io,sloria/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,adlius/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,felliott/osf.io,mfraezz/osf.io,cslzchen/osf.io,icereval/osf.io,felliott/osf.io,adlius/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,caseyrollins/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,mfraezz/osf.io,adlius/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,caseyrollins/osf.io,saradbowman/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,adlius/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,aaxelb/osf.io,aaxelb/osf.io,mfraezz/osf.io,erinspace/osf.io,pattisdr/osf.io,mattclark/osf.io,erinspace/osf.io,icereval/osf.io,cslzchen/osf.io,caseyrollins/osf.io,aaxelb/osf.io,cslzchen/osf.io,mfraezz/osf.io,felliott/osf.io,brianjgeiger/osf.io,sloria/osf.io,icereval/osf.io,mattclark/osf.io,sloria/osf.io,erinspace/osf.io
from rest_framework import serializers as ser from api.base.serializers import ( JSONAPISerializer, LinksField, IDField, TypeField ) from api.base.utils import absolute_reverse class LicenseSerializer(JSONAPISerializer): filterable_fields = frozenset([ 'name', 'id', ]) non_anonymized_fields = ['type'] id = IDField(source='_id', read_only=True) type = TypeField() name = ser.CharField(required=True, help_text='License name') text = ser.CharField(required=True, help_text='Full text of the license') + url = ser.URLField(required=False, help_text='URL for the license') required_fields = ser.ListField(source='properties', read_only=True, help_text='Fields required for this license (provided to help front-end validators)') links = LinksField({'self': 'get_absolute_url'}) class Meta: type_ = 'licenses' def get_absolute_url(self, obj): return absolute_reverse('licenses:license-detail', kwargs={ 'license_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version'] })
## Code Before: from rest_framework import serializers as ser from api.base.serializers import ( JSONAPISerializer, LinksField, IDField, TypeField ) from api.base.utils import absolute_reverse class LicenseSerializer(JSONAPISerializer): filterable_fields = frozenset([ 'name', 'id', ]) non_anonymized_fields = ['type'] id = IDField(source='_id', read_only=True) type = TypeField() name = ser.CharField(required=True, help_text='License name') text = ser.CharField(required=True, help_text='Full text of the license') required_fields = ser.ListField(source='properties', read_only=True, help_text='Fields required for this license (provided to help front-end validators)') links = LinksField({'self': 'get_absolute_url'}) class Meta: type_ = 'licenses' def get_absolute_url(self, obj): return absolute_reverse('licenses:license-detail', kwargs={ 'license_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version'] }) ## Instruction: Add url to the license api serializer ## Code After: from rest_framework import serializers as ser from api.base.serializers import ( JSONAPISerializer, LinksField, IDField, TypeField ) from api.base.utils import absolute_reverse class LicenseSerializer(JSONAPISerializer): filterable_fields = frozenset([ 'name', 'id', ]) non_anonymized_fields = ['type'] id = IDField(source='_id', read_only=True) type = TypeField() name = ser.CharField(required=True, help_text='License name') text = ser.CharField(required=True, help_text='Full text of the license') url = ser.URLField(required=False, help_text='URL for the license') required_fields = ser.ListField(source='properties', read_only=True, help_text='Fields required for this license (provided to help front-end validators)') links = LinksField({'self': 'get_absolute_url'}) class Meta: type_ = 'licenses' def get_absolute_url(self, obj): return absolute_reverse('licenses:license-detail', kwargs={ 'license_id': obj._id, 'version': self.context['request'].parser_context['kwargs']['version'] })
... type = TypeField() name = ser.CharField(required=True, help_text='License name') text = ser.CharField(required=True, help_text='Full text of the license') url = ser.URLField(required=False, help_text='URL for the license') required_fields = ser.ListField(source='properties', read_only=True, help_text='Fields required for this license (provided to help front-end validators)') links = LinksField({'self': 'get_absolute_url'}) ...
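Beyond serialization, ser.URLField also validates input on write. A self-contained sketch of that behaviour, assuming Django and djangorestframework are installed; LicenseStub is a stand-in, not the project's serializer:

import django
from django.conf import settings

if not settings.configured:
    settings.configure()  # bare settings are enough for a plain Serializer
    django.setup()

from rest_framework import serializers


class LicenseStub(serializers.Serializer):
    name = serializers.CharField()
    url = serializers.URLField(required=False)


good = LicenseStub(data={"name": "MIT", "url": "https://opensource.org/licenses/MIT"})
assert good.is_valid()

bad = LicenseStub(data={"name": "MIT", "url": "not-a-url"})
assert not bad.is_valid()  # URLField rejects malformed values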
52fd4086b0ef1ac290b393b8cd534a042826b145
scripts/addStitleToBlastTab.py
import sys, argparse

parser = argparse.ArgumentParser()
parser.add_argument('--db2Name', help='tab-separated database lookup: full name file for reference (eg nr or swissprot)')
parser.add_argument('-b','--blast', help='blast input file')
args = parser.parse_args()

blastOrder = []
blastD = {}

with open(args.blast, 'r') as f:
    for line in f:
        line = line.rstrip().split('\t')
	#import pdb; pdb.set_trace()
	blastOrder.append(line[1])
	blastD[line[1]] = line
f.close()

#potentially huge file --> don't want this in memory
with open(args.db2Name, 'r') as f:
    for line in f:
        line = line.rstrip().split('\t')
	hitInfo = blastD.get(line[0], None)
	if hitInfo is not None:
		hitInfo.extend(line[1:])
f.close()

outExtendedTab = open(args.blast, 'w')
for hit in blastOrder:
    outExtendedTab.write('\t'.join(map(str,blastD[hit])) + '\n')
import sys, argparse

parser = argparse.ArgumentParser()
parser.add_argument('--db2Name', help='tab-separated database lookup: full name file for reference (eg nr or swissprot)')
parser.add_argument('-b','--blast', help='blast input file')
args = parser.parse_args()

blastOrder = []
blastD = {}

with open(args.blast, 'r') as f:
    for line in f:
        line = line.rstrip().split('\t')
        #import pdb; pdb.set_trace()
        blastOrder.append(line[1])
        blastD[line[1]] = line
f.close()

#potentially huge file --> don't want this in memory
with open(args.db2Name, 'r') as f:
    for line in f:
        line = line.rstrip().split('\t')
        hitInfo = blastD.get(line[0], None)
        if hitInfo is not None:
            hitInfo.extend(line[1:])
f.close()

outExtendedTab = open(args.blast, 'w')
for hit in blastOrder:
    outExtendedTab.write('\t'.join(map(str,blastD[hit])) + '\n')
Fix mixed indents. replaced tabs with spaces
Python
bsd-3-clause
bluegenes/MakeMyTranscriptome,bluegenes/MakeMyTranscriptome,bluegenes/MakeMyTranscriptome
import sys, argparse parser = argparse.ArgumentParser() parser.add_argument('--db2Name', help='tab-separated database lookup: full name file for reference (eg nr or swissprot)') parser.add_argument('-b','--blast', help='blast input file') args = parser.parse_args() blastOrder = [] blastD = {} with open(args.blast, 'r') as f: for line in f: line = line.rstrip().split('\t') - #import pdb; pdb.set_trace() + #import pdb; pdb.set_trace() - blastOrder.append(line[1]) + blastOrder.append(line[1]) - blastD[line[1]] = line + blastD[line[1]] = line f.close() #potentially huge file --> don't want this in memory with open(args.db2Name, 'r') as f: for line in f: line = line.rstrip().split('\t') - hitInfo = blastD.get(line[0], None) + hitInfo = blastD.get(line[0], None) - if hitInfo is not None: + if hitInfo is not None: - hitInfo.extend(line[1:]) + hitInfo.extend(line[1:]) f.close() outExtendedTab = open(args.blast, 'w') for hit in blastOrder: outExtendedTab.write('\t'.join(map(str,blastD[hit])) + '\n')
## Code Before: import sys, argparse parser = argparse.ArgumentParser() parser.add_argument('--db2Name', help='tab-separated database lookup: full name file for reference (eg nr or swissprot)') parser.add_argument('-b','--blast', help='blast input file') args = parser.parse_args() blastOrder = [] blastD = {} with open(args.blast, 'r') as f: for line in f: line = line.rstrip().split('\t') #import pdb; pdb.set_trace() blastOrder.append(line[1]) blastD[line[1]] = line f.close() #potentially huge file --> don't want this in memory with open(args.db2Name, 'r') as f: for line in f: line = line.rstrip().split('\t') hitInfo = blastD.get(line[0], None) if hitInfo is not None: hitInfo.extend(line[1:]) f.close() outExtendedTab = open(args.blast, 'w') for hit in blastOrder: outExtendedTab.write('\t'.join(map(str,blastD[hit])) + '\n') ## Instruction: Fix mixed indents. replaced tabs with spaces ## Code After: import sys, argparse parser = argparse.ArgumentParser() parser.add_argument('--db2Name', help='tab-separated database lookup: full name file for reference (eg nr or swissprot)') parser.add_argument('-b','--blast', help='blast input file') args = parser.parse_args() blastOrder = [] blastD = {} with open(args.blast, 'r') as f: for line in f: line = line.rstrip().split('\t') #import pdb; pdb.set_trace() blastOrder.append(line[1]) blastD[line[1]] = line f.close() #potentially huge file --> don't want this in memory with open(args.db2Name, 'r') as f: for line in f: line = line.rstrip().split('\t') hitInfo = blastD.get(line[0], None) if hitInfo is not None: hitInfo.extend(line[1:]) f.close() outExtendedTab = open(args.blast, 'w') for hit in blastOrder: outExtendedTab.write('\t'.join(map(str,blastD[hit])) + '\n')
// ... existing code ... with open(args.blast, 'r') as f: for line in f: line = line.rstrip().split('\t') #import pdb; pdb.set_trace() blastOrder.append(line[1]) blastD[line[1]] = line f.close() #potentially huge file --> don't want this in memory // ... modified code ... with open(args.db2Name, 'r') as f: for line in f: line = line.rstrip().split('\t') hitInfo = blastD.get(line[0], None) if hitInfo is not None: hitInfo.extend(line[1:]) f.close() outExtendedTab = open(args.blast, 'w') // ... rest of the code ...
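Because the change above is whitespace-only, it is invisible once the listing is flattened. A small standalone checker along these lines (a sketch, not part of the repository) reports lines whose indentation mixes tabs and spaces, the problem the commit removes:

import sys


def mixed_indent_lines(path):
    """Return line numbers whose leading whitespace mixes tabs and spaces."""
    flagged = []
    with open(path) as handle:
        for number, line in enumerate(handle, start=1):
            indent = line[:len(line) - len(line.lstrip())]
            if '\t' in indent and ' ' in indent:
                flagged.append(number)
    return flagged


if __name__ == '__main__':
    for filename in sys.argv[1:]:
        for lineno in mixed_indent_lines(filename):
            print("{0}:{1}: mixed tabs and spaces".format(filename, lineno))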
999752ec378bbf6d3017f7afc964090c6871b7d4
app/user_administration/tests.py
from __future__ import unicode_literals

from django.test import TestCase


class LoginRequiredTest(TestCase):
    def test_login_required(self):
        response = self.client.get('/')
        self.assertEqual(
            response.status_code,
            302,
            msg="Login Required Validation Failed, Received code {0} instead of 302".format(response.status_code)
        )
        self.assertEqual(
            response.url,
            '/login?next=/',
            msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url)
        )
from __future__ import unicode_literals

from django.test import TestCase
from django.contrib.auth.models import User

from .models import Clients


class LoginRequiredTest(TestCase):
    def test_login_required(self):
        response = self.client.get('/')
        self.assertEqual(
            response.status_code,
            302,
            msg="Login Required Validation Failed, Received code {0} instead of 302".format(response.status_code)
        )
        self.assertEqual(
            response.url,
            '/login?next=/',
            msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url)
        )


class LoginSetup(TestCase):
    def setUp(self):
        self.user = User.objects.create(username='testUser', is_active=True, is_superuser=True)
        self.user.set_password('RHChallenge')
        self.user.save()
        self.client.force_login(self.user)


class ClientsViewTest(LoginSetup):
    def setUp(self):
        super(ClientsViewTest, self).setUp()
        self.custom_client = Clients.objects.create(first_name='RH', last_name='CH', iban='IBAN')

    def test_client_create(self):
        data = {'first_name': 'Rexhep', 'last_name': 'Berlajolli', 'iban': 'XK051506001004471930'}
        self.client.post('/add', data=data)
        clients_count = Clients.objects.count()
        self.assertEqual(
            clients_count,
            2,
            msg="Create client failed, received {0} clients instead of 2".format(clients_count)
        )

    def test_client_create_validation(self):
        data = {'first_name': 'Invalid', 'last_name': 'Data', 'iban': 'INVALID_IBAN'}
        self.client.post('/add', data=data)
        clients_count = Clients.objects.count()
        self.assertEqual(
            clients_count,
            1,
            msg="Insertion of invalid data succeeded, received {0} clients instead of 1".format(clients_count)
        )

    def test_get_clients(self):
        response = self.client.get('/')
        clients = response.context_data['clients']
        self.assertEqual(
            list(clients),
            list(Clients.objects.all()),
            msg="Get clients failed, received clients {0} instead of {1}".format(clients, [self.custom_client])
        )
Add TestCase for ClientListView and ClientCreateView
Python
mit
rexhepberlajolli/RHChallenge,rexhepberlajolli/RHChallenge
from __future__ import unicode_literals from django.test import TestCase + from django.contrib.auth.models import User + from .models import Clients class LoginRequiredTest(TestCase): def test_login_required(self): response = self.client.get('/') self.assertEqual( response.status_code, 302, msg="Login Required Validation Failed, Received code {0} instead of 302".format(response.status_code) ) self.assertEqual( response.url, '/login?next=/', msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url) ) + + class LoginSetup(TestCase): + def setUp(self): + self.user = User.objects.create(username='testUser', is_active=True, is_superuser=True) + self.user.set_password('RHChallenge') + self.user.save() + self.client.force_login(self.user) + + + class ClientsViewTest(LoginSetup): + def setUp(self): + super(ClientsViewTest, self).setUp() + self.custom_client = Clients.objects.create(first_name='RH', last_name='CH', iban='IBAN') + + def test_client_create(self): + data = {'first_name': 'Rexhep', 'last_name': 'Berlajolli', 'iban': 'XK051506001004471930'} + self.client.post('/add', data=data) + clients_count = Clients.objects.count() + self.assertEqual( + clients_count, + 2, + msg="Create client failed, received {0} clients instead of 2".format(clients_count) + ) + + def test_client_create_validation(self): + data = {'first_name': 'Invalid', 'last_name': 'Data', 'iban': 'INVALID_IBAN'} + self.client.post('/add', data=data) + clients_count = Clients.objects.count() + self.assertEqual( + clients_count, + 1, + msg="Insertion of invalid data succeeded, received {0} clients instead of 1".format(clients_count) + ) + + def test_get_clients(self): + response = self.client.get('/') + clients = response.context_data['clients'] + self.assertEqual( + list(clients), + list(Clients.objects.all()), + msg="Get clients failed, received clients {0} instead of {1}".format(clients, [self.custom_client]) + ) +
## Code Before: from __future__ import unicode_literals from django.test import TestCase class LoginRequiredTest(TestCase): def test_login_required(self): response = self.client.get('/') self.assertEqual( response.status_code, 302, msg="Login Required Validation Failed, Received code {0} instead of 302".format(response.status_code) ) self.assertEqual( response.url, '/login?next=/', msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url) ) ## Instruction: Add TestCase for ClientListView and ClientCreateView ## Code After: from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import User from .models import Clients class LoginRequiredTest(TestCase): def test_login_required(self): response = self.client.get('/') self.assertEqual( response.status_code, 302, msg="Login Required Validation Failed, Received code {0} instead of 302".format(response.status_code) ) self.assertEqual( response.url, '/login?next=/', msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url) ) class LoginSetup(TestCase): def setUp(self): self.user = User.objects.create(username='testUser', is_active=True, is_superuser=True) self.user.set_password('RHChallenge') self.user.save() self.client.force_login(self.user) class ClientsViewTest(LoginSetup): def setUp(self): super(ClientsViewTest, self).setUp() self.custom_client = Clients.objects.create(first_name='RH', last_name='CH', iban='IBAN') def test_client_create(self): data = {'first_name': 'Rexhep', 'last_name': 'Berlajolli', 'iban': 'XK051506001004471930'} self.client.post('/add', data=data) clients_count = Clients.objects.count() self.assertEqual( clients_count, 2, msg="Create client failed, received {0} clients instead of 2".format(clients_count) ) def test_client_create_validation(self): data = {'first_name': 'Invalid', 'last_name': 'Data', 'iban': 'INVALID_IBAN'} self.client.post('/add', data=data) clients_count = Clients.objects.count() self.assertEqual( clients_count, 1, msg="Insertion of invalid data succeeded, received {0} clients instead of 1".format(clients_count) ) def test_get_clients(self): response = self.client.get('/') clients = response.context_data['clients'] self.assertEqual( list(clients), list(Clients.objects.all()), msg="Get clients failed, received clients {0} instead of {1}".format(clients, [self.custom_client]) )
// ... existing code ... from __future__ import unicode_literals from django.test import TestCase from django.contrib.auth.models import User from .models import Clients class LoginRequiredTest(TestCase): // ... modified code ... '/login?next=/', msg="Login Required Redirection Failed, Received url {0} instead of /login?next=/".format(response.url) ) class LoginSetup(TestCase): def setUp(self): self.user = User.objects.create(username='testUser', is_active=True, is_superuser=True) self.user.set_password('RHChallenge') self.user.save() self.client.force_login(self.user) class ClientsViewTest(LoginSetup): def setUp(self): super(ClientsViewTest, self).setUp() self.custom_client = Clients.objects.create(first_name='RH', last_name='CH', iban='IBAN') def test_client_create(self): data = {'first_name': 'Rexhep', 'last_name': 'Berlajolli', 'iban': 'XK051506001004471930'} self.client.post('/add', data=data) clients_count = Clients.objects.count() self.assertEqual( clients_count, 2, msg="Create client failed, received {0} clients instead of 2".format(clients_count) ) def test_client_create_validation(self): data = {'first_name': 'Invalid', 'last_name': 'Data', 'iban': 'INVALID_IBAN'} self.client.post('/add', data=data) clients_count = Clients.objects.count() self.assertEqual( clients_count, 1, msg="Insertion of invalid data succeeded, received {0} clients instead of 1".format(clients_count) ) def test_get_clients(self): response = self.client.get('/') clients = response.context_data['clients'] self.assertEqual( list(clients), list(Clients.objects.all()), msg="Get clients failed, received clients {0} instead of {1}".format(clients, [self.custom_client]) ) // ... rest of the code ...
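test_client_create_validation assumes the create view rejects a malformed IBAN. The project's validator is not shown in this record; one plausible shape for it, runnable with nothing but Django installed and using an illustrative regex, is:

from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator

# Illustrative pattern only: two letters, two check digits, 11-30 alphanumerics.
iban_validator = RegexValidator(
    regex=r"^[A-Z]{2}\d{2}[A-Z0-9]{11,30}$",
    message="Enter a valid IBAN.",
)

iban_validator("XK051506001004471930")  # the value the test expects to pass

try:
    iban_validator("INVALID_IBAN")  # the value the test expects to fail
except ValidationError as exc:
    print(exc.messages)  # ['Enter a valid IBAN.']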
e12a4504da0b40ad66e116aa9a0373d1abc6d160
mongo_test/handlers.py
import subprocess
import commands
import re
import signal
import os
import logging

_logger = logging.getLogger(__name__)

TARGET_DIR='test/target'
PORT='27018'


def startup():
    _logger.info("about to start mongod")
    p = subprocess.Popen([commands.getoutput('which mongod'),
                          '--port', PORT,
                          '--fork',
                          '--dbpath', '{0}/db'.format(TARGET_DIR),
                          '--logpath', '{0}/mongo.log'.format(TARGET_DIR),
                          '--smallfiles', '--noprealloc'])
    p.wait()
    _logger.info("mongod started successfully")


def teardown():
    _logger.info("about to stop mongod")
    with open('{0}/db/mongod.lock'.format(TARGET_DIR), 'r') as log_file:
        first_line = log_file.readline()
        pid = int(first_line)
        os.kill(pid, signal.SIGTERM)
    _logger.info("mongodb stopped")
import subprocess
import commands
import signal
import os
import logging

_logger = logging.getLogger(__name__)

TARGET_DIR='test/target'
PORT='27018'


def startup(mongo_path):
    _logger.info("about to start mongod")
    path = mongo_path or commands.getoutput('which mongod')
    p = subprocess.Popen([path,
                          '--port', PORT,
                          '--fork',
                          '--dbpath', '{0}/db'.format(TARGET_DIR),
                          '--logpath', '{0}/mongo.log'.format(TARGET_DIR),
                          '--smallfiles', '--noprealloc'])
    p.wait()
    _logger.info("mongod started successfully")


def teardown():
    _logger.info("about to stop mongod")
    with open('{0}/db/mongod.lock'.format(TARGET_DIR), 'r') as log_file:
        first_line = log_file.readline()
        pid = int(first_line)
        os.kill(pid, signal.SIGTERM)
    _logger.info("mongodb stopped")
Allow the option of specifying a path in startup
Python
mit
idbentley/MongoTest
import subprocess import commands - import re import signal import os import logging _logger = logging.getLogger(__name__) TARGET_DIR='test/target' PORT='27018' - def startup(): + def startup(mongo_path): _logger.info("about to start mongod") - p = subprocess.Popen([commands.getoutput('which mongod'), + path = mongo_path or commands.getoutput('which mongod') + p = subprocess.Popen([path, '--port', PORT, '--fork', '--dbpath', '{0}/db'.format(TARGET_DIR), '--logpath', '{0}/mongo.log'.format(TARGET_DIR), '--smallfiles', '--noprealloc']) p.wait() _logger.info("mongod started successfully") def teardown(): _logger.info("about to stop mongod") with open('{0}/db/mongod.lock'.format(TARGET_DIR), 'r') as log_file: first_line = log_file.readline() pid = int(first_line) os.kill(pid, signal.SIGTERM) _logger.info("mongodb stopped")
## Code Before: import subprocess import commands import re import signal import os import logging _logger = logging.getLogger(__name__) TARGET_DIR='test/target' PORT='27018' def startup(): _logger.info("about to start mongod") p = subprocess.Popen([commands.getoutput('which mongod'), '--port', PORT, '--fork', '--dbpath', '{0}/db'.format(TARGET_DIR), '--logpath', '{0}/mongo.log'.format(TARGET_DIR), '--smallfiles', '--noprealloc']) p.wait() _logger.info("mongod started successfully") def teardown(): _logger.info("about to stop mongod") with open('{0}/db/mongod.lock'.format(TARGET_DIR), 'r') as log_file: first_line = log_file.readline() pid = int(first_line) os.kill(pid, signal.SIGTERM) _logger.info("mongodb stopped") ## Instruction: Allow the option of specifying a path in startup ## Code After: import subprocess import commands import signal import os import logging _logger = logging.getLogger(__name__) TARGET_DIR='test/target' PORT='27018' def startup(mongo_path): _logger.info("about to start mongod") path = mongo_path or commands.getoutput('which mongod') p = subprocess.Popen([path, '--port', PORT, '--fork', '--dbpath', '{0}/db'.format(TARGET_DIR), '--logpath', '{0}/mongo.log'.format(TARGET_DIR), '--smallfiles', '--noprealloc']) p.wait() _logger.info("mongod started successfully") def teardown(): _logger.info("about to stop mongod") with open('{0}/db/mongod.lock'.format(TARGET_DIR), 'r') as log_file: first_line = log_file.readline() pid = int(first_line) os.kill(pid, signal.SIGTERM) _logger.info("mongodb stopped")
... import subprocess import commands import signal import os import logging ... TARGET_DIR='test/target' PORT='27018' def startup(mongo_path): _logger.info("about to start mongod") path = mongo_path or commands.getoutput('which mongod') p = subprocess.Popen([path, '--port', PORT, '--fork', '--dbpath', '{0}/db'.format(TARGET_DIR), ...
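A usage sketch for the new signature (Python 2, since the module relies on commands): passing None keeps the old which-mongod lookup, while an explicit path pins a specific build. The binary location below is hypothetical:

from mongo_test import handlers

# previous behaviour: resolve mongod from PATH
handlers.startup(None)
handlers.teardown()

# new option: pin an explicit build, e.g. on CI boxes with several versions
handlers.startup('/opt/mongodb-2.6/bin/mongod')
handlers.teardown()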
6f60d2c76ece73e8f37f2ae1014cc26b485495d0
numpy/distutils/setup.py
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('distutils',parent_package,top_path)
    config.add_subpackage('command')
    config.add_subpackage('fcompiler')
    config.add_data_dir('tests')
    config.add_data_files('site.cfg')
    config.make_config_py()
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('distutils',parent_package,top_path)
    config.add_subpackage('command')
    config.add_subpackage('fcompiler')
    config.add_data_dir('tests')
    config.add_data_files('site.cfg')
    config.add_data_files('mingw/gfortran_vs2003_hack.c')
    config.make_config_py()
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
Make the gfortran/vs2003 hack source file known to distutils.
Python
bsd-3-clause
simongibbons/numpy,ContinuumIO/numpy,BabeNovelty/numpy,rherault-insa/numpy,cjermain/numpy,madphysicist/numpy,ahaldane/numpy,BabeNovelty/numpy,abalkin/numpy,naritta/numpy,dimasad/numpy,dch312/numpy,GrimDerp/numpy,MichaelAquilina/numpy,has2k1/numpy,jankoslavic/numpy,sonnyhu/numpy,madphysicist/numpy,ogrisel/numpy,gmcastil/numpy,Anwesh43/numpy,dwillmer/numpy,Linkid/numpy,larsmans/numpy,dato-code/numpy,embray/numpy,ajdawson/numpy,njase/numpy,SiccarPoint/numpy,ajdawson/numpy,KaelChen/numpy,MaPePeR/numpy,shoyer/numpy,seberg/numpy,kirillzhuravlev/numpy,embray/numpy,SunghanKim/numpy,mindw/numpy,brandon-rhodes/numpy,ssanderson/numpy,simongibbons/numpy,mattip/numpy,embray/numpy,cowlicks/numpy,skymanaditya1/numpy,b-carter/numpy,dwillmer/numpy,seberg/numpy,MSeifert04/numpy,brandon-rhodes/numpy,sinhrks/numpy,mattip/numpy,endolith/numpy,MSeifert04/numpy,nguyentu1602/numpy,bmorris3/numpy,andsor/numpy,cjermain/numpy,brandon-rhodes/numpy,madphysicist/numpy,pdebuyl/numpy,ewmoore/numpy,ChanderG/numpy,WillieMaddox/numpy,ddasilva/numpy,ahaldane/numpy,rgommers/numpy,mindw/numpy,behzadnouri/numpy,mingwpy/numpy,Anwesh43/numpy,pelson/numpy,stefanv/numpy,Yusa95/numpy,pyparallel/numpy,NextThought/pypy-numpy,leifdenby/numpy,stuarteberg/numpy,jankoslavic/numpy,pbrod/numpy,dwf/numpy,Srisai85/numpy,sonnyhu/numpy,sinhrks/numpy,pelson/numpy,ahaldane/numpy,chiffa/numpy,dch312/numpy,NextThought/pypy-numpy,mhvk/numpy,ChanderG/numpy,sonnyhu/numpy,moreati/numpy,ewmoore/numpy,matthew-brett/numpy,rmcgibbo/numpy,dato-code/numpy,KaelChen/numpy,musically-ut/numpy,moreati/numpy,GrimDerp/numpy,Eric89GXL/numpy,joferkington/numpy,matthew-brett/numpy,hainm/numpy,mindw/numpy,bmorris3/numpy,jakirkham/numpy,dimasad/numpy,MichaelAquilina/numpy,behzadnouri/numpy,numpy/numpy-refactor,Yusa95/numpy,SiccarPoint/numpy,trankmichael/numpy,dwillmer/numpy,seberg/numpy,bringingheavendown/numpy,mattip/numpy,ekalosak/numpy,utke1/numpy,immerrr/numpy,felipebetancur/numpy,endolith/numpy,utke1/numpy,ewmoore/numpy,grlee77/numpy,simongibbons/numpy,njase/numpy,jorisvandenbossche/numpy,matthew-brett/numpy,stuarteberg/numpy,joferkington/numpy,charris/numpy,grlee77/numpy,ssanderson/numpy,sinhrks/numpy,CMartelLML/numpy,groutr/numpy,GaZ3ll3/numpy,joferkington/numpy,bmorris3/numpy,pizzathief/numpy,chatcannon/numpy,grlee77/numpy,has2k1/numpy,pdebuyl/numpy,tacaswell/numpy,pizzathief/numpy,ContinuumIO/numpy,dwf/numpy,NextThought/pypy-numpy,simongibbons/numpy,jschueller/numpy,rudimeier/numpy,abalkin/numpy,numpy/numpy-refactor,MaPePeR/numpy,ddasilva/numpy,ddasilva/numpy,ChristopherHogan/numpy,rmcgibbo/numpy,bringingheavendown/numpy,rhythmsosad/numpy,dwillmer/numpy,andsor/numpy,ogrisel/numpy,dato-code/numpy,Anwesh43/numpy,kirillzhuravlev/numpy,immerrr/numpy,mindw/numpy,pizzathief/numpy,pyparallel/numpy,stefanv/numpy,argriffing/numpy,drasmuss/numpy,has2k1/numpy,njase/numpy,jonathanunderwood/numpy,naritta/numpy,gfyoung/numpy,mwiebe/numpy,felipebetancur/numpy,ESSS/numpy,charris/numpy,numpy/numpy,MSeifert04/numpy,naritta/numpy,Srisai85/numpy,ChristopherHogan/numpy,BMJHayward/numpy,maniteja123/numpy,rgommers/numpy,BMJHayward/numpy,CMartelLML/numpy,rherault-insa/numpy,mhvk/numpy,groutr/numpy,musically-ut/numpy,ChristopherHogan/numpy,seberg/numpy,stefanv/numpy,mathdd/numpy,empeeu/numpy,nbeaver/numpy,charris/numpy,chatcannon/numpy,ViralLeadership/numpy,Yusa95/numpy,ahaldane/numpy,andsor/numpy,mingwpy/numpy,Anwesh43/numpy,CMartelLML/numpy,tynn/numpy,mathdd/numpy,mathdd/numpy,sigma-random/numpy,utke1/numpy,GrimDerp/numpy,rgommers/numpy,stefanv/numpy,SiccarPoint/numpy,nbeaver/numpy,Li
nkid/numpy,nguyentu1602/numpy,dwf/numpy,stefanv/numpy,skwbc/numpy,mhvk/numpy,bmorris3/numpy,rudimeier/numpy,pbrod/numpy,mortada/numpy,anntzer/numpy,rmcgibbo/numpy,grlee77/numpy,pizzathief/numpy,jschueller/numpy,kirillzhuravlev/numpy,ekalosak/numpy,tacaswell/numpy,pelson/numpy,rhythmsosad/numpy,musically-ut/numpy,ssanderson/numpy,grlee77/numpy,pelson/numpy,BabeNovelty/numpy,rmcgibbo/numpy,astrofrog/numpy,yiakwy/numpy,cowlicks/numpy,jakirkham/numpy,jorisvandenbossche/numpy,mortada/numpy,tynn/numpy,WillieMaddox/numpy,nbeaver/numpy,skwbc/numpy,Srisai85/numpy,numpy/numpy,kiwifb/numpy,andsor/numpy,ESSS/numpy,mingwpy/numpy,pdebuyl/numpy,githubmlai/numpy,argriffing/numpy,rherault-insa/numpy,GaZ3ll3/numpy,abalkin/numpy,pizzathief/numpy,matthew-brett/numpy,jorisvandenbossche/numpy,ewmoore/numpy,SunghanKim/numpy,astrofrog/numpy,gmcastil/numpy,ewmoore/numpy,rajathkumarmp/numpy,sinhrks/numpy,dato-code/numpy,numpy/numpy-refactor,MaPePeR/numpy,Dapid/numpy,dimasad/numpy,matthew-brett/numpy,endolith/numpy,NextThought/pypy-numpy,mortada/numpy,sigma-random/numpy,kiwifb/numpy,nguyentu1602/numpy,bertrand-l/numpy,b-carter/numpy,rhythmsosad/numpy,ekalosak/numpy,sonnyhu/numpy,shoyer/numpy,musically-ut/numpy,skwbc/numpy,yiakwy/numpy,numpy/numpy-refactor,gfyoung/numpy,sigma-random/numpy,jonathanunderwood/numpy,tacaswell/numpy,joferkington/numpy,hainm/numpy,shoyer/numpy,ViralLeadership/numpy,rajathkumarmp/numpy,ekalosak/numpy,astrofrog/numpy,mwiebe/numpy,BMJHayward/numpy,MSeifert04/numpy,kiwifb/numpy,WarrenWeckesser/numpy,tdsmith/numpy,AustereCuriosity/numpy,larsmans/numpy,skymanaditya1/numpy,pbrod/numpy,ogrisel/numpy,GaZ3ll3/numpy,jonathanunderwood/numpy,nguyentu1602/numpy,chatcannon/numpy,cowlicks/numpy,SunghanKim/numpy,jakirkham/numpy,pbrod/numpy,ajdawson/numpy,mathdd/numpy,rhythmsosad/numpy,solarjoe/numpy,hainm/numpy,MichaelAquilina/numpy,ChanderG/numpy,embray/numpy,mortada/numpy,WarrenWeckesser/numpy,WarrenWeckesser/numpy,chiffa/numpy,jakirkham/numpy,felipebetancur/numpy,CMartelLML/numpy,Dapid/numpy,bringingheavendown/numpy,githubmlai/numpy,jankoslavic/numpy,gfyoung/numpy,embray/numpy,mhvk/numpy,tynn/numpy,skymanaditya1/numpy,ahaldane/numpy,astrofrog/numpy,argriffing/numpy,jankoslavic/numpy,simongibbons/numpy,immerrr/numpy,SiccarPoint/numpy,mingwpy/numpy,has2k1/numpy,AustereCuriosity/numpy,leifdenby/numpy,brandon-rhodes/numpy,numpy/numpy,ViralLeadership/numpy,cjermain/numpy,solarjoe/numpy,githubmlai/numpy,BabeNovelty/numpy,Eric89GXL/numpy,jakirkham/numpy,empeeu/numpy,ogrisel/numpy,numpy/numpy,ajdawson/numpy,ogrisel/numpy,bertrand-l/numpy,WillieMaddox/numpy,KaelChen/numpy,bertrand-l/numpy,Linkid/numpy,kirillzhuravlev/numpy,charris/numpy,rudimeier/numpy,solarjoe/numpy,drasmuss/numpy,ESSS/numpy,Dapid/numpy,dch312/numpy,yiakwy/numpy,cjermain/numpy,trankmichael/numpy,dimasad/numpy,naritta/numpy,dwf/numpy,BMJHayward/numpy,rudimeier/numpy,Srisai85/numpy,stuarteberg/numpy,drasmuss/numpy,groutr/numpy,larsmans/numpy,shoyer/numpy,jorisvandenbossche/numpy,yiakwy/numpy,KaelChen/numpy,numpy/numpy-refactor,MichaelAquilina/numpy,skymanaditya1/numpy,tdsmith/numpy,GrimDerp/numpy,madphysicist/numpy,WarrenWeckesser/numpy,moreati/numpy,madphysicist/numpy,Eric89GXL/numpy,empeeu/numpy,astrofrog/numpy,pyparallel/numpy,jorisvandenbossche/numpy,mhvk/numpy,Linkid/numpy,maniteja123/numpy,cowlicks/numpy,SunghanKim/numpy,WarrenWeckesser/numpy,immerrr/numpy,behzadnouri/numpy,chiffa/numpy,gmcastil/numpy,stuarteberg/numpy,pelson/numpy,shoyer/numpy,anntzer/numpy,ChristopherHogan/numpy,ChanderG/numpy,felipebetancur/numpy,maniteja123/numpy,Austere
Curiosity/numpy,dwf/numpy,rgommers/numpy,larsmans/numpy,jschueller/numpy,pdebuyl/numpy,leifdenby/numpy,sigma-random/numpy,MaPePeR/numpy,mattip/numpy,tdsmith/numpy,MSeifert04/numpy,trankmichael/numpy,rajathkumarmp/numpy,mwiebe/numpy,tdsmith/numpy,ContinuumIO/numpy,Yusa95/numpy,pbrod/numpy,GaZ3ll3/numpy,dch312/numpy,trankmichael/numpy,Eric89GXL/numpy,githubmlai/numpy,anntzer/numpy,anntzer/numpy,endolith/numpy,hainm/numpy,empeeu/numpy,b-carter/numpy,rajathkumarmp/numpy,jschueller/numpy
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('distutils',parent_package,top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') config.add_data_dir('tests') config.add_data_files('site.cfg') + config.add_data_files('mingw/gfortran_vs2003_hack.c') config.make_config_py() return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration)
## Code Before: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('distutils',parent_package,top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') config.add_data_dir('tests') config.add_data_files('site.cfg') config.make_config_py() return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration) ## Instruction: Make the gfortran/vs2003 hack source file known to distutils. ## Code After: def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('distutils',parent_package,top_path) config.add_subpackage('command') config.add_subpackage('fcompiler') config.add_data_dir('tests') config.add_data_files('site.cfg') config.add_data_files('mingw/gfortran_vs2003_hack.c') config.make_config_py() return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration)
... config.add_subpackage('fcompiler') config.add_data_dir('tests') config.add_data_files('site.cfg') config.add_data_files('mingw/gfortran_vs2003_hack.c') config.make_config_py() return config ...
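Registering the .c file with add_data_files makes it ship inside the installed numpy.distutils package, where the mingw tooling can find it. A quick post-install check, assuming a numpy build that still bundles numpy.distutils:

import os

import numpy.distutils

hack = os.path.join(os.path.dirname(numpy.distutils.__file__),
                    'mingw', 'gfortran_vs2003_hack.c')
print(hack, os.path.exists(hack))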
d07a7ad25f69a18c57c50d6c32df212e1f987bd4
www/tests/test_collections.py
import collections

_d=collections.defaultdict(int)
_d['a']+=1
_d['a']+=2
_d['b']+=4

assert _d['a'] == 3
assert _d['b'] == 4

s = 'mississippi'
for k in s:
    _d[k] += 1

_values=list(_d.values())
_values.sort()
assert _values == [1, 2, 3, 4, 4, 4]

_keys=list(_d.keys())
_keys.sort()
assert _keys == ['a', 'b', 'i', 'm', 'p', 's']

#now try with default being list (ie, empty list)
_listdict=collections.defaultdict(list)
for _i in range(10):
    _listdict['mylist'].append(_i)

assert _listdict['not called'] == []
assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9]
import collections

_d=collections.defaultdict(int)
_d['a']+=1
_d['a']+=2
_d['b']+=4

assert _d['a'] == 3
assert _d['b'] == 4

s = 'mississippi'
for k in s:
    _d[k] += 1

_values=list(_d.values())
_values.sort()
assert _values == [1, 2, 3, 4, 4, 4]

_keys=list(_d.keys())
_keys.sort()
assert _keys == ['a', 'b', 'i', 'm', 'p', 's']

#now try with default being list (ie, empty list)
_listdict=collections.defaultdict(list)
for _i in range(10):
    _listdict['mylist'].append(_i)

assert _listdict['not called'] == []
assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9]

# namedtuple
a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3)
assert a.bar == 1
assert a.bash == 2
assert repr(a) == 'foo(bar=1, bash=2, bing=3)'
Add a test on namedtuple
Python
bsd-3-clause
kikocorreoso/brython,Mozhuowen/brython,Hasimir/brython,Isendir/brython,Isendir/brython,amrdraz/brython,jonathanverner/brython,kevinmel2000/brython,brython-dev/brython,Mozhuowen/brython,jonathanverner/brython,Hasimir/brython,rubyinhell/brython,Hasimir/brython,molebot/brython,Isendir/brython,JohnDenker/brython,olemis/brython,kevinmel2000/brython,molebot/brython,Mozhuowen/brython,kevinmel2000/brython,brython-dev/brython,rubyinhell/brython,kikocorreoso/brython,amrdraz/brython,Lh4cKg/brython,rubyinhell/brython,Lh4cKg/brython,Hasimir/brython,olemis/brython,olemis/brython,jonathanverner/brython,kikocorreoso/brython,amrdraz/brython,molebot/brython,Lh4cKg/brython,molebot/brython,JohnDenker/brython,jonathanverner/brython,Lh4cKg/brython,olemis/brython,kevinmel2000/brython,rubyinhell/brython,Mozhuowen/brython,amrdraz/brython,brython-dev/brython,Isendir/brython,JohnDenker/brython,JohnDenker/brython
import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] + # namedtuple + a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) + assert a.bar == 1 + assert a.bash == 2 + assert repr(a) == 'foo(bar=1, bash=2, bing=3)' +
## Code Before: import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] ## Instruction: Add a test on namedtuple ## Code After: import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] # namedtuple a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) assert a.bar == 1 assert a.bash == 2 assert repr(a) == 'foo(bar=1, bash=2, bing=3)'
... assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] # namedtuple a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) assert a.bar == 1 assert a.bash == 2 assert repr(a) == 'foo(bar=1, bash=2, bing=3)' ...
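A few adjacent namedtuple behaviours that a follow-up test could cover, shown here as a sketch rather than part of the committed file:

import collections

Foo = collections.namedtuple("foo", "bar bash bing")
a = Foo(1, 2, 3)

assert a._fields == ("bar", "bash", "bing")  # field names are introspectable
assert a._replace(bing=9) == Foo(1, 2, 9)    # functional update
assert Foo._make([4, 5, 6]).bar == 4         # build from any iterable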
8e6a835cf98212545d00f0967b6f6ce936143687
fluxghost/http_server_debug.py
from multiprocessing import Process
import sys

from fluxghost.http_server_base import HttpServerBase, logger


def fork_entry(request, client, server):
    from fluxghost.http_handler import HttpHandler
    HttpHandler(request, client, server)


def check_autoreload():
    if "fluxghost.http_handler" in sys.modules:
        logger.error("Warning!! The fluxghost.http_handler has been "
                     "loaded before fork, auto-reload moudle function is"
                     " not work anymore.")
        return
    if "fluxclient" in sys.modules:
        logger.error("Warning!! The fluxclient has been "
                     "loaded before fork, auto-reload moudle function is"
                     " not work anymore.")
        return


class HttpServer(HttpServerBase):
    def on_accept(self):
        check_autoreload()
        request, client = self.sock.accept()
        w = Process(target=fork_entry, args=(request, client, self))
        w.daemon = True
        w.start()
from multiprocessing import Process
import sys

from fluxghost.http_server_base import HttpServerBase, logger


def fork_entry(request, client, server):
    from fluxghost.http_handler import HttpHandler
    HttpHandler(request, client, server)


def check_autoreload():
    if "fluxghost.http_handler" in sys.modules:
        logger.error("Warning!! The fluxghost.http_handler has been "
                     "loaded before fork, auto-reload moudle function is"
                     " not work anymore.")
        return
    if "fluxclient" in sys.modules:
        logger.error("Warning!! The fluxclient has been "
                     "loaded before fork, auto-reload moudle function is"
                     " not work anymore.")
        return


class HttpServer(HttpServerBase):
    def on_accept(self):
        check_autoreload()
        request, client = self.sock.accept()
        w = Process(target=fork_entry, args=(request, client, self))
        w.daemon = True
        w.start()
        request.close()
Fix missing close socket error
Python
agpl-3.0
flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost
from multiprocessing import Process import sys from fluxghost.http_server_base import HttpServerBase, logger def fork_entry(request, client, server): from fluxghost.http_handler import HttpHandler HttpHandler(request, client, server) def check_autoreload(): if "fluxghost.http_handler" in sys.modules: logger.error("Warning!! The fluxghost.http_handler has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return if "fluxclient" in sys.modules: logger.error("Warning!! The fluxclient has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return class HttpServer(HttpServerBase): def on_accept(self): check_autoreload() request, client = self.sock.accept() w = Process(target=fork_entry, args=(request, client, self)) w.daemon = True w.start() + request.close() +
## Code Before: from multiprocessing import Process import sys from fluxghost.http_server_base import HttpServerBase, logger def fork_entry(request, client, server): from fluxghost.http_handler import HttpHandler HttpHandler(request, client, server) def check_autoreload(): if "fluxghost.http_handler" in sys.modules: logger.error("Warning!! The fluxghost.http_handler has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return if "fluxclient" in sys.modules: logger.error("Warning!! The fluxclient has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return class HttpServer(HttpServerBase): def on_accept(self): check_autoreload() request, client = self.sock.accept() w = Process(target=fork_entry, args=(request, client, self)) w.daemon = True w.start() ## Instruction: Fix missing close socket error ## Code After: from multiprocessing import Process import sys from fluxghost.http_server_base import HttpServerBase, logger def fork_entry(request, client, server): from fluxghost.http_handler import HttpHandler HttpHandler(request, client, server) def check_autoreload(): if "fluxghost.http_handler" in sys.modules: logger.error("Warning!! The fluxghost.http_handler has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return if "fluxclient" in sys.modules: logger.error("Warning!! The fluxclient has been " "loaded before fork, auto-reload moudle function is" " not work anymore.") return class HttpServer(HttpServerBase): def on_accept(self): check_autoreload() request, client = self.sock.accept() w = Process(target=fork_entry, args=(request, client, self)) w.daemon = True w.start() request.close()
# ... existing code ... w = Process(target=fork_entry, args=(request, client, self)) w.daemon = True w.start() request.close() # ... rest of the code ...
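Why the one-line fix matters: with the fork start method, Process hands the child a duplicate of the accepted socket, so the parent must close its own copy or the descriptor leaks and the peer never sees EOF when the child finishes. A minimal standalone illustration of the same pattern (not fluxghost code, POSIX fork assumed):

import socket
from multiprocessing import Process


def handle(conn):
    conn.sendall(b"hello\n")
    conn.close()


def serve_once(listener):
    conn, _addr = listener.accept()
    worker = Process(target=handle, args=(conn,))
    worker.daemon = True
    worker.start()
    conn.close()  # parent's duplicate; the child still holds a live copy
    worker.join()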
7aa140778cd689a8efa86f0890c4ccb8fc7f0d43
infrastructure/tests/test_api_views.py
from django.test import Client, TestCase

from infrastructure import utils
from infrastructure import models
import json

from infrastructure.models import FinancialYear, QuarterlySpendFile, Expenditure, Project

from scorecard.models import Geography
from scorecard.profiles import MunicipalityProfile
from scorecard.admin import MunicipalityProfilesCompilationAdmin


class TestProject(TestCase):
    def setUp(self):
        fixtures = ["test_infrastructure.json"]
        TestProject.geography = Geography.objects.create(
            geo_level="municipality",
            geo_code="BUF",
            province_name="Eastern Cape",
            province_code="EC",
            category="A",
        )

    def test_infrastructure_project_search(self):
        response = self.client.get(
            "/api/v1/infrastructure/search/?province=Eastern+Cape&municipality=Buffalo+City&q=&budget_phase=Budget+year&financial_year=2019%2F2020&ordering=-total_forecast_budget")
        self.assertEqual(response.status_code, 200)
        js = response.json()
        self.assertEquals(len(js["results"]), 3)
from django.test import TestCase


class TestProject(TestCase):
    fixtures = ["test_infrastructure.json"]

    def test_infrastructure_project_filters(self):
        response = self.client.get(
            "/api/v1/infrastructure/search/?q=&province=Western+Cape&municipality=City+of+Cape+Town&project_type=New&function=Administrative+and+Corporate+Support&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget")
        self.assertEqual(response.status_code, 200)
        js = response.json()
        self.assertEquals(js["count"], 2)
        self.assertEquals(len(js["results"]["projects"]), 2)

    def test_infrastructure_project_search(self):
        response = self.client.get(
            "/api/v1/infrastructure/search/?q=PC001002004002_00473&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget")
        self.assertEqual(response.status_code, 200)
        js = response.json()
        self.assertEquals(js["count"], 1)
        self.assertEquals(len(js["results"]["projects"]), 1)

        response = self.client.get(
            "/api/v1/infrastructure/search/?q=Acquisition&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget")
        self.assertEqual(response.status_code, 200)
        js = response.json()
        self.assertEquals(js["count"], 1)
        self.assertEquals(len(js["results"]["projects"]), 1)
Add test for infra search API and some refactoring
Python
mit
Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data
- from django.test import Client, TestCase + from django.test import TestCase - from infrastructure import utils - from infrastructure import models - import json - - from infrastructure.models import FinancialYear, QuarterlySpendFile, Expenditure, Project - - from scorecard.models import Geography - from scorecard.profiles import MunicipalityProfile - from scorecard.admin import MunicipalityProfilesCompilationAdmin class TestProject(TestCase): - def setUp(self): - fixtures = ["test_infrastructure.json"] + fixtures = ["test_infrastructure.json"] - TestProject.geography = Geography.objects.create( - geo_level="municipality", - geo_code="BUF", - province_name="Eastern Cape", - province_code="EC", - category="A", - ) + def test_infrastructure_project_filters(self): + response = self.client.get( + "/api/v1/infrastructure/search/?q=&province=Western+Cape&municipality=City+of+Cape+Town&project_type=New&function=Administrative+and+Corporate+Support&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") + self.assertEqual(response.status_code, 200) + js = response.json() + self.assertEquals(js["count"], 2) + self.assertEquals(len(js["results"]["projects"]), 2) def test_infrastructure_project_search(self): - response = self.client.get( + response = self.client.get( - "/api/v1/infrastructure/search/?province=Eastern+Cape&municipality=Buffalo+City&q=&budget_phase=Budget+year&financial_year=2019%2F2020&ordering=-total_forecast_budget") + "/api/v1/infrastructure/search/?q=PC001002004002_00473&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") - self.assertEqual(response.status_code, 200) + self.assertEqual(response.status_code, 200) - js = response.json() + js = response.json() + self.assertEquals(js["count"], 1) - self.assertEquals(len(js["results"]), 3) + self.assertEquals(len(js["results"]["projects"]), 1) + response = self.client.get( + "/api/v1/infrastructure/search/?q=Acquisition&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") + self.assertEqual(response.status_code, 200) + js = response.json() + self.assertEquals(js["count"], 1) + self.assertEquals(len(js["results"]["projects"]), 1)
Add test for infra search API and some refactoring
## Code Before: from django.test import Client, TestCase from infrastructure import utils from infrastructure import models import json from infrastructure.models import FinancialYear, QuarterlySpendFile, Expenditure, Project from scorecard.models import Geography from scorecard.profiles import MunicipalityProfile from scorecard.admin import MunicipalityProfilesCompilationAdmin class TestProject(TestCase): def setUp(self): fixtures = ["test_infrastructure.json"] TestProject.geography = Geography.objects.create( geo_level="municipality", geo_code="BUF", province_name="Eastern Cape", province_code="EC", category="A", ) def test_infrastructure_project_search(self): response = self.client.get( "/api/v1/infrastructure/search/?province=Eastern+Cape&municipality=Buffalo+City&q=&budget_phase=Budget+year&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(len(js["results"]), 3) ## Instruction: Add test for infra search API and some refactoring ## Code After: from django.test import TestCase class TestProject(TestCase): fixtures = ["test_infrastructure.json"] def test_infrastructure_project_filters(self): response = self.client.get( "/api/v1/infrastructure/search/?q=&province=Western+Cape&municipality=City+of+Cape+Town&project_type=New&function=Administrative+and+Corporate+Support&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 2) self.assertEquals(len(js["results"]["projects"]), 2) def test_infrastructure_project_search(self): response = self.client.get( "/api/v1/infrastructure/search/?q=PC001002004002_00473&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 1) self.assertEquals(len(js["results"]["projects"]), 1) response = self.client.get( "/api/v1/infrastructure/search/?q=Acquisition&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 1) self.assertEquals(len(js["results"]["projects"]), 1)
// ... existing code ... from django.test import TestCase class TestProject(TestCase): fixtures = ["test_infrastructure.json"] def test_infrastructure_project_filters(self): response = self.client.get( "/api/v1/infrastructure/search/?q=&province=Western+Cape&municipality=City+of+Cape+Town&project_type=New&function=Administrative+and+Corporate+Support&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 2) self.assertEquals(len(js["results"]["projects"]), 2) def test_infrastructure_project_search(self): response = self.client.get( "/api/v1/infrastructure/search/?q=PC001002004002_00473&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 1) self.assertEquals(len(js["results"]["projects"]), 1) response = self.client.get( "/api/v1/infrastructure/search/?q=Acquisition&budget_phase=Budget+year&quarterly_phase=Original+Budget&financial_year=2019%2F2020&ordering=-total_forecast_budget") self.assertEqual(response.status_code, 200) js = response.json() self.assertEquals(js["count"], 1) self.assertEquals(len(js["results"]["projects"]), 1) // ... rest of the code ...
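A note on the refactoring above: Django's `TestCase` only loads fixtures declared as a class attribute, and the old `setUp` assigned `fixtures` to a local variable, which the test runner never sees. A minimal sketch of the two forms (the fixture name is taken from the record above; the test classes themselves are hypothetical):

```python
from django.test import TestCase

class BrokenFixtures(TestCase):
    def setUp(self):
        # A local variable only: the test runner never reads this,
        # so no fixture data is loaded for the tests.
        fixtures = ["test_infrastructure.json"]

class WorkingFixtures(TestCase):
    # A class attribute: the runner loads this fixture into the test
    # database before every test method in the class.
    fixtures = ["test_infrastructure.json"]
```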
c5b73be1bf0f0edd05c4743c2449bee568d01c76
setup.py
setup.py
from distutils.core import setup from turbasen import VERSION name = 'turbasen' setup( name=name, packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', download_url='https://github.com/Turbasen/turbasen.py/tarball/v%s' % (VERSION), keywords=['turbasen', 'nasjonalturbase', 'turistforening', 'rest-api'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: Norwegian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], install_requires=['requests'], )
from distutils.core import setup from os import path from turbasen import VERSION name = 'turbasen' here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=name, packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', long_description=long_description, author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', download_url='https://github.com/Turbasen/turbasen.py/tarball/v%s' % (VERSION), keywords=['turbasen', 'nasjonalturbase', 'turistforening', 'rest-api'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: Norwegian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], install_requires=['requests'], )
Add long description from README
Add long description from README
Python
mit
Turbasen/turbasen.py
from distutils.core import setup + from os import path from turbasen import VERSION name = 'turbasen' + + here = path.abspath(path.dirname(__file__)) + with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() setup( name=name, packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', + long_description=long_description, author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', download_url='https://github.com/Turbasen/turbasen.py/tarball/v%s' % (VERSION), keywords=['turbasen', 'nasjonalturbase', 'turistforening', 'rest-api'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: Norwegian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], install_requires=['requests'], )
Add long description from README
## Code Before: from distutils.core import setup from turbasen import VERSION name = 'turbasen' setup( name=name, packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', download_url='https://github.com/Turbasen/turbasen.py/tarball/v%s' % (VERSION), keywords=['turbasen', 'nasjonalturbase', 'turistforening', 'rest-api'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: Norwegian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], install_requires=['requests'], ) ## Instruction: Add long description from README ## Code After: from distutils.core import setup from os import path from turbasen import VERSION name = 'turbasen' here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=name, packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', long_description=long_description, author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', download_url='https://github.com/Turbasen/turbasen.py/tarball/v%s' % (VERSION), keywords=['turbasen', 'nasjonalturbase', 'turistforening', 'rest-api'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: Norwegian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], install_requires=['requests'], )
... from distutils.core import setup from os import path from turbasen import VERSION name = 'turbasen' here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name=name, ... packages=[name], version=VERSION, description='Client for Nasjonal Turbase REST API', long_description=long_description, author='Ali Kaafarani', author_email='[email protected]', url='https://github.com/Turbasen/turbasen.py', ...
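One caveat worth flagging in the change above: the classifiers still advertise Python 2, but the builtin `open()` on Python 2 does not accept an `encoding` argument, so this `setup.py` would fail there. A version-agnostic sketch under the same file layout would use `io.open`:

```python
import io
from os import path

here = path.abspath(path.dirname(__file__))

# io.open accepts encoding= on both Python 2 and Python 3, unlike the
# Python 2 builtin open() used in the setup.py above.
with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
```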
c35c11fb546123d0aada37fe7d7ab1829a6fa9f0
graphenebase/transactions.py
graphenebase/transactions.py
from collections import OrderedDict from binascii import hexlify, unhexlify from calendar import timegm from datetime import datetime import json import struct import time from .account import PublicKey from .chains import known_chains from .signedtransactions import Signed_Transaction from .operations import Operation from .objects import GrapheneObject, isArgsThisClass timeformat = '%Y-%m-%dT%H:%M:%S%Z' def getBlockParams(ws): """ Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node! """ dynBCParams = ws.get_dynamic_global_properties() ref_block_num = dynBCParams["last_irreversible_block_num"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix def formatTimeFromNow(secs=0): """ Properly Format Time that is `x` seconds in the future :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`) :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`) :rtype: str """ return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat)
from collections import OrderedDict from binascii import hexlify, unhexlify from calendar import timegm from datetime import datetime import json import struct import time from .account import PublicKey from .chains import known_chains from .signedtransactions import Signed_Transaction from .operations import Operation from .objects import GrapheneObject, isArgsThisClass timeformat = '%Y-%m-%dT%H:%M:%S%Z' def getBlockParams(ws): """ Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node! """ dynBCParams = ws.get_dynamic_global_properties() ref_block_num = dynBCParams["head_block_number"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix def formatTimeFromNow(secs=0): """ Properly Format Time that is `x` seconds in the future :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`) :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`) :rtype: str """ return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat)
Revert "[TaPOS] link to the last irreversible block instead of headblock"
Revert "[TaPOS] link to the last irreversible block instead of headblock" This reverts commit 05cec8e450a09fd0d3fa2b40860760f7bff0c125.
Python
mit
xeroc/python-graphenelib
from collections import OrderedDict from binascii import hexlify, unhexlify from calendar import timegm from datetime import datetime import json import struct import time from .account import PublicKey from .chains import known_chains from .signedtransactions import Signed_Transaction from .operations import Operation from .objects import GrapheneObject, isArgsThisClass timeformat = '%Y-%m-%dT%H:%M:%S%Z' def getBlockParams(ws): """ Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node! """ dynBCParams = ws.get_dynamic_global_properties() - ref_block_num = dynBCParams["last_irreversible_block_num"] & 0xFFFF + ref_block_num = dynBCParams["head_block_number"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix def formatTimeFromNow(secs=0): """ Properly Format Time that is `x` seconds in the future :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`) :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`) :rtype: str """ return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat)
Revert "[TaPOS] link to the last irreversible block instead of headblock"
## Code Before: from collections import OrderedDict from binascii import hexlify, unhexlify from calendar import timegm from datetime import datetime import json import struct import time from .account import PublicKey from .chains import known_chains from .signedtransactions import Signed_Transaction from .operations import Operation from .objects import GrapheneObject, isArgsThisClass timeformat = '%Y-%m-%dT%H:%M:%S%Z' def getBlockParams(ws): """ Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node! """ dynBCParams = ws.get_dynamic_global_properties() ref_block_num = dynBCParams["last_irreversible_block_num"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix def formatTimeFromNow(secs=0): """ Properly Format Time that is `x` seconds in the future :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`) :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`) :rtype: str """ return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat) ## Instruction: Revert "[TaPOS] link to the last irreversible block instead of headblock" ## Code After: from collections import OrderedDict from binascii import hexlify, unhexlify from calendar import timegm from datetime import datetime import json import struct import time from .account import PublicKey from .chains import known_chains from .signedtransactions import Signed_Transaction from .operations import Operation from .objects import GrapheneObject, isArgsThisClass timeformat = '%Y-%m-%dT%H:%M:%S%Z' def getBlockParams(ws): """ Auxiliary method to obtain ``ref_block_num`` and ``ref_block_prefix``. Requires a websocket connection to a witness node! """ dynBCParams = ws.get_dynamic_global_properties() ref_block_num = dynBCParams["head_block_number"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix def formatTimeFromNow(secs=0): """ Properly Format Time that is `x` seconds in the future :param int secs: Seconds to go in the future (`x>0`) or the past (`x<0`) :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`) :rtype: str """ return datetime.utcfromtimestamp(time.time() + int(secs)).strftime(timeformat)
// ... existing code ... witness node! """ dynBCParams = ws.get_dynamic_global_properties() ref_block_num = dynBCParams["head_block_number"] & 0xFFFF ref_block_prefix = struct.unpack_from("<I", unhexlify(dynBCParams["head_block_id"]), 4)[0] return ref_block_num, ref_block_prefix // ... rest of the code ...
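For context on the reverted line above: TaPOS encodes the reference block as the low 16 bits of a block number (`& 0xFFFF`) plus a 32-bit prefix read from byte offset 4 of the block id. A small worked example with made-up values:

```python
import struct
from binascii import unhexlify

head_block_number = 0x0115A001             # hypothetical head block number
ref_block_num = head_block_number & 0xFFFF
print(hex(ref_block_num))                  # 0xa001: only the low 16 bits survive

# A block id is 20 bytes (40 hex chars); the prefix is the little-endian
# uint32 starting at byte 4. This id is made up for illustration.
head_block_id = "0115a001aabbccdd" + "00" * 12
ref_block_prefix = struct.unpack_from("<I", unhexlify(head_block_id), 4)[0]
print(hex(ref_block_prefix))               # 0xddccbbaa
```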
c58e3c207ad5f534ea8a7e17cb13f6a1a1b8c714
multi_schema/admin.py
multi_schema/admin.py
from django.contrib import admin from models import Schema class SchemaAdmin(admin.ModelAdmin): pass admin.site.register(Schema, SchemaAdmin)
from django.contrib import admin, auth from models import Schema, UserSchema class SchemaAdmin(admin.ModelAdmin): def get_readonly_fields(self, request, obj=None): if obj is not None: return ('schema',) return () admin.site.register(Schema, SchemaAdmin) class SchemaInline(admin.StackedInline): model = UserSchema # Inject SchemeInline into UserAdmin UserAdmin = admin.site._registry[auth.models.User].__class__ class SchemaUserAdmin(UserAdmin): inlines = UserAdmin.inlines + [SchemaInline] admin.site.unregister(auth.models.User) admin.site.register(auth.models.User, SchemaUserAdmin)
Make 'schema' value readonly after creation. Inject SchemaUser into UserAdmin inlines.
Make 'schema' value readonly after creation. Inject SchemaUser into UserAdmin inlines.
Python
bsd-3-clause
schinckel/django-boardinghouse,schinckel/django-boardinghouse,schinckel/django-boardinghouse
- from django.contrib import admin + from django.contrib import admin, auth - from models import Schema + from models import Schema, UserSchema class SchemaAdmin(admin.ModelAdmin): - pass + def get_readonly_fields(self, request, obj=None): + if obj is not None: + return ('schema',) + return () admin.site.register(Schema, SchemaAdmin) + + class SchemaInline(admin.StackedInline): + model = UserSchema + + # Inject SchemeInline into UserAdmin + UserAdmin = admin.site._registry[auth.models.User].__class__ + + class SchemaUserAdmin(UserAdmin): + inlines = UserAdmin.inlines + [SchemaInline] + + admin.site.unregister(auth.models.User) + admin.site.register(auth.models.User, SchemaUserAdmin)
Make 'schema' value readonly after creation. Inject SchemaUser into UserAdmin inlines.
## Code Before: from django.contrib import admin from models import Schema class SchemaAdmin(admin.ModelAdmin): pass admin.site.register(Schema, SchemaAdmin) ## Instruction: Make 'schema' value readonly after creation. Inject SchemaUser into UserAdmin inlines. ## Code After: from django.contrib import admin, auth from models import Schema, UserSchema class SchemaAdmin(admin.ModelAdmin): def get_readonly_fields(self, request, obj=None): if obj is not None: return ('schema',) return () admin.site.register(Schema, SchemaAdmin) class SchemaInline(admin.StackedInline): model = UserSchema # Inject SchemeInline into UserAdmin UserAdmin = admin.site._registry[auth.models.User].__class__ class SchemaUserAdmin(UserAdmin): inlines = UserAdmin.inlines + [SchemaInline] admin.site.unregister(auth.models.User) admin.site.register(auth.models.User, SchemaUserAdmin)
... from django.contrib import admin, auth from models import Schema, UserSchema class SchemaAdmin(admin.ModelAdmin): def get_readonly_fields(self, request, obj=None): if obj is not None: return ('schema',) return () admin.site.register(Schema, SchemaAdmin) class SchemaInline(admin.StackedInline): model = UserSchema # Inject SchemeInline into UserAdmin UserAdmin = admin.site._registry[auth.models.User].__class__ class SchemaUserAdmin(UserAdmin): inlines = UserAdmin.inlines + [SchemaInline] admin.site.unregister(auth.models.User) admin.site.register(auth.models.User, SchemaUserAdmin) ...
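The key mechanism in the change above is that `ModelAdmin.get_readonly_fields` receives `obj=None` on the add form and the model instance on the change form, which is what makes a field editable only at creation time. A standalone sketch of the same pattern with hypothetical names:

```python
from django.contrib import admin

class CreateOnlyFieldAdmin(admin.ModelAdmin):
    # 'slug' is a hypothetical field that should be frozen after creation.
    create_only_fields = ('slug',)

    def get_readonly_fields(self, request, obj=None):
        if obj is not None:
            # Change form: an existing row is being edited.
            return self.create_only_fields
        # Add form: everything stays editable.
        return ()
```

Note also that `admin.site._registry` in the record above is a private attribute, so that injection trick can break across Django versions.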
2c6ff2b65ea291816221fe996fb282c2c4a74dd7
install_steps/create_bosh_cert.py
install_steps/create_bosh_cert.py
def do_step(context): call("mkdir -p ./bosh", shell=True) call("mkdir -p ./bosh/manifests", shell=True) # Generate the private key and certificate call("sh create_cert.sh", shell=True) call("cp bosh.key ./bosh/bosh", shell=True) with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert return context
from subprocess import call from os import makedirs from shutil import copy def do_step(context): makedirs("bosh/manifests") # Generate the private key and certificate call("sh create_cert.sh", shell=True) copy("bosh.key", "./bosh/bosh") with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert return context
Use python libs to do file operations
Use python libs to do file operations
Python
apache-2.0
cf-platform-eng/bosh-azure-template,cf-platform-eng/bosh-azure-template
+ from subprocess import call + from os import makedirs + from shutil import copy + def do_step(context): + makedirs("bosh/manifests") - call("mkdir -p ./bosh", shell=True) - call("mkdir -p ./bosh/manifests", shell=True) # Generate the private key and certificate call("sh create_cert.sh", shell=True) - call("cp bosh.key ./bosh/bosh", shell=True) + copy("bosh.key", "./bosh/bosh") with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert - + return context
Use python libs to do file operations
## Code Before: def do_step(context): call("mkdir -p ./bosh", shell=True) call("mkdir -p ./bosh/manifests", shell=True) # Generate the private key and certificate call("sh create_cert.sh", shell=True) call("cp bosh.key ./bosh/bosh", shell=True) with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert return context ## Instruction: Use python libs to do file operations ## Code After: from subprocess import call from os import makedirs from shutil import copy def do_step(context): makedirs("bosh/manifests") # Generate the private key and certificate call("sh create_cert.sh", shell=True) copy("bosh.key", "./bosh/bosh") with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert return context
# ... existing code ... from subprocess import call from os import makedirs from shutil import copy def do_step(context): makedirs("bosh/manifests") # Generate the private key and certificate call("sh create_cert.sh", shell=True) copy("bosh.key", "./bosh/bosh") with open ('bosh_cert.pem', 'r') as tmpfile: ssh_cert = tmpfile.read() ssh_cert = "|\n" + ssh_cert # ... modified code ... ssh_cert="\n ".join([line for line in ssh_cert.split('\n')]) context.meta['settings']['SSH_CERTIFICATE'] = ssh_cert return context # ... rest of the code ...
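One behavioural difference hidden in the change above: `mkdir -p` is idempotent, while `os.makedirs` raises `OSError` if the directory already exists (Python 3.2+ adds `exist_ok=True`). A re-runnable sketch that keeps the `mkdir -p` semantics on older Pythons:

```python
import errno
import os

def ensure_dir(path):
    # Equivalent of `mkdir -p`: create intermediate directories and
    # tolerate the case where the directory is already present.
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise

ensure_dir("bosh/manifests")
ensure_dir("bosh/manifests")  # second call is a harmless no-op
```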
ad31b6f0226ecd642289f54ec649964aeb9b5799
tests/sanity_tests.py
tests/sanity_tests.py
import filecmp import os import tempfile import unittest import isomedia TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata') class TestSanity(unittest.TestCase): def test_sanity(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') mp4file = open(mp4filename, 'rb') isofile = isomedia.load(mp4file) root = isofile.root moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') isofile = isomedia.load(infile) outfile = tempfile.NamedTemporaryFile(delete=False) isofile.write(outfile) infile.close() outfile.close() self.assertTrue(filecmp.cmp(infile.name, outfile.name)) if __name__ == '__main__': unittest.main()
import filecmp import os import tempfile import unittest import isomedia TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata') class TestSanity(unittest.TestCase): def test_sanity(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') mp4file = open(mp4filename, 'rb') isofile = isomedia.load(mp4file) root = isofile.root moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') mp4file.close() def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') isofile = isomedia.load(infile) outfile = tempfile.NamedTemporaryFile(delete=False) isofile.write(outfile) infile.close() outfile.close() self.assertTrue(filecmp.cmp(infile.name, outfile.name)) os.remove(outfile.name) if __name__ == '__main__': unittest.main()
Clean up more in tests
Clean up more in tests
Python
mit
flxf/isomedia
import filecmp import os import tempfile import unittest import isomedia TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata') class TestSanity(unittest.TestCase): def test_sanity(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') mp4file = open(mp4filename, 'rb') isofile = isomedia.load(mp4file) root = isofile.root moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') + mp4file.close() + def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') isofile = isomedia.load(infile) outfile = tempfile.NamedTemporaryFile(delete=False) isofile.write(outfile) infile.close() outfile.close() self.assertTrue(filecmp.cmp(infile.name, outfile.name)) + os.remove(outfile.name) + if __name__ == '__main__': unittest.main()
Clean up more in tests
## Code Before: import filecmp import os import tempfile import unittest import isomedia TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata') class TestSanity(unittest.TestCase): def test_sanity(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') mp4file = open(mp4filename, 'rb') isofile = isomedia.load(mp4file) root = isofile.root moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') isofile = isomedia.load(infile) outfile = tempfile.NamedTemporaryFile(delete=False) isofile.write(outfile) infile.close() outfile.close() self.assertTrue(filecmp.cmp(infile.name, outfile.name)) if __name__ == '__main__': unittest.main() ## Instruction: Clean up more in tests ## Code After: import filecmp import os import tempfile import unittest import isomedia TESTDATA = os.path.join(os.path.dirname(__file__), 'testdata') class TestSanity(unittest.TestCase): def test_sanity(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') mp4file = open(mp4filename, 'rb') isofile = isomedia.load(mp4file) root = isofile.root moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') mp4file.close() def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') isofile = isomedia.load(infile) outfile = tempfile.NamedTemporaryFile(delete=False) isofile.write(outfile) infile.close() outfile.close() self.assertTrue(filecmp.cmp(infile.name, outfile.name)) os.remove(outfile.name) if __name__ == '__main__': unittest.main()
# ... existing code ... moov_atom = [atom for atom in root.children if atom.type() == 'moov'] self.assertEqual(len(moov_atom), 1, 'There should be 1 moov atom.') mp4file.close() def test_lossless_write(self): mp4filename = os.path.join(TESTDATA, 'loop_circle.mp4') infile = open(mp4filename, 'rb') # ... modified code ... self.assertTrue(filecmp.cmp(infile.name, outfile.name)) os.remove(outfile.name) if __name__ == '__main__': unittest.main() # ... rest of the code ...
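Because the test above passes `delete=False`, the temporary file survives `close()` and must be removed by hand, which is what the added `os.remove` does; note that the removal is skipped if an assertion fails first. A hedged sketch of a helper that cleans up unconditionally (the helper name is hypothetical):

```python
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def temporary_file():
    # Yields an open NamedTemporaryFile and always deletes it afterwards,
    # even if the body of the with-block raises.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        yield tmp
    finally:
        tmp.close()
        os.remove(tmp.name)

with temporary_file() as outfile:
    outfile.write(b'payload')
```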
d3cb08d45af60aaf06757ad230a2a33bc3615543
apps/organizations/middleware.py
apps/organizations/middleware.py
from django.http import Http404 from .models import Organization class OrganizationMiddleware(object): def process_request(self, request): try: request.organization = Organization.objects.get( slug__iexact=request.subdomain ) except Organization.DoesNotExist: raise Http404
from django.http import Http404 from .models import Organization class OrganizationMiddleware(object): def process_request(self, request): if request.subdomain is None: return try: request.organization = Organization.objects.get( slug__iexact=request.subdomain ) except Organization.DoesNotExist: raise Http404
Remove subdomain check on pages where subdomain is none
Remove subdomain check on pages where subdomain is none
Python
mit
xobb1t/ddash2013,xobb1t/ddash2013
from django.http import Http404 from .models import Organization class OrganizationMiddleware(object): def process_request(self, request): + if request.subdomain is None: + return try: request.organization = Organization.objects.get( slug__iexact=request.subdomain ) except Organization.DoesNotExist: raise Http404
Remove subdomain check on pages where subdomain is none
## Code Before: from django.http import Http404 from .models import Organization class OrganizationMiddleware(object): def process_request(self, request): try: request.organization = Organization.objects.get( slug__iexact=request.subdomain ) except Organization.DoesNotExist: raise Http404 ## Instruction: Remove subdomain check on pages where subdomain is none ## Code After: from django.http import Http404 from .models import Organization class OrganizationMiddleware(object): def process_request(self, request): if request.subdomain is None: return try: request.organization = Organization.objects.get( slug__iexact=request.subdomain ) except Organization.DoesNotExist: raise Http404
# ... existing code ... class OrganizationMiddleware(object): def process_request(self, request): if request.subdomain is None: return try: request.organization = Organization.objects.get( slug__iexact=request.subdomain # ... rest of the code ...
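The fix above relies on the old-style middleware contract: returning `None` from `process_request` tells Django to keep processing the request normally, so the early `return` simply skips the organization lookup. A minimal self-contained sketch (the lookup helper is a hypothetical stand-in for the ORM query):

```python
def lookup_organization(subdomain):
    # Hypothetical stand-in for the Organization.objects.get(...) call above.
    return {'slug': subdomain}

class OrganizationMiddleware(object):
    def process_request(self, request):
        if getattr(request, 'subdomain', None) is None:
            return None  # None means: continue the normal request cycle
        request.organization = lookup_organization(request.subdomain)
```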
9530f7a65b5381fbb178dab120454dfc40a8d604
djangopypi2/apps/pypi_users/views.py
djangopypi2/apps/pypi_users/views.py
from django.contrib.auth import logout as auth_logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import MultipleObjectMixin from django.views.generic.list import ListView from django.views.generic.detail import DetailView from django.utils.decorators import method_decorator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect class SingleUserMixin(SingleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'user' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(Index, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'users' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(Index, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' class UserDetails(SingleUserMixin, DetailView): template_name = 'pypi_users/user_profile.html' @login_required def logout(request): auth_logout(request) return HttpResponseRedirect('/')
from django.contrib.auth import logout as auth_logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import MultipleObjectMixin from django.views.generic.list import ListView from django.views.generic.detail import DetailView from django.utils.decorators import method_decorator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect class SingleUserMixin(SingleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'user' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(SingleUserMixin, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'users' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(MultipleUsersMixin, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' class UserDetails(SingleUserMixin, DetailView): template_name = 'pypi_users/user_profile.html' @login_required def logout(request): auth_logout(request) return HttpResponseRedirect('/')
Fix bad super calls causing infinite recursion
Fix bad super calls causing infinite recursion
Python
bsd-3-clause
popen2/djangopypi2,popen2/djangopypi2,pitrho/djangopypi2,hsmade/djangopypi2,pitrho/djangopypi2,hsmade/djangopypi2
from django.contrib.auth import logout as auth_logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import MultipleObjectMixin from django.views.generic.list import ListView from django.views.generic.detail import DetailView from django.utils.decorators import method_decorator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect class SingleUserMixin(SingleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'user' @method_decorator(login_required) def dispatch(self, *args, **kwargs): - return super(Index, self).dispatch(*args, **kwargs) + return super(SingleUserMixin, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'users' @method_decorator(login_required) def dispatch(self, *args, **kwargs): - return super(Index, self).dispatch(*args, **kwargs) + return super(MultipleUsersMixin, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' class UserDetails(SingleUserMixin, DetailView): template_name = 'pypi_users/user_profile.html' @login_required def logout(request): auth_logout(request) return HttpResponseRedirect('/')
Fix bad super calls causing infinite recursion
## Code Before: from django.contrib.auth import logout as auth_logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import MultipleObjectMixin from django.views.generic.list import ListView from django.views.generic.detail import DetailView from django.utils.decorators import method_decorator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect class SingleUserMixin(SingleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'user' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(Index, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'users' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(Index, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' class UserDetails(SingleUserMixin, DetailView): template_name = 'pypi_users/user_profile.html' @login_required def logout(request): auth_logout(request) return HttpResponseRedirect('/') ## Instruction: Fix bad super calls causing infinite recursion ## Code After: from django.contrib.auth import logout as auth_logout from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.generic.detail import SingleObjectMixin from django.views.generic.list import MultipleObjectMixin from django.views.generic.list import ListView from django.views.generic.detail import DetailView from django.utils.decorators import method_decorator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect class SingleUserMixin(SingleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'user' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(SingleUserMixin, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User slug_field = 'username' slug_url_kwarg = 'username' context_object_name = 'users' @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(MultipleUsersMixin, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' class UserDetails(SingleUserMixin, DetailView): template_name = 'pypi_users/user_profile.html' @login_required def logout(request): auth_logout(request) return HttpResponseRedirect('/')
// ... existing code ... @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(SingleUserMixin, self).dispatch(*args, **kwargs) class MultipleUsersMixin(MultipleObjectMixin): model = User // ... modified code ... @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(MultipleUsersMixin, self).dispatch(*args, **kwargs) class Index(MultipleUsersMixin, ListView): template_name = 'pypi_users/index.html' // ... rest of the code ...
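The bug fixed above is worth spelling out: in a two-argument `super()` call, the first argument tells Python where to start walking the MRO. Naming a subclass (`Index`) from inside a mixin makes the lookup land back on the mixin's own method. A minimal reproduction with hypothetical class names:

```python
class Base(object):
    def dispatch(self):
        return 'base'

class Mixin(Base):
    def dispatch(self):
        # BUG: super(Index, self) starts the MRO walk *after* Index,
        # which resolves straight back to Mixin.dispatch -> infinite recursion.
        return super(Index, self).dispatch()

class Index(Mixin):
    pass

# Index().dispatch() recurses forever; super(Mixin, self) is the fix.
```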
312c0d463940257cb1f777d3720778550b5bdb2d
bluebottle/organizations/serializers.py
bluebottle/organizations/serializers.py
from rest_framework import serializers from bluebottle.organizations.models import Organization from bluebottle.utils.serializers import URLField class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = Organization fields = ('id', 'name', 'slug', 'address_line1', 'address_line2', 'city', 'state', 'country', 'postal_code', 'phone_number', 'website', 'email') class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) name = serializers.CharField(required=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) class Meta: model = Organization fields = OrganizationSerializer.Meta.fields + ('partner_organizations', 'created', 'updated')
from rest_framework import serializers from bluebottle.organizations.models import Organization from bluebottle.utils.serializers import URLField class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = Organization fields = ('id', 'name', 'slug', 'address_line1', 'address_line2', 'city', 'state', 'country', 'postal_code', 'phone_number', 'website', 'email') class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) name = serializers.CharField(required=True, allow_blank=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) class Meta: model = Organization fields = OrganizationSerializer.Meta.fields + ('partner_organizations', 'created', 'updated')
Revert "Make the name of an organization required"
Revert "Make the name of an organization required" This reverts commit 02140561a29a2b7fe50f7bf2402da566e60be641.
Python
bsd-3-clause
jfterpstra/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,jfterpstra/bluebottle,jfterpstra/bluebottle,jfterpstra/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
from rest_framework import serializers from bluebottle.organizations.models import Organization from bluebottle.utils.serializers import URLField class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = Organization fields = ('id', 'name', 'slug', 'address_line1', 'address_line2', 'city', 'state', 'country', 'postal_code', 'phone_number', 'website', 'email') class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) - name = serializers.CharField(required=True) + name = serializers.CharField(required=True, allow_blank=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) class Meta: model = Organization fields = OrganizationSerializer.Meta.fields + ('partner_organizations', 'created', 'updated')
Revert "Make the name of an organization required"
## Code Before: from rest_framework import serializers from bluebottle.organizations.models import Organization from bluebottle.utils.serializers import URLField class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = Organization fields = ('id', 'name', 'slug', 'address_line1', 'address_line2', 'city', 'state', 'country', 'postal_code', 'phone_number', 'website', 'email') class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) name = serializers.CharField(required=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) class Meta: model = Organization fields = OrganizationSerializer.Meta.fields + ('partner_organizations', 'created', 'updated') ## Instruction: Revert "Make the name of an organization required" ## Code After: from rest_framework import serializers from bluebottle.organizations.models import Organization from bluebottle.utils.serializers import URLField class OrganizationSerializer(serializers.ModelSerializer): class Meta: model = Organization fields = ('id', 'name', 'slug', 'address_line1', 'address_line2', 'city', 'state', 'country', 'postal_code', 'phone_number', 'website', 'email') class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) name = serializers.CharField(required=True, allow_blank=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) class Meta: model = Organization fields = OrganizationSerializer.Meta.fields + ('partner_organizations', 'created', 'updated')
// ... existing code ... class ManageOrganizationSerializer(serializers.ModelSerializer): slug = serializers.SlugField(required=False, allow_null=True) name = serializers.CharField(required=True, allow_blank=True) website = URLField(required=False, allow_blank=True) email = serializers.EmailField(required=False, allow_blank=True) // ... rest of the code ...
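The semantics the revert restores: in Django REST Framework, `required=True` means the key must be present in the payload, while `allow_blank=True` additionally accepts an empty string for a `CharField`. A small sketch of the difference (assumes a configured Django/DRF environment; the serializer is hypothetical):

```python
from rest_framework import serializers

class NameSerializer(serializers.Serializer):
    name = serializers.CharField(required=True, allow_blank=True)

s = NameSerializer(data={'name': ''})
print(s.is_valid())   # True: an empty string passes with allow_blank=True

s = NameSerializer(data={})
print(s.is_valid())   # False: the key itself is still required
```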
9a5c17781178e8c97a4749e49374c3b4449c7387
tests/test_models.py
tests/test_models.py
from unittest import TestCase import unittest.mock from molecupy.structures import Model, AtomicStructure, SmallMolecule class ModelTest(TestCase): def setUp(self): self.small_molecule1 = unittest.mock.Mock(spec=SmallMolecule) self.small_molecule2 = unittest.mock.Mock(spec=SmallMolecule) class ModelCreationTest(ModelTest): def test_can_create_chain(self): model = Model() self.assertIsInstance(model, AtomicStructure) self.assertEqual(model._atoms, set()) def test_model_repr(self): model = Model() self.assertEqual(str(model), "<Model (0 atoms)>") class ModelSmallMoleculeTests(ModelTest): def test_can_add_small_molecules(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.add_small_molecule(self.small_molecule1) self.assertEqual(model.small_molecules(), set([self.small_molecule1])) model.add_small_molecule(self.small_molecule2) self.assertEqual( model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) )
from unittest import TestCase import unittest.mock from molecupy.structures import Model, AtomicStructure, SmallMolecule class ModelTest(TestCase): def setUp(self): self.small_molecule1 = unittest.mock.Mock(spec=SmallMolecule) self.small_molecule2 = unittest.mock.Mock(spec=SmallMolecule) class ModelCreationTest(ModelTest): def test_can_create_chain(self): model = Model() self.assertIsInstance(model, AtomicStructure) self.assertEqual(model._atoms, set()) def test_model_repr(self): model = Model() self.assertEqual(str(model), "<Model (0 atoms)>") class ModelSmallMoleculeTests(ModelTest): def test_can_add_small_molecules(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.add_small_molecule(self.small_molecule1) self.assertEqual(model.small_molecules(), set([self.small_molecule1])) model.add_small_molecule(self.small_molecule2) self.assertEqual( model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) ) def test_must_use_method_to_add_small_molecule(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.small_molecules().add(self.small_molecule1) self.assertEqual(model.small_molecules(), set())
Make small molecules read only
Make small molecules read only
Python
mit
samirelanduk/atomium,samirelanduk/atomium,samirelanduk/molecupy
from unittest import TestCase import unittest.mock from molecupy.structures import Model, AtomicStructure, SmallMolecule class ModelTest(TestCase): def setUp(self): self.small_molecule1 = unittest.mock.Mock(spec=SmallMolecule) self.small_molecule2 = unittest.mock.Mock(spec=SmallMolecule) class ModelCreationTest(ModelTest): def test_can_create_chain(self): model = Model() self.assertIsInstance(model, AtomicStructure) self.assertEqual(model._atoms, set()) def test_model_repr(self): model = Model() self.assertEqual(str(model), "<Model (0 atoms)>") class ModelSmallMoleculeTests(ModelTest): def test_can_add_small_molecules(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.add_small_molecule(self.small_molecule1) self.assertEqual(model.small_molecules(), set([self.small_molecule1])) model.add_small_molecule(self.small_molecule2) self.assertEqual( model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) ) + + def test_must_use_method_to_add_small_molecule(self): + model = Model() + self.assertEqual(model.small_molecules(), set()) + model.small_molecules().add(self.small_molecule1) + self.assertEqual(model.small_molecules(), set()) +
Make small molecules read only
## Code Before: from unittest import TestCase import unittest.mock from molecupy.structures import Model, AtomicStructure, SmallMolecule class ModelTest(TestCase): def setUp(self): self.small_molecule1 = unittest.mock.Mock(spec=SmallMolecule) self.small_molecule2 = unittest.mock.Mock(spec=SmallMolecule) class ModelCreationTest(ModelTest): def test_can_create_chain(self): model = Model() self.assertIsInstance(model, AtomicStructure) self.assertEqual(model._atoms, set()) def test_model_repr(self): model = Model() self.assertEqual(str(model), "<Model (0 atoms)>") class ModelSmallMoleculeTests(ModelTest): def test_can_add_small_molecules(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.add_small_molecule(self.small_molecule1) self.assertEqual(model.small_molecules(), set([self.small_molecule1])) model.add_small_molecule(self.small_molecule2) self.assertEqual( model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) ) ## Instruction: Make small molecules read only ## Code After: from unittest import TestCase import unittest.mock from molecupy.structures import Model, AtomicStructure, SmallMolecule class ModelTest(TestCase): def setUp(self): self.small_molecule1 = unittest.mock.Mock(spec=SmallMolecule) self.small_molecule2 = unittest.mock.Mock(spec=SmallMolecule) class ModelCreationTest(ModelTest): def test_can_create_chain(self): model = Model() self.assertIsInstance(model, AtomicStructure) self.assertEqual(model._atoms, set()) def test_model_repr(self): model = Model() self.assertEqual(str(model), "<Model (0 atoms)>") class ModelSmallMoleculeTests(ModelTest): def test_can_add_small_molecules(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.add_small_molecule(self.small_molecule1) self.assertEqual(model.small_molecules(), set([self.small_molecule1])) model.add_small_molecule(self.small_molecule2) self.assertEqual( model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) ) def test_must_use_method_to_add_small_molecule(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.small_molecules().add(self.small_molecule1) self.assertEqual(model.small_molecules(), set())
// ... existing code ... model.small_molecules(), set([self.small_molecule1, self.small_molecule2]) ) def test_must_use_method_to_add_small_molecule(self): model = Model() self.assertEqual(model.small_molecules(), set()) model.small_molecules().add(self.small_molecule1) self.assertEqual(model.small_molecules(), set()) // ... rest of the code ...
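For the new test above to pass, `small_molecules()` has to return something whose mutation cannot leak back into the model; the usual approach is to hand out a copy of the internal set. A sketch of that accessor (the internal attribute name is an assumption, not necessarily molecupy's actual implementation):

```python
class Model(object):
    def __init__(self):
        self._small_molecules = set()

    def add_small_molecule(self, molecule):
        self._small_molecules.add(molecule)

    def small_molecules(self):
        # Return a copy so callers can inspect but not mutate internal state.
        return set(self._small_molecules)

model = Model()
model.small_molecules().add('heme')  # mutates only the returned copy
print(model.small_molecules())       # set(): internal state unchanged
```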
e04123ce0b368b19015270b1fc1bd1f706c765cf
pwm/_compat.py
pwm/_compat.py
# pylint: disable=unused-import import sys PY2 = sys.version_info[0] == 2 if PY2: # pragma: no cover from ConfigParser import RawConfigParser from httplib import HTTPConnection input = raw_input def ord_byte(char): ''' convert a single character into integer representation ''' return ord(char) else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte
# pylint: disable=unused-import import sys PY2 = sys.version_info[0] == 2 if PY2: # pragma: no cover from ConfigParser import RawConfigParser from httplib import HTTPConnection input = raw_input def ord_byte(char): ''' convert a single character into integer representation ''' return ord(char) else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection input = input def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte
Fix python 3 input compatibility
Fix python 3 input compatibility
Python
mit
thusoy/pwm,thusoy/pwm
# pylint: disable=unused-import import sys PY2 = sys.version_info[0] == 2 if PY2: # pragma: no cover from ConfigParser import RawConfigParser from httplib import HTTPConnection input = raw_input def ord_byte(char): ''' convert a single character into integer representation ''' return ord(char) else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection + input = input def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte
Fix python 3 input compatibility
## Code Before: # pylint: disable=unused-import import sys PY2 = sys.version_info[0] == 2 if PY2: # pragma: no cover from ConfigParser import RawConfigParser from httplib import HTTPConnection input = raw_input def ord_byte(char): ''' convert a single character into integer representation ''' return ord(char) else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte ## Instruction: Fix python 3 input compatibility ## Code After: # pylint: disable=unused-import import sys PY2 = sys.version_info[0] == 2 if PY2: # pragma: no cover from ConfigParser import RawConfigParser from httplib import HTTPConnection input = raw_input def ord_byte(char): ''' convert a single character into integer representation ''' return ord(char) else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection input = input def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte
... else: # pragma: no cover from configparser import RawConfigParser from http.client import HTTPConnection input = input def ord_byte(byte): ''' convert a single byte into integer representation ''' return byte ...
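Two details in the module above are easy to miss: `input = input` on the Python 3 branch is a deliberate no-op that mirrors the Python 2 branch's `input = raw_input`, so `from ._compat import input` works on both versions; and `ord_byte` exists because indexing `bytes` yields a one-character `str` on Python 2 but an `int` on Python 3. A quick illustration of the latter:

```python
import sys

data = b'\x41\x42'

if sys.version_info[0] == 2:
    first = ord(data[0])  # on Python 2, data[0] is the str 'A'
else:
    first = data[0]       # on Python 3, data[0] is already the int 65

print(first)              # 65 on both versions
```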
a0172116503f0b212a184fc4a1d2179115675e17
fuzzyfinder/main.py
fuzzyfinder/main.py
import re from . import export @export def fuzzyfinder(text, collection): """ Args: text (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the input `text`. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collections` using the `text` input. """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) regex = re.compile('%s' % pat) for item in sorted(collection): r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) return (z for _, _, z in sorted(suggestions))
import re from . import export @export def fuzzyfinder(text, collection): """ Args: text (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the input `text`. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collections` using the `text` input. """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) regex = re.compile(pat) for item in collection: r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) return (z for _, _, z in sorted(suggestions))
Remove string interpolation and sorting of the collection.
Remove string interpolation and sorting of the collection.
Python
bsd-3-clause
adammenges/fuzzyfinder,amjith/fuzzyfinder,harrisonfeng/fuzzyfinder
import re from . import export @export def fuzzyfinder(text, collection): """ Args: text (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the input `text`. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collections` using the `text` input. """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) - regex = re.compile('%s' % pat) + regex = re.compile(pat) - for item in sorted(collection): + for item in collection: r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) return (z for _, _, z in sorted(suggestions))
Remove string interpolation and sorting of the collection.
## Code Before: import re from . import export @export def fuzzyfinder(text, collection): """ Args: text (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the input `text`. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collections` using the `text` input. """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) regex = re.compile('%s' % pat) for item in sorted(collection): r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) return (z for _, _, z in sorted(suggestions)) ## Instruction: Remove string interpolation and sorting of the collection. ## Code After: import re from . import export @export def fuzzyfinder(text, collection): """ Args: text (str): A partial string which is typically entered by a user. collection (iterable): A collection of strings which will be filtered based on the input `text`. Returns: suggestions (generator): A generator object that produces a list of suggestions narrowed down from `collections` using the `text` input. """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) regex = re.compile(pat) for item in collection: r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) return (z for _, _, z in sorted(suggestions))
... """ suggestions = [] pat = '.*?'.join(map(re.escape, text)) regex = re.compile(pat) for item in collection: r = regex.search(item) if r: suggestions.append((len(r.group()), r.start(), item)) ...
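The regex built above, `'.*?'.join(map(re.escape, text))`, matches the query as a lazy subsequence, and sorting `(match_length, match_start, item)` tuples ranks tighter, earlier matches first. A self-contained sketch of the same idea with sample data:

```python
import re

def fuzzy(text, collection):
    pattern = '.*?'.join(map(re.escape, text))  # 'mig' -> 'm.*?i.*?g'
    regex = re.compile(pattern)
    scored = []
    for item in collection:
        match = regex.search(item)
        if match:
            # Shorter matches that start earlier sort first.
            scored.append((len(match.group()), match.start(), item))
    return [item for _, _, item in sorted(scored)]

print(fuzzy('mig', ['migrations.py', 'main_git.py', 'setup.py']))
# ['migrations.py', 'main_git.py']
```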
7c85e2b278667e7340c7c6bf57c3c0c91210c471
coolfig/__init__.py
coolfig/__init__.py
from .schema import Value, Settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' __all__ = ['Value', 'Settings']
from .schema import Value, Settings from .providers import EnvConfig, DictConfig from .django import load_django_settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' __all__ = ['Value', 'Settings', 'EnvConfig', 'DictConfig', 'load_django_settings']
Add some more importing shortcuts
Add some more importing shortcuts
Python
mit
GaretJax/coolfig
from .schema import Value, Settings + from .providers import EnvConfig, DictConfig + from .django import load_django_settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' - __all__ = ['Value', 'Settings'] + __all__ = ['Value', 'Settings', 'EnvConfig', 'DictConfig', + 'load_django_settings']
Add some more importing shortcuts
## Code Before: from .schema import Value, Settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' __all__ = ['Value', 'Settings'] ## Instruction: Add some more importing shortcuts ## Code After: from .schema import Value, Settings from .providers import EnvConfig, DictConfig from .django import load_django_settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' __all__ = ['Value', 'Settings', 'EnvConfig', 'DictConfig', 'load_django_settings']
// ... existing code ... from .schema import Value, Settings from .providers import EnvConfig, DictConfig from .django import load_django_settings __version__ = '0.2.0' __url__ = 'https://github.com/GaretJax/coolfig' __all__ = ['Value', 'Settings', 'EnvConfig', 'DictConfig', 'load_django_settings'] // ... rest of the code ...
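The re-exports above let callers import from the package root (`from coolfig import EnvConfig`) instead of reaching into submodules; `__all__` also pins what a star-import exposes. A tiny sketch of that behaviour in a hypothetical module:

```python
# demo.py - a hypothetical module mirroring the pattern above
__all__ = ['Value', 'Settings']

Value = 'public name'
Settings = 'public name'
_internal = 'not exported by star-imports'

# Elsewhere: `from demo import *` binds only Value and Settings,
# while an explicit `from demo import _internal` still works.
```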
0b8cc130f00b51b18e55805f82ba661fdf66fba6
saml2idp/saml2idp_metadata.py
saml2idp/saml2idp_metadata.py
from django.conf import settings from django.core.exceptions import ImproperlyConfigured CERTIFICATE_DATA = 'certificate_data' CERTIFICATE_FILENAME = 'certificate_file' PRIVATE_KEY_DATA = 'private_key_data' PRIVATE_KEY_FILENAME = 'private_key_file' def check_configuration_contains(config, keys): available_keys = set(keys).intersection(set(config.keys())) if not available_keys: raise ImproperlyConfigured( 'one of the followin keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: raise ImproperlyConfigured( 'found conflicting configuration: {}. Only one key can be used at' 'a time.'.format(available_keys)) def validate_configuration(config): check_configuration_contains(config=config, keys=[PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME]) check_configuration_contains(config=config, keys=[CERTIFICATE_DATA, CERTIFICATE_FILENAME]) try: SAML2IDP_CONFIG = settings.SAML2IDP_CONFIG except: raise ImproperlyConfigured('SAML2IDP_CONFIG setting is missing.') else: validate_configuration(SAML2IDP_CONFIG) try: SAML2IDP_REMOTES = settings.SAML2IDP_REMOTES except: raise ImproperlyConfigured('SAML2IDP_REMOTES setting is missing.')
from django.conf import settings from django.core.exceptions import ImproperlyConfigured CERTIFICATE_DATA = 'certificate_data' CERTIFICATE_FILENAME = 'certificate_file' PRIVATE_KEY_DATA = 'private_key_data' PRIVATE_KEY_FILENAME = 'private_key_file' def check_configuration_contains(config, keys): available_keys = frozenset(keys).intersection(frozenset(config.keys())) if not available_keys: raise ImproperlyConfigured( 'one of the following keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: raise ImproperlyConfigured( 'found conflicting configuration: {}. Only one key can be used at' 'a time.'.format(available_keys)) def validate_configuration(config): check_configuration_contains(config=config, keys=(PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME)) check_configuration_contains(config=config, keys=(CERTIFICATE_DATA, CERTIFICATE_FILENAME)) try: SAML2IDP_CONFIG = settings.SAML2IDP_CONFIG except: raise ImproperlyConfigured('SAML2IDP_CONFIG setting is missing.') else: validate_configuration(SAML2IDP_CONFIG) try: SAML2IDP_REMOTES = settings.SAML2IDP_REMOTES except: raise ImproperlyConfigured('SAML2IDP_REMOTES setting is missing.')
Implement suggested changes in PR review
Implement suggested changes in PR review
Python
mit
mobify/dj-saml-idp
from django.conf import settings from django.core.exceptions import ImproperlyConfigured CERTIFICATE_DATA = 'certificate_data' CERTIFICATE_FILENAME = 'certificate_file' PRIVATE_KEY_DATA = 'private_key_data' PRIVATE_KEY_FILENAME = 'private_key_file' def check_configuration_contains(config, keys): - available_keys = set(keys).intersection(set(config.keys())) + available_keys = frozenset(keys).intersection(frozenset(config.keys())) if not available_keys: raise ImproperlyConfigured( - 'one of the followin keys is required but none was ' + 'one of the following keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: raise ImproperlyConfigured( 'found conflicting configuration: {}. Only one key can be used at' 'a time.'.format(available_keys)) def validate_configuration(config): check_configuration_contains(config=config, - keys=[PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME]) + keys=(PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME)) check_configuration_contains(config=config, - keys=[CERTIFICATE_DATA, CERTIFICATE_FILENAME]) + keys=(CERTIFICATE_DATA, CERTIFICATE_FILENAME)) try: SAML2IDP_CONFIG = settings.SAML2IDP_CONFIG except: raise ImproperlyConfigured('SAML2IDP_CONFIG setting is missing.') else: validate_configuration(SAML2IDP_CONFIG) try: SAML2IDP_REMOTES = settings.SAML2IDP_REMOTES except: raise ImproperlyConfigured('SAML2IDP_REMOTES setting is missing.')
Implement suggested changes in PR review
## Code Before: from django.conf import settings from django.core.exceptions import ImproperlyConfigured CERTIFICATE_DATA = 'certificate_data' CERTIFICATE_FILENAME = 'certificate_file' PRIVATE_KEY_DATA = 'private_key_data' PRIVATE_KEY_FILENAME = 'private_key_file' def check_configuration_contains(config, keys): available_keys = set(keys).intersection(set(config.keys())) if not available_keys: raise ImproperlyConfigured( 'one of the followin keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: raise ImproperlyConfigured( 'found conflicting configuration: {}. Only one key can be used at' 'a time.'.format(available_keys)) def validate_configuration(config): check_configuration_contains(config=config, keys=[PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME]) check_configuration_contains(config=config, keys=[CERTIFICATE_DATA, CERTIFICATE_FILENAME]) try: SAML2IDP_CONFIG = settings.SAML2IDP_CONFIG except: raise ImproperlyConfigured('SAML2IDP_CONFIG setting is missing.') else: validate_configuration(SAML2IDP_CONFIG) try: SAML2IDP_REMOTES = settings.SAML2IDP_REMOTES except: raise ImproperlyConfigured('SAML2IDP_REMOTES setting is missing.') ## Instruction: Implement suggested changes in PR review ## Code After: from django.conf import settings from django.core.exceptions import ImproperlyConfigured CERTIFICATE_DATA = 'certificate_data' CERTIFICATE_FILENAME = 'certificate_file' PRIVATE_KEY_DATA = 'private_key_data' PRIVATE_KEY_FILENAME = 'private_key_file' def check_configuration_contains(config, keys): available_keys = frozenset(keys).intersection(frozenset(config.keys())) if not available_keys: raise ImproperlyConfigured( 'one of the following keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: raise ImproperlyConfigured( 'found conflicting configuration: {}. Only one key can be used at' 'a time.'.format(available_keys)) def validate_configuration(config): check_configuration_contains(config=config, keys=(PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME)) check_configuration_contains(config=config, keys=(CERTIFICATE_DATA, CERTIFICATE_FILENAME)) try: SAML2IDP_CONFIG = settings.SAML2IDP_CONFIG except: raise ImproperlyConfigured('SAML2IDP_CONFIG setting is missing.') else: validate_configuration(SAML2IDP_CONFIG) try: SAML2IDP_REMOTES = settings.SAML2IDP_REMOTES except: raise ImproperlyConfigured('SAML2IDP_REMOTES setting is missing.')
... def check_configuration_contains(config, keys): available_keys = frozenset(keys).intersection(frozenset(config.keys())) if not available_keys: raise ImproperlyConfigured( 'one of the following keys is required but none was ' 'specified: {}'.format(keys)) if len(available_keys) > 1: ... def validate_configuration(config): check_configuration_contains(config=config, keys=(PRIVATE_KEY_DATA, PRIVATE_KEY_FILENAME)) check_configuration_contains(config=config, keys=(CERTIFICATE_DATA, CERTIFICATE_FILENAME)) try: ...
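The validation helper is effectively an exactly-one-of check. A self-contained sketch (with a stand-in exception class so the snippet runs without Django) exercising both failure modes:

```python
class ImproperlyConfigured(Exception):
    """Stand-in for django.core.exceptions.ImproperlyConfigured."""

def check_configuration_contains(config, keys):
    available_keys = frozenset(keys).intersection(frozenset(config.keys()))
    if not available_keys:
        raise ImproperlyConfigured(
            'one of the following keys is required but none was '
            'specified: {}'.format(keys))
    if len(available_keys) > 1:
        raise ImproperlyConfigured(
            'found conflicting configuration: {}. Only one key can be '
            'used at a time.'.format(available_keys))

keys = ('private_key_data', 'private_key_file')
check_configuration_contains({'private_key_file': 'key.pem'}, keys)  # OK: exactly one
for bad in ({}, {'private_key_data': '...', 'private_key_file': 'key.pem'}):
    try:
        check_configuration_contains(bad, keys)
    except ImproperlyConfigured as e:
        print(e)  # "none was specified" / "conflicting configuration"
```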
d9349d617a504381d65f412930126de8e7548300
setup/create_divisions.py
setup/create_divisions.py
import os import traceback from db.common import session_scope from db.team import Team from db.division import Division def create_divisions(div_src_file=None): if not div_src_file: div_src_file = os.path.join( os.path.dirname(__file__), 'nhl_divisions_config.txt') lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: try: for line in lines: if line.startswith("#"): continue division_name, season, teams, conference = line.split(";") season = int(season) team_abbrs = teams[1:-1].split(',') teams = list() for t in team_abbrs: team = Team.find(t) teams.append(team) else: if conference: division = Division( division_name, season, teams, conference) else: division = Division( division_name, season, teams) session.add(division) print(division) session.commit() except: session.rollback() traceback.print_exc()
import os import traceback from db.common import session_scope from db.team import Team from db.division import Division def create_divisions(div_src_file=None): if not div_src_file: div_src_file = os.path.join( os.path.dirname(__file__), 'nhl_divisions_config.txt') lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: session.query(Division).delete(synchronize_session=False) try: for line in lines: if line.startswith("#"): continue division_name, season, teams, conference = line.split(";") season = int(season) team_abbrs = teams[1:-1].split(',') teams = list() for t in team_abbrs: team = Team.find(t) teams.append(team) else: if conference: division = Division( division_name, season, teams, conference) else: division = Division( division_name, season, teams) session.add(division) print(division) session.commit() except Exception as e: session.rollback() traceback.print_exc()
Truncate target table before creating division items
Truncate target table before creating division items
Python
mit
leaffan/pynhldb
import os import traceback from db.common import session_scope from db.team import Team from db.division import Division def create_divisions(div_src_file=None): if not div_src_file: div_src_file = os.path.join( os.path.dirname(__file__), 'nhl_divisions_config.txt') lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: + + session.query(Division).delete(synchronize_session=False) + try: for line in lines: if line.startswith("#"): continue division_name, season, teams, conference = line.split(";") season = int(season) team_abbrs = teams[1:-1].split(',') teams = list() for t in team_abbrs: team = Team.find(t) teams.append(team) else: if conference: division = Division( division_name, season, teams, conference) else: division = Division( division_name, season, teams) session.add(division) print(division) session.commit() - except: + except Exception as e: session.rollback() traceback.print_exc()
Truncate target table before creating division items
## Code Before: import os import traceback from db.common import session_scope from db.team import Team from db.division import Division def create_divisions(div_src_file=None): if not div_src_file: div_src_file = os.path.join( os.path.dirname(__file__), 'nhl_divisions_config.txt') lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: try: for line in lines: if line.startswith("#"): continue division_name, season, teams, conference = line.split(";") season = int(season) team_abbrs = teams[1:-1].split(',') teams = list() for t in team_abbrs: team = Team.find(t) teams.append(team) else: if conference: division = Division( division_name, season, teams, conference) else: division = Division( division_name, season, teams) session.add(division) print(division) session.commit() except: session.rollback() traceback.print_exc() ## Instruction: Truncate target table before creating division items ## Code After: import os import traceback from db.common import session_scope from db.team import Team from db.division import Division def create_divisions(div_src_file=None): if not div_src_file: div_src_file = os.path.join( os.path.dirname(__file__), 'nhl_divisions_config.txt') lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: session.query(Division).delete(synchronize_session=False) try: for line in lines: if line.startswith("#"): continue division_name, season, teams, conference = line.split(";") season = int(season) team_abbrs = teams[1:-1].split(',') teams = list() for t in team_abbrs: team = Team.find(t) teams.append(team) else: if conference: division = Division( division_name, season, teams, conference) else: division = Division( division_name, season, teams) session.add(division) print(division) session.commit() except Exception as e: session.rollback() traceback.print_exc()
# ... existing code ... lines = [l.strip() for l in open(div_src_file).readlines()] with session_scope() as session: session.query(Division).delete(synchronize_session=False) try: for line in lines: if line.startswith("#"): # ... modified code ... session.commit() except Exception as e: session.rollback() traceback.print_exc() # ... rest of the code ...
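For readers unfamiliar with the bulk-delete call, a hedged, self-contained sketch (SQLAlchemy 1.4+; this `Division` is a hypothetical stand-in for `db.division.Division`). `Query.delete(synchronize_session=False)` issues one DELETE statement and skips reconciling objects already loaded into the session, which is fine when the table is about to be repopulated from scratch:

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Division(Base):  # hypothetical stand-in for db.division.Division
    __tablename__ = 'divisions'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Division(name='Pacific'), Division(name='Central')])
    session.commit()
    # Truncate-style bulk DELETE, as in the patched script.
    deleted = session.query(Division).delete(synchronize_session=False)
    session.commit()
    print(deleted)  # 2
```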
bfbc156d9efca37c35d18481c4366d3e6deed1ba
slave/skia_slave_scripts/chromeos_run_bench.py
slave/skia_slave_scripts/chromeos_run_bench.py
""" Run the Skia bench executable. """ from build_step import BuildStep, BuildStepWarning from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys class ChromeOSRunBench(ChromeOSBuildStep, RunBench): def _Run(self): # TODO(borenet): Re-enable this step once the crash is fixed. # RunBench._Run(self) raise BuildStepWarning('Skipping bench on ChromeOS until crash is fixed.') if '__main__' == __name__: sys.exit(BuildStep.RunBuildStep(ChromeOSRunBench))
""" Run the Skia bench executable. """ from build_step import BuildStep from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys class ChromeOSRunBench(ChromeOSBuildStep, RunBench): pass if '__main__' == __name__: sys.exit(BuildStep.RunBuildStep(ChromeOSRunBench))
Stop skipping Bench on ChromeOS
Stop skipping Bench on ChromeOS (RunBuilders:Skia_ChromeOS_Alex_Debug_32) Unreviewed. git-svn-id: 32fc27f4dcfb6c0385cd9719852b95fe6680452d@8094 2bbb7eff-a529-9590-31e7-b0007b416f81
Python
bsd-3-clause
Tiger66639/skia-buildbot,google/skia-buildbot
""" Run the Skia bench executable. """ - from build_step import BuildStep, BuildStepWarning + from build_step import BuildStep from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys class ChromeOSRunBench(ChromeOSBuildStep, RunBench): + pass - def _Run(self): - # TODO(borenet): Re-enable this step once the crash is fixed. - # RunBench._Run(self) - raise BuildStepWarning('Skipping bench on ChromeOS until crash is fixed.') if '__main__' == __name__: sys.exit(BuildStep.RunBuildStep(ChromeOSRunBench))
Stop skipping Bench on ChromeOS
## Code Before: """ Run the Skia bench executable. """ from build_step import BuildStep, BuildStepWarning from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys class ChromeOSRunBench(ChromeOSBuildStep, RunBench): def _Run(self): # TODO(borenet): Re-enable this step once the crash is fixed. # RunBench._Run(self) raise BuildStepWarning('Skipping bench on ChromeOS until crash is fixed.') if '__main__' == __name__: sys.exit(BuildStep.RunBuildStep(ChromeOSRunBench)) ## Instruction: Stop skipping Bench on ChromeOS ## Code After: """ Run the Skia bench executable. """ from build_step import BuildStep from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys class ChromeOSRunBench(ChromeOSBuildStep, RunBench): pass if '__main__' == __name__: sys.exit(BuildStep.RunBuildStep(ChromeOSRunBench))
// ... existing code ... """ Run the Skia bench executable. """ from build_step import BuildStep from chromeos_build_step import ChromeOSBuildStep from run_bench import RunBench import sys // ... modified code ... class ChromeOSRunBench(ChromeOSBuildStep, RunBench): pass if '__main__' == __name__: // ... rest of the code ...
a18a19345298c43400dbfb984f97e97b3d0b624a
pyelasticsearch/__init__.py
pyelasticsearch/__init__.py
from __future__ import absolute_import from pyelasticsearch.client import ElasticSearch from pyelasticsearch.exceptions import (Timeout, ConnectionError, ElasticHttpError, InvalidJsonResponseError, ElasticHttpNotFoundError, IndexAlreadyExistsError) __author__ = 'Robert Eanes' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] __version__ = '0.6.1' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__
from __future__ import absolute_import from pyelasticsearch.client import ElasticSearch from pyelasticsearch.exceptions import (Timeout, ConnectionError, ElasticHttpError, InvalidJsonResponseError, ElasticHttpNotFoundError, IndexAlreadyExistsError) __author__ = 'Erik Rose' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] __version__ = '0.7' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__
Change author and bump version.
Change author and bump version.
Python
bsd-3-clause
erikrose/pyelasticsearch
from __future__ import absolute_import from pyelasticsearch.client import ElasticSearch from pyelasticsearch.exceptions import (Timeout, ConnectionError, ElasticHttpError, InvalidJsonResponseError, ElasticHttpNotFoundError, IndexAlreadyExistsError) - __author__ = 'Robert Eanes' + __author__ = 'Erik Rose' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] - __version__ = '0.6.1' + __version__ = '0.7' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__
Change author and bump version.
## Code Before: from __future__ import absolute_import from pyelasticsearch.client import ElasticSearch from pyelasticsearch.exceptions import (Timeout, ConnectionError, ElasticHttpError, InvalidJsonResponseError, ElasticHttpNotFoundError, IndexAlreadyExistsError) __author__ = 'Robert Eanes' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] __version__ = '0.6.1' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__ ## Instruction: Change author and bump version. ## Code After: from __future__ import absolute_import from pyelasticsearch.client import ElasticSearch from pyelasticsearch.exceptions import (Timeout, ConnectionError, ElasticHttpError, InvalidJsonResponseError, ElasticHttpNotFoundError, IndexAlreadyExistsError) __author__ = 'Erik Rose' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] __version__ = '0.7' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__
... ElasticHttpNotFoundError, IndexAlreadyExistsError) __author__ = 'Erik Rose' __all__ = ['ElasticSearch', 'ElasticHttpError', 'InvalidJsonResponseError', 'Timeout', 'ConnectionError', 'ElasticHttpNotFoundError', 'IndexAlreadyExistsError'] __version__ = '0.7' __version_info__ = tuple(__version__.split('.')) get_version = lambda: __version_info__ ...
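One caveat worth noting about the unchanged `__version_info__` line: splitting a version string yields a tuple of strings, which compares lexicographically rather than numerically. A tiny demonstration:

```python
__version__ = '0.7'
__version_info__ = tuple(__version__.split('.'))
print(__version_info__)          # ('0', '7') -- strings, not ints
print(('0', '10') < ('0', '7'))  # True: lexicographic, so '0.10' sorts before '0.7'
print(tuple(int(p) for p in '0.10'.split('.')) > (0, 7))  # True: ints compare numerically
```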
0cd084550fc5c1315fe33fcb00e57c1c332be6ab
indra/tests/test_mesh.py
indra/tests/test_mesh.py
from indra.databases import mesh_client def test_mesh_id_lookup(): mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen'
from indra.databases import mesh_client def test_mesh_id_lookup(): mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen' def test_invalid_id(): mesh_name = mesh_client.get_mesh_name('34jkgfh') assert mesh_name is None
Add test for invalid MESH ID
Add test for invalid MESH ID
Python
bsd-2-clause
pvtodorov/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,sorgerlab/indra
from indra.databases import mesh_client def test_mesh_id_lookup(): mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen' + def test_invalid_id(): + mesh_name = mesh_client.get_mesh_name('34jkgfh') + assert mesh_name is None + +
Add test for invalid MESH ID
## Code Before: from indra.databases import mesh_client def test_mesh_id_lookup(): mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen' ## Instruction: Add test for invalid MESH ID ## Code After: from indra.databases import mesh_client def test_mesh_id_lookup(): mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen' def test_invalid_id(): mesh_name = mesh_client.get_mesh_name('34jkgfh') assert mesh_name is None
# ... existing code ... mesh_id = 'D003094' mesh_name = mesh_client.get_mesh_name(mesh_id) assert mesh_name == 'Collagen' def test_invalid_id(): mesh_name = mesh_client.get_mesh_name('34jkgfh') assert mesh_name is None # ... rest of the code ...
4e0e29199ce01c7ac8f71af78013911da11a8dc0
LandPortalEntities/lpentities/interval.py
LandPortalEntities/lpentities/interval.py
''' Created on 02/02/2014 @author: Miguel Otero ''' from .time import Time class Interval(Time): ''' classdocs ''' MONTHLY = "http://purl.org/linked-data/sdmx/2009/code#freq-M" YEARLY = "http://purl.org/linked-data/sdmx/2009/code#freq-A" def __init__(self, frequency = YEARLY, start_time=None, end_time=None): ''' Constructor ''' self.frequency = frequency self.start_time = start_time self.end_time = end_time def get_time_string(self): return str(self.start_time) + '-' + str(self.end_time)
''' Created on 02/02/2014 @author: Miguel Otero ''' from .time import Time class Interval(Time): ''' classdocs ''' MONTHLY = "freq-M" YEARLY = "freq-A" def __init__(self, frequency=YEARLY, start_time=None, end_time=None): ''' Constructor ''' self.frequency = frequency self.start_time = start_time self.end_time = end_time def get_time_string(self): return str(self.start_time) + '-' + str(self.end_time)
Remove ontology reference in Interval frequency value
Remove ontology reference in Interval frequency value
Python
mit
weso/landportal-importers,landportal/landbook-importers
''' Created on 02/02/2014 @author: Miguel Otero ''' from .time import Time class Interval(Time): ''' classdocs ''' - MONTHLY = "http://purl.org/linked-data/sdmx/2009/code#freq-M" - YEARLY = "http://purl.org/linked-data/sdmx/2009/code#freq-A" + MONTHLY = "freq-M" + YEARLY = "freq-A" - def __init__(self, frequency = YEARLY, start_time=None, end_time=None): + def __init__(self, frequency=YEARLY, start_time=None, end_time=None): ''' Constructor ''' self.frequency = frequency self.start_time = start_time self.end_time = end_time def get_time_string(self): return str(self.start_time) + '-' + str(self.end_time)
Remove ontology reference in Interval frequency value
## Code Before: ''' Created on 02/02/2014 @author: Miguel Otero ''' from .time import Time class Interval(Time): ''' classdocs ''' MONTHLY = "http://purl.org/linked-data/sdmx/2009/code#freq-M" YEARLY = "http://purl.org/linked-data/sdmx/2009/code#freq-A" def __init__(self, frequency = YEARLY, start_time=None, end_time=None): ''' Constructor ''' self.frequency = frequency self.start_time = start_time self.end_time = end_time def get_time_string(self): return str(self.start_time) + '-' + str(self.end_time) ## Instruction: Remove ontology reference in Interval frequency value ## Code After: ''' Created on 02/02/2014 @author: Miguel Otero ''' from .time import Time class Interval(Time): ''' classdocs ''' MONTHLY = "freq-M" YEARLY = "freq-A" def __init__(self, frequency=YEARLY, start_time=None, end_time=None): ''' Constructor ''' self.frequency = frequency self.start_time = start_time self.end_time = end_time def get_time_string(self): return str(self.start_time) + '-' + str(self.end_time)
// ... existing code ... ''' classdocs ''' MONTHLY = "freq-M" YEARLY = "freq-A" def __init__(self, frequency=YEARLY, start_time=None, end_time=None): ''' Constructor ''' // ... rest of the code ...
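A minimal stand-in showing how the shortened frequency codes are used (self-contained: the real class also inherits from the package's `Time` base, dropped here so the snippet runs on its own):

```python
class Interval(object):
    MONTHLY = "freq-M"
    YEARLY = "freq-A"

    def __init__(self, frequency=YEARLY, start_time=None, end_time=None):
        self.frequency = frequency
        self.start_time = start_time
        self.end_time = end_time

    def get_time_string(self):
        return str(self.start_time) + '-' + str(self.end_time)

i = Interval(frequency=Interval.MONTHLY, start_time=2010, end_time=2014)
print(i.frequency, i.get_time_string())  # freq-M 2010-2014
```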
a1d8a81bbd25404b1109688bded9bea923ba6771
formly/tests/urls.py
formly/tests/urls.py
from django.conf.urls import include, url urlpatterns = [ url(r"^", include("formly.urls", namespace="formly")), ]
from django.conf.urls import include, url from django.views.generic import TemplateView urlpatterns = [ url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"), url(r"^", include("formly.urls", namespace="formly")), ]
Add "home" url for testing
Add "home" url for testing
Python
bsd-3-clause
eldarion/formly
from django.conf.urls import include, url + from django.views.generic import TemplateView urlpatterns = [ + url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"), url(r"^", include("formly.urls", namespace="formly")), ]
Add "home" url for testing
## Code Before: from django.conf.urls import include, url urlpatterns = [ url(r"^", include("formly.urls", namespace="formly")), ] ## Instruction: Add "home" url for testing ## Code After: from django.conf.urls import include, url from django.views.generic import TemplateView urlpatterns = [ url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"), url(r"^", include("formly.urls", namespace="formly")), ]
// ... existing code ... from django.conf.urls import include, url from django.views.generic import TemplateView urlpatterns = [ url(r"^home/", TemplateView.as_view(template_name="no-ie.html"), name="home"), url(r"^", include("formly.urls", namespace="formly")), ] // ... rest of the code ...
a2762922b98c3733f103a631cd3ef346ab5bb54f
examples/pax_mininet_node.py
examples/pax_mininet_node.py
from mininet.node import Node from mininet.log import info, warn class PaxNode( Node ): "PaxNode: A node which allows Pax to behave as the sole packet hander on that node." def __init__(self, name, **params): super(PaxNode, self).__init__(name, **params) def config(self, **params): super(PaxNode, self).config(**params) # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. print "Drop all incoming TCP traffic on nat0 so that Pax is effectively the middle-man" for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) # Disable ip_forward because otherwise this still happens, even with the above iptables rules self.cmd("sysctl -w net.ipv4.ip_forward=0") def terminate(self): # Remove iptables rules for intf in self.intfList(): self.cmd("iptables -D INPUT -p tcp -i %s -j DROP" % intf.name) super(PaxNode, self).terminate()
from mininet.node import Node from mininet.log import info, warn class PaxNode( Node ): "PaxNode: A node which allows Pax to behave as the sole packet hander on that node." def __init__(self, name, **params): super(PaxNode, self).__init__(name, **params) def config(self, **params): super(PaxNode, self).config(**params) # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) # Disable ip_forward because otherwise this still happens, even with the above iptables rules self.cmd("sysctl -w net.ipv4.ip_forward=0") def terminate(self): # Remove iptables rules for intf in self.intfList(): self.cmd("iptables -D INPUT -p tcp -i %s -j DROP" % intf.name) super(PaxNode, self).terminate()
Remove print statement from PaxNode config method
Remove print statement from PaxNode config method
Python
apache-2.0
TMVector/pax,niksu/pax
from mininet.node import Node from mininet.log import info, warn class PaxNode( Node ): "PaxNode: A node which allows Pax to behave as the sole packet hander on that node." def __init__(self, name, **params): super(PaxNode, self).__init__(name, **params) def config(self, **params): super(PaxNode, self).config(**params) # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. - print "Drop all incoming TCP traffic on nat0 so that Pax is effectively the middle-man" for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) # Disable ip_forward because otherwise this still happens, even with the above iptables rules self.cmd("sysctl -w net.ipv4.ip_forward=0") def terminate(self): # Remove iptables rules for intf in self.intfList(): self.cmd("iptables -D INPUT -p tcp -i %s -j DROP" % intf.name) super(PaxNode, self).terminate()
Remove print statement from PaxNode config method
## Code Before: from mininet.node import Node from mininet.log import info, warn class PaxNode( Node ): "PaxNode: A node which allows Pax to behave as the sole packet hander on that node." def __init__(self, name, **params): super(PaxNode, self).__init__(name, **params) def config(self, **params): super(PaxNode, self).config(**params) # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. print "Drop all incoming TCP traffic on nat0 so that Pax is effectively the middle-man" for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) # Disable ip_forward because otherwise this still happens, even with the above iptables rules self.cmd("sysctl -w net.ipv4.ip_forward=0") def terminate(self): # Remove iptables rules for intf in self.intfList(): self.cmd("iptables -D INPUT -p tcp -i %s -j DROP" % intf.name) super(PaxNode, self).terminate() ## Instruction: Remove print statement from PaxNode config method ## Code After: from mininet.node import Node from mininet.log import info, warn class PaxNode( Node ): "PaxNode: A node which allows Pax to behave as the sole packet hander on that node." def __init__(self, name, **params): super(PaxNode, self).__init__(name, **params) def config(self, **params): super(PaxNode, self).config(**params) # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) # Disable ip_forward because otherwise this still happens, even with the above iptables rules self.cmd("sysctl -w net.ipv4.ip_forward=0") def terminate(self): # Remove iptables rules for intf in self.intfList(): self.cmd("iptables -D INPUT -p tcp -i %s -j DROP" % intf.name) super(PaxNode, self).terminate()
// ... existing code ... # Setup iptable rules to drop incoming packets on each interface: # Because Pax only sniffs packets (it doesn't steal them), we need to drop the packets # to prevent the OS from handling them and responding. for intf in self.intfList(): self.cmd("iptables -A INPUT -p tcp -i %s -j DROP" % intf.name) // ... rest of the code ...
546a4681aa54ba183e956d220e98ef67ae6de691
user/decorators.py
user/decorators.py
from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect def custom_login_required(view): # view argument must be a function def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): return view(request, *args, **kwargs) else: url = '{}?next={}'.format( settings.LOGIN_URL, request.path) return redirect(url) return new_view
from functools import wraps from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect from django.utils.decorators import \ available_attrs def custom_login_required(view): # view argument must be a function @wraps(view, assigned=available_attrs(view)) def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): return view(request, *args, **kwargs) else: url = '{}?next={}'.format( settings.LOGIN_URL, request.path) return redirect(url) return new_view
Use functools.wraps to copy view signature.
Ch20: Use functools.wraps to copy view signature.
Python
bsd-2-clause
jambonrose/DjangoUnleashed-1.8
+ from functools import wraps + from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect + from django.utils.decorators import \ + available_attrs def custom_login_required(view): # view argument must be a function + @wraps(view, assigned=available_attrs(view)) def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): return view(request, *args, **kwargs) else: url = '{}?next={}'.format( settings.LOGIN_URL, request.path) return redirect(url) return new_view
Use functools.wraps to copy view signature.
## Code Before: from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect def custom_login_required(view): # view argument must be a function def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): return view(request, *args, **kwargs) else: url = '{}?next={}'.format( settings.LOGIN_URL, request.path) return redirect(url) return new_view ## Instruction: Use functools.wraps to copy view signature. ## Code After: from functools import wraps from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect from django.utils.decorators import \ available_attrs def custom_login_required(view): # view argument must be a function @wraps(view, assigned=available_attrs(view)) def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): return view(request, *args, **kwargs) else: url = '{}?next={}'.format( settings.LOGIN_URL, request.path) return redirect(url) return new_view
// ... existing code ... from functools import wraps from django.conf import settings from django.contrib.auth import get_user from django.shortcuts import redirect from django.utils.decorators import \ available_attrs def custom_login_required(view): # view argument must be a function @wraps(view, assigned=available_attrs(view)) def new_view(request, *args, **kwargs): user = get_user(request) if user.is_authenticated(): // ... rest of the code ...
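Why this matters: without `wraps`, the decorator replaces the view's metadata with `new_view`'s, breaking introspection and tooling. A small demonstration (on Python 3, plain `functools.wraps` suffices; Django's `available_attrs` existed to paper over Python 2 quirks and was later removed):

```python
from functools import wraps

def plain(view):
    def new_view(request, *args, **kwargs):
        return view(request, *args, **kwargs)
    return new_view

def wrapped(view):
    @wraps(view)  # copies __name__, __doc__, etc. from the wrapped view
    def new_view(request, *args, **kwargs):
        return view(request, *args, **kwargs)
    return new_view

def profile(request):
    """Render the profile page."""

print(plain(profile).__name__, plain(profile).__doc__)      # new_view None
print(wrapped(profile).__name__, wrapped(profile).__doc__)  # profile Render the profile page.
```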
77d0fe3daec4f6e4f9166dcd32943b21384a2073
tests/test_core.py
tests/test_core.py
from grazer.core import crawler from bs4 import BeautifulSoup def test_extract_links(): text = """ <html><head><title>The Dormouse's story</title></head> <body> <p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> </body> </html> """ bs = BeautifulSoup(text, "html.parser") links = crawler.extract_links(bs) expected = ["http://example.com/elsie", "http://example.com/lacie", "http://example.com/tillie"] assert links == expected def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie"
from grazer.core import crawler from bs4 import BeautifulSoup def test_extract_links(): text = """ <html><head><title>The Dormouse's story</title></head> <body> <p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> </body> </html> """ bs = BeautifulSoup(text, "html.parser") links = crawler.extract_links(bs) expected = ["http://example.com/elsie", "http://example.com/lacie", "http://example.com/tillie"] assert links == expected def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie" def test_trim_link_without_trailing_slash(): result = crawler.trim_link("http://example.com", "http://example.com") assert result == "http://example.com"
Test to validate trimming scenario
Test to validate trimming scenario
Python
mit
CodersOfTheNight/verata
from grazer.core import crawler from bs4 import BeautifulSoup def test_extract_links(): text = """ <html><head><title>The Dormouse's story</title></head> <body> <p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> </body> </html> """ bs = BeautifulSoup(text, "html.parser") links = crawler.extract_links(bs) expected = ["http://example.com/elsie", "http://example.com/lacie", "http://example.com/tillie"] assert links == expected def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie" + + def test_trim_link_without_trailing_slash(): + result = crawler.trim_link("http://example.com", "http://example.com") + assert result == "http://example.com" +
Test to validate trimming scenario
## Code Before: from grazer.core import crawler from bs4 import BeautifulSoup def test_extract_links(): text = """ <html><head><title>The Dormouse's story</title></head> <body> <p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> </body> </html> """ bs = BeautifulSoup(text, "html.parser") links = crawler.extract_links(bs) expected = ["http://example.com/elsie", "http://example.com/lacie", "http://example.com/tillie"] assert links == expected def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie" ## Instruction: Test to validate trimming scenario ## Code After: from grazer.core import crawler from bs4 import BeautifulSoup def test_extract_links(): text = """ <html><head><title>The Dormouse's story</title></head> <body> <p class="title"><b>The Dormouse's story</b></p> <p class="story">Once upon a time there were three little sisters; and their names were <a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>, <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>; and they lived at the bottom of a well.</p> </body> </html> """ bs = BeautifulSoup(text, "html.parser") links = crawler.extract_links(bs) expected = ["http://example.com/elsie", "http://example.com/lacie", "http://example.com/tillie"] assert links == expected def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie" def test_trim_link_without_trailing_slash(): result = crawler.trim_link("http://example.com", "http://example.com") assert result == "http://example.com"
# ... existing code ... def test_link_trimmer(): result = crawler.trim_link("http://example.com/lacie", "http://example.com") assert result == "/lacie" def test_trim_link_without_trailing_slash(): result = crawler.trim_link("http://example.com", "http://example.com") assert result == "http://example.com" # ... rest of the code ...
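The two tests pin down the contract but not the implementation. One hypothetical implementation consistent with both assertions (the real `grazer.core.crawler.trim_link` may differ):

```python
def trim_link(link, domain):
    trimmed = link.replace(domain, '', 1)
    # An empty result means the link *was* the domain root; keep it whole.
    return trimmed if trimmed else link

assert trim_link('http://example.com/lacie', 'http://example.com') == '/lacie'
assert trim_link('http://example.com', 'http://example.com') == 'http://example.com'
```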
1ee414611fa6e01516d545bb284695a62bd69f0a
rtrss/daemon.py
rtrss/daemon.py
import sys import os import logging import atexit from rtrss.basedaemon import BaseDaemon _logger = logging.getLogger(__name__) class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) from rtrss.worker import app_init, worker_action worker_action('import_categories') # TODO run() _logger.info('Daemon is done and exiting') def start(self): _logger.info('Starting daemon') super(WorkerDaemon, self).start() def stop(self): _logger.info('Stopping daemon') super(WorkerDaemon, self).stop() def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() def make_daemon(config): '''Returns WorkerDaemon instance''' pidfile = os.path.join(config.DATA_DIR, 'daemon.pid') logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
import os import logging from rtrss.basedaemon import BaseDaemon _logger = logging.getLogger(__name__) class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) from rtrss.worker import worker_action worker_action('run') _logger.info('Daemon is done and exiting') def start(self): _logger.info('Starting daemon') super(WorkerDaemon, self).start() def stop(self): _logger.info('Stopping daemon') super(WorkerDaemon, self).stop() def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() def make_daemon(config): '''Returns WorkerDaemon instance''' pidfile = os.path.join(config.DATA_DIR, 'daemon.pid') logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
Change debug action to production
Change debug action to production
Python
apache-2.0
notapresent/rtrss
- import sys import os import logging - import atexit + from rtrss.basedaemon import BaseDaemon + _logger = logging.getLogger(__name__) class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) - + - from rtrss.worker import app_init, worker_action + from rtrss.worker import worker_action - worker_action('import_categories') # TODO run() - + worker_action('run') + _logger.info('Daemon is done and exiting') def start(self): _logger.info('Starting daemon') super(WorkerDaemon, self).start() def stop(self): _logger.info('Stopping daemon') super(WorkerDaemon, self).stop() def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() - + def make_daemon(config): '''Returns WorkerDaemon instance''' pidfile = os.path.join(config.DATA_DIR, 'daemon.pid') logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile) - +
Change debug action to production
## Code Before: import sys import os import logging import atexit from rtrss.basedaemon import BaseDaemon _logger = logging.getLogger(__name__) class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) from rtrss.worker import app_init, worker_action worker_action('import_categories') # TODO run() _logger.info('Daemon is done and exiting') def start(self): _logger.info('Starting daemon') super(WorkerDaemon, self).start() def stop(self): _logger.info('Stopping daemon') super(WorkerDaemon, self).stop() def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() def make_daemon(config): '''Returns WorkerDaemon instance''' pidfile = os.path.join(config.DATA_DIR, 'daemon.pid') logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile) ## Instruction: Change debug action to production ## Code After: import os import logging from rtrss.basedaemon import BaseDaemon _logger = logging.getLogger(__name__) class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) from rtrss.worker import worker_action worker_action('run') _logger.info('Daemon is done and exiting') def start(self): _logger.info('Starting daemon') super(WorkerDaemon, self).start() def stop(self): _logger.info('Stopping daemon') super(WorkerDaemon, self).stop() def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() def make_daemon(config): '''Returns WorkerDaemon instance''' pidfile = os.path.join(config.DATA_DIR, 'daemon.pid') logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
// ... existing code ... import os import logging from rtrss.basedaemon import BaseDaemon _logger = logging.getLogger(__name__) // ... modified code ... class WorkerDaemon(BaseDaemon): def run(self): _logger.info('Daemon started ith pid %d', os.getpid()) from rtrss.worker import worker_action worker_action('run') _logger.info('Daemon is done and exiting') def start(self): ... def restart(self): _logger.info('Restarting daemon') super(WorkerDaemon, self).restart() def make_daemon(config): '''Returns WorkerDaemon instance''' ... logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR logfile = os.path.join(logdir, 'daemon.log') return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile) // ... rest of the code ...
0050711d85ba4084e9d0f32d3bad1b3400350476
name/feeds.py
name/feeds.py
from django.contrib.syndication.views import Feed from django.core.urlresolvers import reverse_lazy from django.utils.feedgenerator import Atom1Feed from . import app_settings from .models import Name class NameAtomFeedType(Atom1Feed): """Create an Atom feed that sets the Content-Type response header to application/xml. """ mime_type = 'application/xml' class NameAtomFeed(Feed): feed_type = NameAtomFeedType link = reverse_lazy("name_feed") title = "Name App" subtitle = "New Name Records" author_name = app_settings.NAME_FEED_AUTHOR_NAME author_email = app_settings.NAME_FEED_AUTHOR_EMAIL author_link = app_settings.NAME_FEED_AUTHOR_LINK def items(self): # last 5 added items return Name.objects.order_by('-date_created')[:20] def item_title(self, obj): return obj.name def item_description(self, obj): return 'Name Type: {0}'.format(obj.get_name_type_label()) def item_link(self, obj): return obj.get_absolute_url()
from django.contrib.syndication.views import Feed from django.core.urlresolvers import reverse_lazy from django.utils.feedgenerator import Atom1Feed from . import app_settings from .models import Name class NameAtomFeedType(Atom1Feed): """Create an Atom feed that sets the Content-Type response header to application/xml. """ mime_type = 'application/xml' def root_attributes(self): attrs = super(NameAtomFeedType, self).root_attributes() attrs['xmlns:georss'] = 'http://www.georss.org/georss' return attrs def add_item_elements(self, handler, item): super(NameAtomFeedType, self).add_item_elements(handler, item) if item.get('location'): handler.addQuickElement('georss:point', item['location']) class NameAtomFeed(Feed): feed_type = NameAtomFeedType link = reverse_lazy("name_feed") title = "Name App" subtitle = "New Name Records" author_name = app_settings.NAME_FEED_AUTHOR_NAME author_email = app_settings.NAME_FEED_AUTHOR_EMAIL author_link = app_settings.NAME_FEED_AUTHOR_LINK def items(self): # last 5 added items return Name.objects.order_by('-date_created')[:20] def item_title(self, obj): return obj.name def item_description(self, obj): return 'Name Type: {0}'.format(obj.get_name_type_label()) def item_link(self, obj): return obj.get_absolute_url() def item_location(self, obj): if obj.has_locations() and obj.location_set.current_location: return obj.location_set.current_location.geo_point() def item_extra_kwargs(self, obj): return dict(location=self.item_location(obj))
Add the location as a georss:point element.
Add the location as a georss:point element.
Python
bsd-3-clause
damonkelley/django-name,unt-libraries/django-name
from django.contrib.syndication.views import Feed from django.core.urlresolvers import reverse_lazy from django.utils.feedgenerator import Atom1Feed from . import app_settings from .models import Name class NameAtomFeedType(Atom1Feed): """Create an Atom feed that sets the Content-Type response header to application/xml. """ mime_type = 'application/xml' + + def root_attributes(self): + attrs = super(NameAtomFeedType, self).root_attributes() + attrs['xmlns:georss'] = 'http://www.georss.org/georss' + return attrs + + def add_item_elements(self, handler, item): + super(NameAtomFeedType, self).add_item_elements(handler, item) + if item.get('location'): + handler.addQuickElement('georss:point', item['location']) class NameAtomFeed(Feed): feed_type = NameAtomFeedType link = reverse_lazy("name_feed") title = "Name App" subtitle = "New Name Records" author_name = app_settings.NAME_FEED_AUTHOR_NAME author_email = app_settings.NAME_FEED_AUTHOR_EMAIL author_link = app_settings.NAME_FEED_AUTHOR_LINK def items(self): # last 5 added items return Name.objects.order_by('-date_created')[:20] def item_title(self, obj): return obj.name def item_description(self, obj): return 'Name Type: {0}'.format(obj.get_name_type_label()) def item_link(self, obj): return obj.get_absolute_url() + def item_location(self, obj): + if obj.has_locations() and obj.location_set.current_location: + return obj.location_set.current_location.geo_point() + + def item_extra_kwargs(self, obj): + return dict(location=self.item_location(obj)) +
Add the location as a georss:point element.
## Code Before: from django.contrib.syndication.views import Feed from django.core.urlresolvers import reverse_lazy from django.utils.feedgenerator import Atom1Feed from . import app_settings from .models import Name class NameAtomFeedType(Atom1Feed): """Create an Atom feed that sets the Content-Type response header to application/xml. """ mime_type = 'application/xml' class NameAtomFeed(Feed): feed_type = NameAtomFeedType link = reverse_lazy("name_feed") title = "Name App" subtitle = "New Name Records" author_name = app_settings.NAME_FEED_AUTHOR_NAME author_email = app_settings.NAME_FEED_AUTHOR_EMAIL author_link = app_settings.NAME_FEED_AUTHOR_LINK def items(self): # last 5 added items return Name.objects.order_by('-date_created')[:20] def item_title(self, obj): return obj.name def item_description(self, obj): return 'Name Type: {0}'.format(obj.get_name_type_label()) def item_link(self, obj): return obj.get_absolute_url() ## Instruction: Add the location as a georss:point element. ## Code After: from django.contrib.syndication.views import Feed from django.core.urlresolvers import reverse_lazy from django.utils.feedgenerator import Atom1Feed from . import app_settings from .models import Name class NameAtomFeedType(Atom1Feed): """Create an Atom feed that sets the Content-Type response header to application/xml. """ mime_type = 'application/xml' def root_attributes(self): attrs = super(NameAtomFeedType, self).root_attributes() attrs['xmlns:georss'] = 'http://www.georss.org/georss' return attrs def add_item_elements(self, handler, item): super(NameAtomFeedType, self).add_item_elements(handler, item) if item.get('location'): handler.addQuickElement('georss:point', item['location']) class NameAtomFeed(Feed): feed_type = NameAtomFeedType link = reverse_lazy("name_feed") title = "Name App" subtitle = "New Name Records" author_name = app_settings.NAME_FEED_AUTHOR_NAME author_email = app_settings.NAME_FEED_AUTHOR_EMAIL author_link = app_settings.NAME_FEED_AUTHOR_LINK def items(self): # last 5 added items return Name.objects.order_by('-date_created')[:20] def item_title(self, obj): return obj.name def item_description(self, obj): return 'Name Type: {0}'.format(obj.get_name_type_label()) def item_link(self, obj): return obj.get_absolute_url() def item_location(self, obj): if obj.has_locations() and obj.location_set.current_location: return obj.location_set.current_location.geo_point() def item_extra_kwargs(self, obj): return dict(location=self.item_location(obj))
# ... existing code ... header to application/xml. """ mime_type = 'application/xml' def root_attributes(self): attrs = super(NameAtomFeedType, self).root_attributes() attrs['xmlns:georss'] = 'http://www.georss.org/georss' return attrs def add_item_elements(self, handler, item): super(NameAtomFeedType, self).add_item_elements(handler, item) if item.get('location'): handler.addQuickElement('georss:point', item['location']) class NameAtomFeed(Feed): # ... modified code ... def item_link(self, obj): return obj.get_absolute_url() def item_location(self, obj): if obj.has_locations() and obj.location_set.current_location: return obj.location_set.current_location.geo_point() def item_extra_kwargs(self, obj): return dict(location=self.item_location(obj)) # ... rest of the code ...
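Put together, the two feed-type overrides declare the GeoRSS namespace on the root element and emit one point per entry. The output shape, roughly (the title and coordinates are invented, and `geo_point()` is assumed to return a "lat lon" string):

```python
# Illustrative output shape only -- not generated by running the feed:
print("""<feed xmlns="http://www.w3.org/2005/Atom"
      xmlns:georss="http://www.georss.org/georss">
  <entry>
    <title>Example Name</title>
    <georss:point>33.21 -97.13</georss:point>
  </entry>
</feed>""")
```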
4db714570a9ce58a08c72aa1477e9e7a48ed650c
tests/util_tests.py
tests/util_tests.py
from chai import Chai from arrow import util class UtilTests(Chai): def test_is_timestamp(self): timestamp_float = 1563047716.958061 timestamp_int = int(timestamp_float) self.assertTrue(util.is_timestamp(timestamp_int)) self.assertTrue(util.is_timestamp(timestamp_float)) self.assertFalse(util.is_timestamp(str(timestamp_int))) self.assertFalse(util.is_timestamp(str(timestamp_float))) self.assertFalse(util.is_timestamp(True)) self.assertFalse(util.is_timestamp(False)) full_datetime = "2019-06-23T13:12:42" self.assertFalse(util.is_timestamp(full_datetime)) overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999 with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_float) overflow_timestamp_int = int(overflow_timestamp_float) with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_int)
import time from chai import Chai from arrow import util class UtilTests(Chai): def test_is_timestamp(self): timestamp_float = time.time() timestamp_int = int(timestamp_float) self.assertTrue(util.is_timestamp(timestamp_int)) self.assertTrue(util.is_timestamp(timestamp_float)) self.assertFalse(util.is_timestamp(str(timestamp_int))) self.assertFalse(util.is_timestamp(str(timestamp_float))) self.assertFalse(util.is_timestamp(True)) self.assertFalse(util.is_timestamp(False)) full_datetime = "2019-06-23T13:12:42" self.assertFalse(util.is_timestamp(full_datetime)) overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999 with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_float) overflow_timestamp_int = int(overflow_timestamp_float) with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_int)
Replace hard coded timestamp with time.time()
Replace hard coded timestamp with time.time()
Python
apache-2.0
crsmithdev/arrow
+ import time + from chai import Chai from arrow import util class UtilTests(Chai): def test_is_timestamp(self): - timestamp_float = 1563047716.958061 + timestamp_float = time.time() timestamp_int = int(timestamp_float) self.assertTrue(util.is_timestamp(timestamp_int)) self.assertTrue(util.is_timestamp(timestamp_float)) self.assertFalse(util.is_timestamp(str(timestamp_int))) self.assertFalse(util.is_timestamp(str(timestamp_float))) self.assertFalse(util.is_timestamp(True)) self.assertFalse(util.is_timestamp(False)) full_datetime = "2019-06-23T13:12:42" self.assertFalse(util.is_timestamp(full_datetime)) overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999 with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_float) overflow_timestamp_int = int(overflow_timestamp_float) with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_int)
Replace hard coded timestamp with time.time()
## Code Before: from chai import Chai from arrow import util class UtilTests(Chai): def test_is_timestamp(self): timestamp_float = 1563047716.958061 timestamp_int = int(timestamp_float) self.assertTrue(util.is_timestamp(timestamp_int)) self.assertTrue(util.is_timestamp(timestamp_float)) self.assertFalse(util.is_timestamp(str(timestamp_int))) self.assertFalse(util.is_timestamp(str(timestamp_float))) self.assertFalse(util.is_timestamp(True)) self.assertFalse(util.is_timestamp(False)) full_datetime = "2019-06-23T13:12:42" self.assertFalse(util.is_timestamp(full_datetime)) overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999 with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_float) overflow_timestamp_int = int(overflow_timestamp_float) with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_int) ## Instruction: Replace hard coded timestamp with time.time() ## Code After: import time from chai import Chai from arrow import util class UtilTests(Chai): def test_is_timestamp(self): timestamp_float = time.time() timestamp_int = int(timestamp_float) self.assertTrue(util.is_timestamp(timestamp_int)) self.assertTrue(util.is_timestamp(timestamp_float)) self.assertFalse(util.is_timestamp(str(timestamp_int))) self.assertFalse(util.is_timestamp(str(timestamp_float))) self.assertFalse(util.is_timestamp(True)) self.assertFalse(util.is_timestamp(False)) full_datetime = "2019-06-23T13:12:42" self.assertFalse(util.is_timestamp(full_datetime)) overflow_timestamp_float = 99999999999999999999999999.99999999999999999999999999 with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_float) overflow_timestamp_int = int(overflow_timestamp_float) with self.assertRaises((OverflowError, ValueError)): util.is_timestamp(overflow_timestamp_int)
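A quick standalone illustration of why the change matters (a sketch, not part of arrow's test suite; looks_like_timestamp is a hypothetical restatement of the property the test exercises): a hard-coded timestamp freezes the test at one point in time, while time.time() keeps the asserted values current. Note that bool must be rejected explicitly even though it subclasses int.

import time

def looks_like_timestamp(value):
    # Hypothetical restatement: ints and floats pass, bools and strings do not.
    if isinstance(value, bool):
        return False
    return isinstance(value, (int, float))

assert looks_like_timestamp(time.time())
assert looks_like_timestamp(int(time.time()))
assert not looks_like_timestamp(str(time.time()))
assert not looks_like_timestamp(True)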
4208538a2b7c5f2280f67520a73bd87b74de26dd
scripts/getsent.py
scripts/getsent.py
Fix script to output new line at end of file
Fix script to output new line at end of file
Python
apache-2.0
habeanf/yap,habeanf/yap
import sys
import depio

sentnum = int(sys.argv[2])
fnames = [sys.argv[1]]
for fname in fnames:
    sents = list(depio.depread(fname))
    i=0
    out = open("%d.%s" % (sentnum,fname),'w')
    for outl in sents[sentnum]:
        out.write('\t'.join(outl) + '\n')
-    break
+    out.write('\n')
    out.close()
Fix script to output new line at end of file
## Code Before:

import sys
import depio

sentnum = int(sys.argv[2])
fnames = [sys.argv[1]]
for fname in fnames:
    sents = list(depio.depread(fname))
    i=0
    out = open("%d.%s" % (sentnum,fname),'w')
    for outl in sents[sentnum]:
        out.write('\t'.join(outl) + '\n')
    break
    out.close()

## Instruction:

Fix script to output new line at end of file

## Code After:

import sys
import depio

sentnum = int(sys.argv[2])
fnames = [sys.argv[1]]
for fname in fnames:
    sents = list(depio.depread(fname))
    i=0
    out = open("%d.%s" % (sentnum,fname),'w')
    for outl in sents[sentnum]:
        out.write('\t'.join(outl) + '\n')
    out.write('\n')
    out.close()
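The pattern the fix adopts, in isolation (a minimal sketch; the file name and fields are made up, and the trailing-blank-line convention is an assumption based on the tab-separated dependency format the script handles):

rows = [["1", "John", "NNP"], ["2", "runs", "VBZ"]]
with open("0.example.conll", "w") as out:
    for fields in rows:
        out.write("\t".join(fields) + "\n")
    out.write("\n")  # terminating newline so the block is properly closed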
ce67500ec566784f6f8883e1ffcaef6ad768d810
2018/05/solve.py
2018/05/solve.py
Fix bug with omitting v
Fix bug with omitting v
Python
mit
lamperi/aoc,lamperi/aoc,lamperi/aoc,lamperi/aoc,lamperi/aoc
data = open("input.txt").read().strip()
import re
+ import string
from collections import Counter

def solve1(data):
    prevData = None
    while data != prevData:
        prevData = data
        for a,b in zip(data, data[1:]):
            if (a != b and a == b.lower()) or (a != b and a.lower() == b):
                data = data.replace(a+b, "")
                break
    return len(data)

print(solve1("""dabAcCaCBAcCcaDA"""))
print(solve1(data))

def solve2(data):
    min_len = len(data)
+    for c in string.ascii_lowercase:
-    min_chr = None
-    for c in 'abcdefghijklmnopqrstubwxyz':
        d = data.replace(c, "").replace(c.upper(), "")
        l = solve1(d)
        if l < min_len:
            min_len = l
-            min_chr = c
    return min_len

print(solve2("""dabAcCaCBAcCcaDA"""))
print(solve2(data))
Fix bug with omitting v
## Code Before:

data = open("input.txt").read().strip()
import re
from collections import Counter

def solve1(data):
    prevData = None
    while data != prevData:
        prevData = data
        for a,b in zip(data, data[1:]):
            if (a != b and a == b.lower()) or (a != b and a.lower() == b):
                data = data.replace(a+b, "")
                break
    return len(data)

print(solve1("""dabAcCaCBAcCcaDA"""))
print(solve1(data))

def solve2(data):
    min_len = len(data)
    min_chr = None
    for c in 'abcdefghijklmnopqrstubwxyz':
        d = data.replace(c, "").replace(c.upper(), "")
        l = solve1(d)
        if l < min_len:
            min_len = l
            min_chr = c
    return min_len

print(solve2("""dabAcCaCBAcCcaDA"""))
print(solve2(data))

## Instruction:

Fix bug with omitting v

## Code After:

data = open("input.txt").read().strip()
import re
import string
from collections import Counter

def solve1(data):
    prevData = None
    while data != prevData:
        prevData = data
        for a,b in zip(data, data[1:]):
            if (a != b and a == b.lower()) or (a != b and a.lower() == b):
                data = data.replace(a+b, "")
                break
    return len(data)

print(solve1("""dabAcCaCBAcCcaDA"""))
print(solve1(data))

def solve2(data):
    min_len = len(data)
    for c in string.ascii_lowercase:
        d = data.replace(c, "").replace(c.upper(), "")
        l = solve1(d)
        if l < min_len:
            min_len = l
    return min_len

print(solve2("""dabAcCaCBAcCcaDA"""))
print(solve2(data))
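The bug is easy to miss by eye: the hand-typed alphabet reads 'abcdefghijklmnopqrstubwxyz', with a second 'b' where 'v' belongs, so the letter v was never tried. A quick check showing why string.ascii_lowercase is the safer choice:

import string

typed = 'abcdefghijklmnopqrstubwxyz'  # the buggy literal
print(set(string.ascii_lowercase) - set(typed))  # {'v'}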
406abe34d82aeb7d72d7f4dc96d44d57807f928b
devilry/devilry_authenticate/urls.py
devilry/devilry_authenticate/urls.py
Add placeholder for feide oauth callback.
authenticate: Add placeholder for feide oauth callback.
Python
bsd-3-clause
devilry/devilry-django,devilry/devilry-django,devilry/devilry-django,devilry/devilry-django
from devilry.devilry_authenticate.views import CustomLoginView
from django.conf.urls import url
from django_cradmin.apps.cradmin_authenticate.views import logout

urlpatterns = [
    url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),
    url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),
+    # url(r'^feide/oauth-successful-login-callback', ...),
]
Add placeholder for feide oauth callback.
## Code Before:

from devilry.devilry_authenticate.views import CustomLoginView
from django.conf.urls import url
from django_cradmin.apps.cradmin_authenticate.views import logout

urlpatterns = [
    url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),
    url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),
]

## Instruction:

Add placeholder for feide oauth callback.

## Code After:

from devilry.devilry_authenticate.views import CustomLoginView
from django.conf.urls import url
from django_cradmin.apps.cradmin_authenticate.views import logout

urlpatterns = [
    url(r'^login$', CustomLoginView.as_view(), name='cradmin-authenticate-login'),
    url(r'^logout$', logout.cradmin_logoutview, name='cradmin-authenticate-logout'),
    # url(r'^feide/oauth-successful-login-callback', ...),
]
2565df456ecb290f620ce4dadca19c76b0eeb1af
widgets/card.py
widgets/card.py
Fix a bug in caching
Fix a bug in caching
Python
apache-2.0
teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr
from flask import render_template

from cache import cache
from models.person import Person

@cache.memoize(24 * 60 * 60)
- def card(person_or_id, **kwargs):
+ def card(person_or_id, detailed=False, small=False):
    if isinstance(person_or_id, Person):
        person = person_or_id
    else:
        person = Person.query.filter_by(id=person_or_id).first()
-    return render_template('widgets/card.html', person=person, **kwargs)
+    return render_template('widgets/card.html', person=person, detailed=detailed, small=small)
Fix a bug in caching
## Code Before:

from flask import render_template

from cache import cache
from models.person import Person

@cache.memoize(24 * 60 * 60)
def card(person_or_id, **kwargs):
    if isinstance(person_or_id, Person):
        person = person_or_id
    else:
        person = Person.query.filter_by(id=person_or_id).first()
    return render_template('widgets/card.html', person=person, **kwargs)

## Instruction:

Fix a bug in caching

## Code After:

from flask import render_template

from cache import cache
from models.person import Person

@cache.memoize(24 * 60 * 60)
def card(person_or_id, detailed=False, small=False):
    if isinstance(person_or_id, Person):
        person = person_or_id
    else:
        person = Person.query.filter_by(id=person_or_id).first()
    return render_template('widgets/card.html', person=person, detailed=detailed, small=small)
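Why **kwargs can defeat memoization, in a simplified standalone form (this models the failure mode in spirit; it is not Flask-Cache's actual key-building code): if keyword arguments are left out of the cache key, calls that differ only in keyword arguments collide.

def naive_memoize(fn):
    cache = {}
    def wrapper(*args, **kwargs):
        key = args  # keyword args ignored -- the collision source
        if key not in cache:
            cache[key] = fn(*args, **kwargs)
        return cache[key]
    return wrapper

@naive_memoize
def render(person_id, detailed=False):
    return (person_id, detailed)

print(render(1, detailed=True))   # computed: (1, True)
print(render(1, detailed=False))  # stale cache hit: still (1, True)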
e0d811f5146ba2c97af3da4ac904db4d16b5d9bb
python/ctci_big_o.py
python/ctci_big_o.py
Solve all test cases but 2
Solve all test cases but 2
Python
mit
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
+ from collections import deque
- p = int(input().strip())
- for a0 in range(p):
-     n = int(input().strip())
+
+ class Sieve(object):
+
+     def __init__(self, upper_bound):
+         self.upper_bound = upper_bound + 1
+         self.primes = []
+         self.populate_primes()
+         # print("Primes " + str(self.primes))
+
+     def is_prime(self, potential_prime):
+         return potential_prime in self.primes
+
+     def populate_primes(self,):
+         remaining = deque(range(2, self.upper_bound))
+         while remaining:
+             prime = remaining.popleft()
+             self.primes.append(prime)
+             for multiple in self.multiples(prime):
+                 if multiple in remaining:
+                     remaining.remove(multiple)
+
+     def multiples(self, num):
+         return range(num, self.upper_bound, num)
+
+
+ NUM_CASES = int(input().strip())
+ TEST_CASES = []
+ for _ in range(NUM_CASES):
+     TEST_CASES.append(int(input().strip()))
+
+ # print("Max: " + str(max(TEST_CASES)))
+ SIEVE = Sieve(max(TEST_CASES))
+ for test_case in TEST_CASES:
+     if SIEVE.is_prime(test_case):
+         print("Prime")
+     else:
+         print("Not prime")
+
Solve all test cases but 2
## Code Before:

p = int(input().strip())
for a0 in range(p):
    n = int(input().strip())

## Instruction:

Solve all test cases but 2

## Code After:

from collections import deque


class Sieve(object):

    def __init__(self, upper_bound):
        self.upper_bound = upper_bound + 1
        self.primes = []
        self.populate_primes()
        # print("Primes " + str(self.primes))

    def is_prime(self, potential_prime):
        return potential_prime in self.primes

    def populate_primes(self,):
        remaining = deque(range(2, self.upper_bound))
        while remaining:
            prime = remaining.popleft()
            self.primes.append(prime)
            for multiple in self.multiples(prime):
                if multiple in remaining:
                    remaining.remove(multiple)

    def multiples(self, num):
        return range(num, self.upper_bound, num)


NUM_CASES = int(input().strip())
TEST_CASES = []
for _ in range(NUM_CASES):
    TEST_CASES.append(int(input().strip()))

# print("Max: " + str(max(TEST_CASES)))
SIEVE = Sieve(max(TEST_CASES))
for test_case in TEST_CASES:
    if SIEVE.is_prime(test_case):
        print("Prime")
    else:
        print("Not prime")
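The likely reason two cases still fail is performance (an inference from the commit message, not stated in it): `multiple in remaining` and `remaining.remove(...)` each scan the deque, making populate_primes roughly quadratic. A boolean-array Sieve of Eratosthenes is the usual alternative; a sketch:

def sieve_primes(upper_bound):
    # O(n log log n): mark composites in a flat boolean array.
    is_prime = [True] * (upper_bound + 1)
    is_prime[0:2] = [False, False]
    for p in range(2, int(upper_bound ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, upper_bound + 1, p):
                is_prime[multiple] = False
    return {n for n, flag in enumerate(is_prime) if flag}

print(7 in sieve_primes(100))   # True
print(9 in sieve_primes(100))   # False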
b86c53c388c39baee1ddfe3a615cdad20d272055
antcolony/util.py
antcolony/util.py
Make avg() work with iterators
Make avg() work with iterators
Python
bsd-3-clause
ppolewicz/ant-colony,ppolewicz/ant-colony
import json

def avg(iterable):
-    return sum(iterable) / len(iterable)
+    sum_ = 0
+    element_count = 0
+    for element in iterable:
+        sum_ += element
+        element_count += 1
+    return sum_ / element_count

def nice_json_dump(data, filepath):
    with open(filepath, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
Make avg() work with iterators
## Code Before:

import json

def avg(iterable):
    return sum(iterable) / len(iterable)

def nice_json_dump(data, filepath):
    with open(filepath, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))

## Instruction:

Make avg() work with iterators

## Code After:

import json

def avg(iterable):
    sum_ = 0
    element_count = 0
    for element in iterable:
        sum_ += element
        element_count += 1
    return sum_ / element_count

def nice_json_dump(data, filepath):
    with open(filepath, 'w') as f:
        json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
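The motivation, made concrete (a standalone sketch; avg() is repeated here so the snippet runs on its own): generators and other one-shot iterators have no len(), so the old one-liner raises TypeError on them, while the loop version consumes the iterable once and works.

def avg(iterable):
    # The fixed implementation from the record, repeated for a runnable sketch.
    sum_ = 0
    element_count = 0
    for element in iterable:
        sum_ += element
        element_count += 1
    return sum_ / element_count

gen = (x * 2 for x in range(5))
print(avg(gen))  # 4.0 on Python 3
# sum(gen) / len(gen)  # would raise TypeError: object of type 'generator' has no len()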
419d2ca4d53e33c58d556b45bcc6910bd28ef91a
djangae/apps.py
djangae/apps.py
Make sure we only connect to the signals once
Make sure we only connect to the signals once
Python
bsd-3-clause
kirberich/djangae,asendecka/djangae,asendecka/djangae,SiPiggles/djangae,wangjun/djangae,potatolondon/djangae,kirberich/djangae,SiPiggles/djangae,SiPiggles/djangae,leekchan/djangae,armirusco/djangae,chargrizzle/djangae,trik/djangae,grzes/djangae,armirusco/djangae,jscissr/djangae,trik/djangae,jscissr/djangae,wangjun/djangae,asendecka/djangae,leekchan/djangae,chargrizzle/djangae,wangjun/djangae,grzes/djangae,trik/djangae,potatolondon/djangae,grzes/djangae,jscissr/djangae,chargrizzle/djangae,kirberich/djangae,armirusco/djangae,leekchan/djangae
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _

class DjangaeConfig(AppConfig):
    name = 'djangae'
    verbose_name = _("Djangae")

    def ready(self):
        from .patches.contenttypes import patch
        patch()

        from djangae.db.backends.appengine.caching import reset_context
        from django.core.signals import request_finished, request_started
-        request_finished.connect(reset_context)
-        request_started.connect(reset_context)
+        request_finished.connect(reset_context, dispatch_uid="request_finished_context_reset")
+        request_started.connect(reset_context, dispatch_uid="request_started_context_reset")
Make sure we only connect to the signals once
## Code Before:

from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _

class DjangaeConfig(AppConfig):
    name = 'djangae'
    verbose_name = _("Djangae")

    def ready(self):
        from .patches.contenttypes import patch
        patch()

        from djangae.db.backends.appengine.caching import reset_context
        from django.core.signals import request_finished, request_started
        request_finished.connect(reset_context)
        request_started.connect(reset_context)

## Instruction:

Make sure we only connect to the signals once

## Code After:

from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _

class DjangaeConfig(AppConfig):
    name = 'djangae'
    verbose_name = _("Djangae")

    def ready(self):
        from .patches.contenttypes import patch
        patch()

        from djangae.db.backends.appengine.caching import reset_context
        from django.core.signals import request_finished, request_started
        request_finished.connect(reset_context, dispatch_uid="request_finished_context_reset")
        request_started.connect(reset_context, dispatch_uid="request_started_context_reset")
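What dispatch_uid buys, as a standalone sketch (requires Django but no project setup; this is not djangae code): two distinct receiver objects sharing a dispatch_uid are registered only once, so re-running the wiring cannot stack duplicate receivers.

import django.dispatch

sig = django.dispatch.Signal()
log = []

def make_receiver():
    def receiver(sender, **kwargs):
        log.append(1)
    return receiver

sig.connect(make_receiver(), dispatch_uid="context_reset", weak=False)
sig.connect(make_receiver(), dispatch_uid="context_reset", weak=False)  # no-op: same uid
sig.send(sender=None)
assert len(log) == 1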
318c98ab5a9710dfdeedc0ee893e87993ac49727
robosync/test/test_robosync.py
robosync/test/test_robosync.py
Add source and destination list to setup and teardown
Add source and destination list to setup and teardown
Python
mit
rbn920/robosync
import unittest
import os
import shutil

class testMirror(unittest.TestCase):
    def setUp(self):
        os.mkdir('test_source')
        os.mkdir('test_dest')
        source_dirs = ['dir1', 'dir2', 'dir3']
+        dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']
        filenames = ['file1.txt', 'file2.txt', 'file3.txt']
        contents = ['foobar1', 'foobar2', 'foobar3']

+        with open('source_file.txt', 'w') as f:
+            f.write('\n'.join(source_dirs))
+
+        with open('dest_file.txt', 'w') as f:
+            f.write('\n'.join(dest_dirs))
+
        for d_name, f_name, content, in zip(source_dirs, filenames, contents):
            new_dir = os.path.join('test_source', d_name)
            os.mkdir(new_dir)
            with open(os.path.join('test_source', d_name, f_name), 'w') as f:
                f.write(content)

    def tearDown(self):
        shutil.rmtree('test_source')
        shutil.rmtree('test_dest')
+        os.remove('source_file.txt')
+        os.remove('dest_file.txt')

    def test_mirror(self):
        pass

if __name__ == '__main__':
    unittest.main()
Add source and destination list to setup and teardown
## Code Before:

import unittest
import os
import shutil

class testMirror(unittest.TestCase):
    def setUp(self):
        os.mkdir('test_source')
        os.mkdir('test_dest')
        source_dirs = ['dir1', 'dir2', 'dir3']
        filenames = ['file1.txt', 'file2.txt', 'file3.txt']
        contents = ['foobar1', 'foobar2', 'foobar3']
        for d_name, f_name, content, in zip(source_dirs, filenames, contents):
            new_dir = os.path.join('test_source', d_name)
            os.mkdir(new_dir)
            with open(os.path.join('test_source', d_name, f_name), 'w') as f:
                f.write(content)

    def tearDown(self):
        shutil.rmtree('test_source')
        shutil.rmtree('test_dest')

    def test_mirror(self):
        pass

if __name__ == '__main__':
    unittest.main()

## Instruction:

Add source and destination list to setup and teardown

## Code After:

import unittest
import os
import shutil

class testMirror(unittest.TestCase):
    def setUp(self):
        os.mkdir('test_source')
        os.mkdir('test_dest')
        source_dirs = ['dir1', 'dir2', 'dir3']
        dest_dirs = ['dir1_c', 'dir2_c', 'dir3_c']
        filenames = ['file1.txt', 'file2.txt', 'file3.txt']
        contents = ['foobar1', 'foobar2', 'foobar3']

        with open('source_file.txt', 'w') as f:
            f.write('\n'.join(source_dirs))

        with open('dest_file.txt', 'w') as f:
            f.write('\n'.join(dest_dirs))

        for d_name, f_name, content, in zip(source_dirs, filenames, contents):
            new_dir = os.path.join('test_source', d_name)
            os.mkdir(new_dir)
            with open(os.path.join('test_source', d_name, f_name), 'w') as f:
                f.write(content)

    def tearDown(self):
        shutil.rmtree('test_source')
        shutil.rmtree('test_dest')
        os.remove('source_file.txt')
        os.remove('dest_file.txt')

    def test_mirror(self):
        pass

if __name__ == '__main__':
    unittest.main()
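A design note: the stdlib can absorb this bookkeeping. A sketch of the alternative using tempfile (Python 3; not what the project does, shown only as a contrast):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    listing = os.path.join(tmp, 'source_file.txt')
    with open(listing, 'w') as f:
        f.write('\n'.join(['dir1', 'dir2', 'dir3']))
    # ... run the code under test against `listing` here ...
# the directory and its contents are removed automatically on exit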
34bf8d82580b83b1e0409db8636877a22203996b
cryptex/trade.py
cryptex/trade.py
Remove magic number check in Trade str method
Remove magic number check in Trade str method
Python
mit
coink/cryptex
class Trade(object):
    BUY = 0
    SELL = 1

    def __init__(self, trade_id, trade_type, base_currency, counter_currency, time, order_id, amount, price, fee=None):
        self.trade_id = trade_id
        self.trade_type = trade_type
        self.base_currency = base_currency
        self.counter_currency = counter_currency
        self.time = time
        self.order_id = order_id
        self.amount = amount
        self.price = price
        self.fee = fee

    def __str__(self):
-        if self.trade_type == 0:
+        if self.trade_type == Trade.BUY:
            ts = 'Buy'
        else:
-            ts ='Sell'
+            ts = 'Sell'
        return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)
Remove magic number check in Trade str method
## Code Before:

class Trade(object):
    BUY = 0
    SELL = 1

    def __init__(self, trade_id, trade_type, base_currency, counter_currency, time, order_id, amount, price, fee=None):
        self.trade_id = trade_id
        self.trade_type = trade_type
        self.base_currency = base_currency
        self.counter_currency = counter_currency
        self.time = time
        self.order_id = order_id
        self.amount = amount
        self.price = price
        self.fee = fee

    def __str__(self):
        if self.trade_type == 0:
            ts = 'Buy'
        else:
            ts ='Sell'
        return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)

## Instruction:

Remove magic number check in Trade str method

## Code After:

class Trade(object):
    BUY = 0
    SELL = 1

    def __init__(self, trade_id, trade_type, base_currency, counter_currency, time, order_id, amount, price, fee=None):
        self.trade_id = trade_id
        self.trade_type = trade_type
        self.base_currency = base_currency
        self.counter_currency = counter_currency
        self.time = time
        self.order_id = order_id
        self.amount = amount
        self.price = price
        self.fee = fee

    def __str__(self):
        if self.trade_type == Trade.BUY:
            ts = 'Buy'
        else:
            ts = 'Sell'
        return '<%s of %.8f %s>' % (ts, self.amount, self.base_currency)
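The same idea taken one step further with the standard library (a design-note sketch, not cryptex code): enum.Enum removes the magic numbers entirely and gives readable names for free.

from enum import Enum

class TradeType(Enum):
    BUY = 0
    SELL = 1

print(TradeType.BUY)        # TradeType.BUY
print(TradeType(1).name)    # SELL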
b2d9b56ceb96718d1f3edc8ec019ca7218e33e7d
src/rnaseq_lib/math/__init__.py
src/rnaseq_lib/math/__init__.py
Add docstring for softmax normalization function
Add docstring for softmax normalization function
Python
mit
jvivian/rnaseq-lib,jvivian/rnaseq-lib
import numpy as np

# Outlier
def iqr_bounds(ys):
    """
    Return upper and lower bound for an array of values

    Lower bound: Q1 - (IQR * 1.5)
    Upper bound: Q3 + (IQR * 1.5)

    :param list ys: List of values to calculate IQR
    :return: Upper and lower bound
    :rtype: tuple(float, float)
    """
    quartile_1, quartile_3 = np.percentile(ys, [25, 75])
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return upper_bound, lower_bound

# Normalization
def min_max_normalize(df):
    return (df - df.min()) / (df.max() - df.min())

def mean_normalize(df):
    return (df - df.mean()) / df.std()

+ def softmax(df):
+     """
+     Normalizes columns to sum to 1
+
+     :param pd.DataFrame df: Dataframe to normalize
+     :return: Normalized DataFrame
+     :rtype: pd.DataFrame
+     """
+     return df.divide(df.sum())
+
+
def l2norm(x, pad=0.001):
    """
    Log2 normalization function

    :param float x: Input value
    :param int|float pad: Pad value (to handle zeros)
    :return: log2(x+1) normalized value
    :rtype: float
    """
    return np.log2(x + pad)
Add docstring for softmax normalization function
## Code Before:

import numpy as np

# Outlier
def iqr_bounds(ys):
    """
    Return upper and lower bound for an array of values

    Lower bound: Q1 - (IQR * 1.5)
    Upper bound: Q3 + (IQR * 1.5)

    :param list ys: List of values to calculate IQR
    :return: Upper and lower bound
    :rtype: tuple(float, float)
    """
    quartile_1, quartile_3 = np.percentile(ys, [25, 75])
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return upper_bound, lower_bound

# Normalization
def min_max_normalize(df):
    return (df - df.min()) / (df.max() - df.min())

def mean_normalize(df):
    return (df - df.mean()) / df.std()

def l2norm(x, pad=0.001):
    """
    Log2 normalization function

    :param float x: Input value
    :param int|float pad: Pad value (to handle zeros)
    :return: log2(x+1) normalized value
    :rtype: float
    """
    return np.log2(x + pad)

## Instruction:

Add docstring for softmax normalization function

## Code After:

import numpy as np

# Outlier
def iqr_bounds(ys):
    """
    Return upper and lower bound for an array of values

    Lower bound: Q1 - (IQR * 1.5)
    Upper bound: Q3 + (IQR * 1.5)

    :param list ys: List of values to calculate IQR
    :return: Upper and lower bound
    :rtype: tuple(float, float)
    """
    quartile_1, quartile_3 = np.percentile(ys, [25, 75])
    iqr = quartile_3 - quartile_1
    lower_bound = quartile_1 - (iqr * 1.5)
    upper_bound = quartile_3 + (iqr * 1.5)
    return upper_bound, lower_bound

# Normalization
def min_max_normalize(df):
    return (df - df.min()) / (df.max() - df.min())

def mean_normalize(df):
    return (df - df.mean()) / df.std()

def softmax(df):
    """
    Normalizes columns to sum to 1

    :param pd.DataFrame df: Dataframe to normalize
    :return: Normalized DataFrame
    :rtype: pd.DataFrame
    """
    return df.divide(df.sum())

def l2norm(x, pad=0.001):
    """
    Log2 normalization function

    :param float x: Input value
    :param int|float pad: Pad value (to handle zeros)
    :return: log2(x+1) normalized value
    :rtype: float
    """
    return np.log2(x + pad)
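A quick worked check of what this softmax() computes (plain column-proportion scaling, each column divided by its sum, rather than the exponential softmax of machine learning; the name is the module's own choice):

import pandas as pd

df = pd.DataFrame({'a': [1, 3], 'b': [2, 2]})
print(df.divide(df.sum()))
#       a    b
# 0  0.25  0.5
# 1  0.75  0.5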
b4bdd8e20b82f8016030037712094f257af9221f
cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py
cinder/db/sqlalchemy/migrate_repo/versions/006_snapshots_add_provider_location.py
Fix provider_location column add for PSQL
Fix provider_location column add for PSQL Migration 006 (commit 690cae58e6bbac5758ea2f7b60774c797d28fba5) didn't work properly for postgres, this patch corrects the upgrade by ensuring the execute is performed and the value is initialized to None. Since we haven't released a milestone etc with this migration in the code it should be safe to just fix it here and submit. Change-Id: I10a09aed3470c35c8ebbe22f29aa511592167c35
Python
apache-2.0
nexusriot/cinder,github-borat/cinder,mahak/cinder,CloudServer/cinder,eharney/cinder,spring-week-topos/cinder-week,blueboxgroup/cinder,potsmaster/cinder,julianwang/cinder,github-borat/cinder,Datera/cinder,j-griffith/cinder,cloudbau/cinder,cloudbase/cinder,redhat-openstack/cinder,NeCTAR-RC/cinder,rakeshmi/cinder,abusse/cinder,winndows/cinder,abusse/cinder,dims/cinder,rickerc/cinder_audit,petrutlucian94/cinder,duhzecca/cinder,julianwang/cinder,ntt-sic/cinder,maelnor/cinder,apporc/cinder,nikesh-mahalka/cinder,tlakshman26/cinder-new-branch,phenoxim/cinder,nikesh-mahalka/cinder,JioCloud/cinder,rickerc/cinder_audit,winndows/cinder,Akrog/cinder,Paul-Ezell/cinder-1,Hybrid-Cloud/cinder,Paul-Ezell/cinder-1,alex8866/cinder,tlakshman26/cinder-https-changes,tlakshman26/cinder-bug-fix-volume-conversion-full,inkerra/cinder,dims/cinder,hguemar/cinder,scottdangelo/RemoveVolumeMangerLocks,j-griffith/cinder,leilihh/cinder,Thingee/cinder,takeshineshiro/cinder,Datera/cinder,bswartz/cinder,nexusriot/cinder,phenoxim/cinder,NetApp/cinder,tlakshman26/cinder-new-branch,openstack/cinder,Thingee/cinder,bswartz/cinder,mahak/cinder,hguemar/cinder,Thingee/cinder,ntt-sic/cinder,leilihh/cinder,saeki-masaki/cinder,blueboxgroup/cinder,ge0rgi/cinder,alex8866/cinder,scality/cinder,spring-week-topos/cinder-week,maelnor/cinder,petrutlucian94/cinder,potsmaster/cinder,Akrog/cinder,scottdangelo/RemoveVolumeMangerLocks,rakeshmi/cinder,Nexenta/cinder,redhat-openstack/cinder,duhzecca/cinder,Nexenta/cinder,NetApp/cinder,Accelerite/cinder,openstack/cinder,eharney/cinder,manojhirway/ExistingImagesOnNFS,JioCloud/cinder,manojhirway/ExistingImagesOnNFS,sasukeh/cinder,saeki-masaki/cinder,NeCTAR-RC/cinder,cloudbase/cinder,Accelerite/cinder,sasukeh/cinder,takeshineshiro/cinder,Hybrid-Cloud/cinder,CloudServer/cinder,tlakshman26/cinder-https-changes,inkerra/cinder,scality/cinder,tlakshman26/cinder-bug-fix-volume-conversion-full,cloudbau/cinder,apporc/cinder
from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = Column('provider_location', String(255))
    snapshots.create_column(provider_location)
+    snapshots.update().values(provider_location=None).execute()

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = snapshots.columns.provider_location
-    provider_location.drop()
+    snapshots.drop_column(provider_location)
Fix provider_location column add for PSQL
## Code Before:

from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = Column('provider_location', String(255))
    snapshots.create_column(provider_location)

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = snapshots.columns.provider_location
    provider_location.drop()

## Instruction:

Fix provider_location column add for PSQL

## Code After:

from sqlalchemy import Column
from sqlalchemy import MetaData, String, Table

def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = Column('provider_location', String(255))
    snapshots.create_column(provider_location)
    snapshots.update().values(provider_location=None).execute()

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    snapshots = Table('snapshots', meta, autoload=True)
    provider_location = snapshots.columns.provider_location
    snapshots.drop_column(provider_location)
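The heart of the fix, isolated (a sketch against classic 1.x-era SQLAlchemy with bound metadata, matching the migration's vintage; it is not the migration itself): update().values(...) only builds a statement object, and nothing reaches the database until the statement is executed.

from sqlalchemy import create_engine, Column, Integer, String, MetaData, Table

engine = create_engine('sqlite://')
meta = MetaData()
meta.bind = engine  # bound metadata is what makes stmt.execute() legal in 1.x
snapshots = Table('snapshots', meta,
                  Column('id', Integer, primary_key=True),
                  Column('provider_location', String(255)))
meta.create_all()

stmt = snapshots.update().values(provider_location=None)  # just a statement
stmt.execute()  # the backfill happens only now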
71df45002746b162e04a125403cad390accb949e
backend/main.py
backend/main.py
Add proper authentication for db (without actual key).
Add proper authentication for db (without actual key).
Python
apache-2.0
google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz,google/playhvz
import logging

from firebase import firebase
from flask import Flask, jsonify, request
import flask_cors
from google.appengine.ext import ndb
import google.auth.transport.requests
import google.oauth2.id_token
import requests_toolbelt.adapters.appengine

requests_toolbelt.adapters.appengine.monkeypatch()
HTTP_REQUEST = google.auth.transport.requests.Request()

+ # Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database
+ FIREBASE_SECRET = ""
+ FIREBASE_EMAIL = ""
+
app = Flask(__name__)
+ auth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)
- firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', None)
+ firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)
flask_cors.CORS(app)

@app.route('/')
def index():
    return "<h1>Welcome To Google HVZ (backend)!</h1>"

@app.route('/test', methods=['GET'])
def get_testdata():
    testdata = firebase.get('testdata', None)
    return jsonify(testdata)
Add proper authentication for db (without actual key).
## Code Before:

import logging

from firebase import firebase
from flask import Flask, jsonify, request
import flask_cors
from google.appengine.ext import ndb
import google.auth.transport.requests
import google.oauth2.id_token
import requests_toolbelt.adapters.appengine

requests_toolbelt.adapters.appengine.monkeypatch()
HTTP_REQUEST = google.auth.transport.requests.Request()

app = Flask(__name__)
firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', None)
flask_cors.CORS(app)

@app.route('/')
def index():
    return "<h1>Welcome To Google HVZ (backend)!</h1>"

@app.route('/test', methods=['GET'])
def get_testdata():
    testdata = firebase.get('testdata', None)
    return jsonify(testdata)

## Instruction:

Add proper authentication for db (without actual key).

## Code After:

import logging

from firebase import firebase
from flask import Flask, jsonify, request
import flask_cors
from google.appengine.ext import ndb
import google.auth.transport.requests
import google.oauth2.id_token
import requests_toolbelt.adapters.appengine

requests_toolbelt.adapters.appengine.monkeypatch()
HTTP_REQUEST = google.auth.transport.requests.Request()

# Fill out with value from https://firebase.corp.google.com/project/trogdors-29fa4/settings/database
FIREBASE_SECRET = ""
FIREBASE_EMAIL = ""

app = Flask(__name__)
auth = firebase.FirebaseAuthentication(FIREBASE_SECRET, FIREBASE_EMAIL, admin=True)
firebase = firebase.FirebaseApplication('https://trogdors-29fa4.firebaseio.com', authentication=auth)
flask_cors.CORS(app)

@app.route('/')
def index():
    return "<h1>Welcome To Google HVZ (backend)!</h1>"

@app.route('/test', methods=['GET'])
def get_testdata():
    testdata = firebase.get('testdata', None)
    return jsonify(testdata)
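A hardening note rather than part of the commit (the commit deliberately ships empty constants): reading the secret from the environment avoids ever committing a real value. A sketch:

import os

FIREBASE_SECRET = os.environ.get('FIREBASE_SECRET', '')
FIREBASE_EMAIL = os.environ.get('FIREBASE_EMAIL', '')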
483ba69bca57899054270cb24c41b0d2c01e7ff0
opentreemap/stormwater/models.py
opentreemap/stormwater/models.py
Add placeholder defaults for bioswale stewardship
Add placeholder defaults for bioswale stewardship
Python
agpl-3.0
clever-crow-consulting/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,maurizi/otm-core,recklessromeo/otm-core,RickMohr/otm-core,RickMohr/otm-core,RickMohr/otm-core,clever-crow-consulting/otm-core,clever-crow-consulting/otm-core,RickMohr/otm-core,recklessromeo/otm-core,maurizi/otm-core,maurizi/otm-core,maurizi/otm-core,recklessromeo/otm-core
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from django.contrib.gis.db import models

from treemap.models import MapFeature

class PolygonalMapFeature(MapFeature):
    area_field_name = 'polygon'
    skip_detail_form = True

    polygon = models.MultiPolygonField(srid=3857)

class Bioswale(PolygonalMapFeature):
-    pass
+    collection_udf_defaults = {
+        'Stewardship': [
+            {'name': 'Action',
+             'choices': ['Watered',
+                         'Pruned',
+                         'Mulched, Had Compost Added, or Soil Amended',
+                         'Cleared of Trash or Debris'],
+             'type': 'choice'},
+            {'type': 'date',
+             'name': 'Date'}],
+    }
Add placeholder defaults for bioswale stewardship
## Code Before:

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from django.contrib.gis.db import models

from treemap.models import MapFeature

class PolygonalMapFeature(MapFeature):
    area_field_name = 'polygon'
    skip_detail_form = True

    polygon = models.MultiPolygonField(srid=3857)

class Bioswale(PolygonalMapFeature):
    pass

## Instruction:

Add placeholder defaults for bioswale stewardship

## Code After:

from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from django.contrib.gis.db import models

from treemap.models import MapFeature

class PolygonalMapFeature(MapFeature):
    area_field_name = 'polygon'
    skip_detail_form = True

    polygon = models.MultiPolygonField(srid=3857)

class Bioswale(PolygonalMapFeature):
    collection_udf_defaults = {
        'Stewardship': [
            {'name': 'Action',
             'choices': ['Watered',
                         'Pruned',
                         'Mulched, Had Compost Added, or Soil Amended',
                         'Cleared of Trash or Debris'],
             'type': 'choice'},
            {'type': 'date',
             'name': 'Date'}],
    }
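Since collection_udf_defaults is plain data, its shape can be sanity-checked without Django; a standalone sketch iterating the same structure (duplicated here purely for illustration):

stewardship = [
    {'name': 'Action', 'type': 'choice',
     'choices': ['Watered', 'Pruned',
                 'Mulched, Had Compost Added, or Soil Amended',
                 'Cleared of Trash or Debris']},
    {'name': 'Date', 'type': 'date'},
]
for field in stewardship:
    print(field['name'], field['type'], len(field.get('choices', [])))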
073dd8529c95f44d7d250508dd10b8ffc8208926
two_factor/migrations/0003_auto_20150817_1733.py
two_factor/migrations/0003_auto_20150817_1733.py
Migrate phone numbers to E.164 format
Migrate phone numbers to E.164 format
Python
mit
koleror/django-two-factor-auth,Bouke/django-two-factor-auth,koleror/django-two-factor-auth,Bouke/django-two-factor-auth
from __future__ import unicode_literals
+ import logging

from django.db import models, migrations
+ import phonenumbers

import two_factor.models

+ logger = logging.getLogger(__name__)
+
+
+ def migrate_phone_numbers(apps, schema_editor):
+     PhoneDevice = apps.get_model("two_factor", "PhoneDevice")
+     for device in PhoneDevice.objects.all():
+         try:
+             number = phonenumbers.parse(device.number)
+             if not phonenumbers.is_valid_number(number):
+                 logger.info("User '%s' has an invalid phone number '%s'." % (device.user.username, device.number))
+             device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)
+             device.save()
+         except phonenumbers.NumberParseException as e:
+             # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,
+             # so do log a warning.
+             logger.warning("User '%s' has an invalid phone number '%s': %s. Please resolve this issue, "
+                            "as it might result in errors." % (device.user.username, device.number, e))

class Migration(migrations.Migration):

    dependencies = [
        ('two_factor', '0002_auto_20150110_0810'),
    ]

    operations = [
+        migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),
        migrations.AlterField(
            model_name='phonedevice',
            name='number',
            field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),
        ),
    ]
Migrate phone numbers to E.164 format
## Code Before:

from __future__ import unicode_literals

from django.db import models, migrations

import two_factor.models

class Migration(migrations.Migration):

    dependencies = [
        ('two_factor', '0002_auto_20150110_0810'),
    ]

    operations = [
        migrations.AlterField(
            model_name='phonedevice',
            name='number',
            field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),
        ),
    ]

## Instruction:

Migrate phone numbers to E.164 format

## Code After:

from __future__ import unicode_literals
import logging

from django.db import models, migrations
import phonenumbers

import two_factor.models

logger = logging.getLogger(__name__)


def migrate_phone_numbers(apps, schema_editor):
    PhoneDevice = apps.get_model("two_factor", "PhoneDevice")
    for device in PhoneDevice.objects.all():
        try:
            number = phonenumbers.parse(device.number)
            if not phonenumbers.is_valid_number(number):
                logger.info("User '%s' has an invalid phone number '%s'." % (device.user.username, device.number))
            device.number = phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164)
            device.save()
        except phonenumbers.NumberParseException as e:
            # Do not modify/delete the device, as it worked before. However this might result in issues elsewhere,
            # so do log a warning.
            logger.warning("User '%s' has an invalid phone number '%s': %s. Please resolve this issue, "
                           "as it might result in errors." % (device.user.username, device.number, e))


class Migration(migrations.Migration):

    dependencies = [
        ('two_factor', '0002_auto_20150110_0810'),
    ]

    operations = [
        migrations.RunPython(migrate_phone_numbers, reverse_code=lambda apps, schema_editor: None),
        migrations.AlterField(
            model_name='phonedevice',
            name='number',
            field=two_factor.models.PhoneNumberField(max_length=16, verbose_name='number'),
        ),
    ]
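What the data migration does per row, isolated (same phonenumbers calls; the sample number is made up): parse, optionally warn on invalid values, and rewrite to E.164.

import phonenumbers

raw = '+31 6 12345678'  # made-up example; the leading '+' lets parse() work without a region hint
number = phonenumbers.parse(raw)
print(phonenumbers.is_valid_number(number))  # True for this well-formed sample
print(phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.E164))  # +31612345678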
5933f9ef0ff7af0fd85a7dbe6578eefe9b8f7cdf
seqcluster/create_report.py
seqcluster/create_report.py
import os import shutil import logging from bcbio import install install._set_matplotlib_default_backend() import matplotlib matplotlib.use('Agg', force=True) from libs.read import load_data from libs.report import make_profile from libs.utils import safe_dirs from db import make_database import templates logger = logging.getLogger('report') def report(args): """ Create report in html format """ logger.info("reading sequeces") data = load_data(args.json) out_dir = os.path.join(args.out, "html") safe_dirs(out_dir) logger.info("create profile") data = make_profile(data, out_dir, args) logger.info("create database") make_database(data, "seqcluster.db", args.out) path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__))) css_template = os.path.join(path_template, "info.css") js_template = os.path.join(path_template, "jquery.tablesorter.min.js") css = os.path.join(out_dir, "info.css") js = os.path.join(out_dir, "jquery.tablesorter.min.js") if not os.path.exists(css): shutil.copy(css_template, css) shutil.copy(js_template, js) logger.info("Done")
import os
import shutil
import logging

from bcbio import install
install._set_matplotlib_default_backend()
import matplotlib
matplotlib.use('Agg', force=True)

from libs.read import load_data
from libs.report import make_profile
from libs.utils import safe_dirs
from db import make_database
import templates

logger = logging.getLogger('report')


def report(args):
    """
    Create report in html format
    """
    logger.info("reading sequences")
    data = load_data(args.json)
    out_dir = os.path.join(args.out, "html")
    safe_dirs(out_dir)
    logger.info("create profile")
    data = make_profile(data, out_dir, args)
    logger.info("create database")
    make_database(data, "seqcluster.db", args.out)
    path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))
    css_template = os.path.join(path_template, "info.css")
    js_template = os.path.join(path_template, "jquery.tablesorter.min.js")
    css = os.path.join(out_dir, "info.css")
    js = os.path.join(out_dir, "jquery.tablesorter.min.js")
    if not os.path.exists(css):
        shutil.copy(css_template, css)
        shutil.copy(js_template, js)
    logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
Add message with link to seqclusterViz
Python
mit
lpantano/seqcluster
import os
import shutil
import logging

from bcbio import install
install._set_matplotlib_default_backend()
import matplotlib
matplotlib.use('Agg', force=True)

from libs.read import load_data
from libs.report import make_profile
from libs.utils import safe_dirs
from db import make_database
import templates

logger = logging.getLogger('report')


def report(args):
    """
    Create report in html format
    """
    logger.info("reading sequences")
    data = load_data(args.json)
    out_dir = os.path.join(args.out, "html")
    safe_dirs(out_dir)
    logger.info("create profile")
    data = make_profile(data, out_dir, args)
    logger.info("create database")
    make_database(data, "seqcluster.db", args.out)
    path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))
    css_template = os.path.join(path_template, "info.css")
    js_template = os.path.join(path_template, "jquery.tablesorter.min.js")
    css = os.path.join(out_dir, "info.css")
    js = os.path.join(out_dir, "jquery.tablesorter.min.js")
    if not os.path.exists(css):
        shutil.copy(css_template, css)
        shutil.copy(js_template, js)
-   logger.info("Done")
+   logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
## Code Before:
import os
import shutil
import logging

from bcbio import install
install._set_matplotlib_default_backend()
import matplotlib
matplotlib.use('Agg', force=True)

from libs.read import load_data
from libs.report import make_profile
from libs.utils import safe_dirs
from db import make_database
import templates

logger = logging.getLogger('report')


def report(args):
    """
    Create report in html format
    """
    logger.info("reading sequences")
    data = load_data(args.json)
    out_dir = os.path.join(args.out, "html")
    safe_dirs(out_dir)
    logger.info("create profile")
    data = make_profile(data, out_dir, args)
    logger.info("create database")
    make_database(data, "seqcluster.db", args.out)
    path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))
    css_template = os.path.join(path_template, "info.css")
    js_template = os.path.join(path_template, "jquery.tablesorter.min.js")
    css = os.path.join(out_dir, "info.css")
    js = os.path.join(out_dir, "jquery.tablesorter.min.js")
    if not os.path.exists(css):
        shutil.copy(css_template, css)
        shutil.copy(js_template, js)
    logger.info("Done")

## Instruction:
Add message with link to seqclusterViz

## Code After:
import os
import shutil
import logging

from bcbio import install
install._set_matplotlib_default_backend()
import matplotlib
matplotlib.use('Agg', force=True)

from libs.read import load_data
from libs.report import make_profile
from libs.utils import safe_dirs
from db import make_database
import templates

logger = logging.getLogger('report')


def report(args):
    """
    Create report in html format
    """
    logger.info("reading sequences")
    data = load_data(args.json)
    out_dir = os.path.join(args.out, "html")
    safe_dirs(out_dir)
    logger.info("create profile")
    data = make_profile(data, out_dir, args)
    logger.info("create database")
    make_database(data, "seqcluster.db", args.out)
    path_template = os.path.normpath(os.path.dirname(os.path.realpath(templates.__file__)))
    css_template = os.path.join(path_template, "info.css")
    js_template = os.path.join(path_template, "jquery.tablesorter.min.js")
    css = os.path.join(out_dir, "info.css")
    js = os.path.join(out_dir, "jquery.tablesorter.min.js")
    if not os.path.exists(css):
        shutil.copy(css_template, css)
        shutil.copy(js_template, js)
    logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
...
    if not os.path.exists(css):
        shutil.copy(css_template, css)
        shutil.copy(js_template, js)
    logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
...
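A side note on the import block in this file: it forces matplotlib's non-interactive Agg backend before anything imports pyplot, since the backend can only be chosen ahead of the first pyplot import. A minimal standalone sketch of that pattern, without the bcbio helper:

import matplotlib
matplotlib.use('Agg', force=True)  # must run before the first pyplot import
import matplotlib.pyplot as plt

# Agg renders straight to a file, so this also works on headless servers.
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig("profile.png")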
715dcb62966b5c80544ed9eee79a6c69d3b9d927
blog/posts/models.py
from django.db import models


class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
    display_title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.title


class Comment(models.Model):
    text = models.TextField()
    author = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post)

    def __unicode__(self):
        return self.author + u"'s comment on " + self.post.__unicode__()
from django.db import models


class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.title


class Comment(models.Model):
    text = models.TextField()
    author = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post)

    def __unicode__(self):
        return self.author + u"'s comment on " + self.post.__unicode__()
Remove display_title field from Post model.
Remove display_title field from Post model. It wasn't being used anyway.
Python
mit
Lukasa/minimalog
from django.db import models


class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
-   display_title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.title


class Comment(models.Model):
    text = models.TextField()
    author = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post)

    def __unicode__(self):
        return self.author + u"'s comment on " + self.post.__unicode__()
## Code Before:
from django.db import models


class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
    display_title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.title


class Comment(models.Model):
    text = models.TextField()
    author = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post)

    def __unicode__(self):
        return self.author + u"'s comment on " + self.post.__unicode__()

## Instruction:
Remove display_title field from Post model.

## Code After:
from django.db import models


class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.title


class Comment(models.Model):
    text = models.TextField()
    author = models.CharField(max_length=100)
    date = models.DateTimeField(auto_now_add=True)
    post = models.ForeignKey(Post)

    def __unicode__(self):
        return self.author + u"'s comment on " + self.post.__unicode__()
// ... existing code ...
class Post(models.Model):
    body = models.TextField()
    title = models.CharField(max_length=50)
    publication_date = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
// ... rest of the code ...
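Dropping a model field like this also needs a schema migration before it is safe to deploy. Assuming a Django version with built-in migrations (the __unicode__ methods suggest an older Python 2-era codebase, where South would be the equivalent), a hypothetical sketch of what makemigrations would generate, with placeholder names:

from django.db import migrations


class Migration(migrations.Migration):

    # Placeholder dependency; the real name comes from the app's latest migration.
    dependencies = [
        ('posts', '0001_initial'),
    ]

    operations = [
        # Drops the column so the database matches the edited model.
        migrations.RemoveField(
            model_name='post',
            name='display_title',
        ),
    ]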
2adc021a520baa356c46ad1316893c1cd96f3147
knights/lexer.py
from enum import Enum
import re

Token = Enum('Token', 'load comment text var block',)

tag_re = re.compile(
    '|'.join([
        r'{\!\s*(?P<load>.+?)\s*\!}',
        r'{%\s*(?P<tag>.+?)\s*%}',
        r'{{\s*(?P<var>.+?)\s*}}',
        r'{#\s*(?P<comment>.+?)\s*#}'
    ]),
    re.DOTALL
)


def tokenise(template):
    '''A generator which yields (type, content) pairs'''
    upto = 0
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
        if upto < start:
            yield (Token.text, template[upto:start])
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
            yield (Token.load, load)
        elif tag is not None:
            yield (Token.block, tag)
        elif var is not None:
            yield (Token.var, var)
        else:
            yield (Token.comment, comment)
    if upto < len(template):
        yield (Token.text, template[upto:])
from enum import Enum
import re

TokenType = Enum('Token', 'load comment text var block',)

tag_re = re.compile(
    '|'.join([
        r'{\!\s*(?P<load>.+?)\s*\!}',
        r'{%\s*(?P<tag>.+?)\s*%}',
        r'{{\s*(?P<var>.+?)\s*}}',
        r'{#\s*(?P<comment>.+?)\s*#}'
    ]),
    re.DOTALL
)


class Token:
    def __init__(self, mode, token, lineno=None):
        self.mode = mode
        self.token = token
        self.lineno = lineno


def tokenise(template):
    '''A generator which yields Token instances'''
    upto = 0
    lineno = 0  # default, so the trailing yield works even when no tags match
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
        lineno = template.count('\n', 0, start)
        if upto < start:
            yield Token(TokenType.text, template[upto:start], lineno)
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
            yield Token(TokenType.load, load, lineno)
        elif tag is not None:
            yield Token(TokenType.block, tag, lineno)
        elif var is not None:
            yield Token(TokenType.var, var, lineno)
        else:
            yield Token(TokenType.comment, comment, lineno)
    if upto < len(template):
        yield Token(TokenType.text, template[upto:], lineno)
Rework Lexer to use Token object
Python
mit
funkybob/knights-templater
from enum import Enum
import re

- Token = Enum('Token', 'load comment text var block',)
+ TokenType = Enum('Token', 'load comment text var block',)
+
tag_re = re.compile(
    '|'.join([
        r'{\!\s*(?P<load>.+?)\s*\!}',
        r'{%\s*(?P<tag>.+?)\s*%}',
        r'{{\s*(?P<var>.+?)\s*}}',
        r'{#\s*(?P<comment>.+?)\s*#}'
    ]),
    re.DOTALL
)


+ class Token:
+     def __init__(self, mode, token, lineno=None):
+         self.mode = mode
+         self.token = token
+         self.lineno = lineno
+
+
def tokenise(template):
-     '''A generator which yields (type, content) pairs'''
+     '''A generator which yields Token instances'''
    upto = 0
+     lineno = 0  # default, so the trailing yield works even when no tags match
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
+         lineno = template.count('\n', 0, start)
        if upto < start:
-             yield (Token.text, template[upto:start])
+             yield Token(TokenType.text, template[upto:start], lineno)
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
-             yield (Token.load, load)
+             yield Token(TokenType.load, load, lineno)
        elif tag is not None:
-             yield (Token.block, tag)
+             yield Token(TokenType.block, tag, lineno)
        elif var is not None:
-             yield (Token.var, var)
+             yield Token(TokenType.var, var, lineno)
        else:
-             yield (Token.comment, comment)
+             yield Token(TokenType.comment, comment, lineno)
    if upto < len(template):
-         yield (Token.text, template[upto:])
+         yield Token(TokenType.text, template[upto:], lineno)
## Code Before:
from enum import Enum
import re

Token = Enum('Token', 'load comment text var block',)

tag_re = re.compile(
    '|'.join([
        r'{\!\s*(?P<load>.+?)\s*\!}',
        r'{%\s*(?P<tag>.+?)\s*%}',
        r'{{\s*(?P<var>.+?)\s*}}',
        r'{#\s*(?P<comment>.+?)\s*#}'
    ]),
    re.DOTALL
)


def tokenise(template):
    '''A generator which yields (type, content) pairs'''
    upto = 0
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
        if upto < start:
            yield (Token.text, template[upto:start])
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
            yield (Token.load, load)
        elif tag is not None:
            yield (Token.block, tag)
        elif var is not None:
            yield (Token.var, var)
        else:
            yield (Token.comment, comment)
    if upto < len(template):
        yield (Token.text, template[upto:])

## Instruction:
Rework Lexer to use Token object

## Code After:
from enum import Enum
import re

TokenType = Enum('Token', 'load comment text var block',)

tag_re = re.compile(
    '|'.join([
        r'{\!\s*(?P<load>.+?)\s*\!}',
        r'{%\s*(?P<tag>.+?)\s*%}',
        r'{{\s*(?P<var>.+?)\s*}}',
        r'{#\s*(?P<comment>.+?)\s*#}'
    ]),
    re.DOTALL
)


class Token:
    def __init__(self, mode, token, lineno=None):
        self.mode = mode
        self.token = token
        self.lineno = lineno


def tokenise(template):
    '''A generator which yields Token instances'''
    upto = 0
    lineno = 0  # default, so the trailing yield works even when no tags match
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
        lineno = template.count('\n', 0, start)
        if upto < start:
            yield Token(TokenType.text, template[upto:start], lineno)
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
            yield Token(TokenType.load, load, lineno)
        elif tag is not None:
            yield Token(TokenType.block, tag, lineno)
        elif var is not None:
            yield Token(TokenType.var, var, lineno)
        else:
            yield Token(TokenType.comment, comment, lineno)
    if upto < len(template):
        yield Token(TokenType.text, template[upto:], lineno)
// ... existing code ...
from enum import Enum
import re

TokenType = Enum('Token', 'load comment text var block',)

tag_re = re.compile(
    '|'.join([
// ... modified code ...
)


class Token:
    def __init__(self, mode, token, lineno=None):
        self.mode = mode
        self.token = token
        self.lineno = lineno


def tokenise(template):
    '''A generator which yields Token instances'''
    upto = 0
    lineno = 0  # default, so the trailing yield works even when no tags match
    # XXX Track line numbers and update nodes, so we can annotate the code
    for m in tag_re.finditer(template):
        start, end = m.span()
        lineno = template.count('\n', 0, start)
        if upto < start:
            yield Token(TokenType.text, template[upto:start], lineno)
        upto = end
        load, tag, var, comment = m.groups()
        if load is not None:
            yield Token(TokenType.load, load, lineno)
        elif tag is not None:
            yield Token(TokenType.block, tag, lineno)
        elif var is not None:
            yield Token(TokenType.var, var, lineno)
        else:
            yield Token(TokenType.comment, comment, lineno)
    if upto < len(template):
        yield Token(TokenType.text, template[upto:], lineno)
// ... rest of the code ...
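A quick sketch of how the reworked tokeniser behaves on a small template; the expected values are read off the code above. Note the subtlety that text preceding a tag is stamped with the line number of the tag that follows it:

for tok in tokenise("Hello {{ name }}!\n{# a comment #}"):
    print(tok.mode, repr(tok.token), tok.lineno)
# TokenType.text     'Hello '     0
# TokenType.var      'name'       0
# TokenType.text     '!\n'        1
# TokenType.comment  'a comment'  1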
7b939076fba1bb11d0ded504bcf10da457b3d092
scripts/add_identifiers_to_existing_preprints.py
import logging
import time

from website.app import init_app
from website.identifiers.utils import get_top_level_domain, request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def add_identifiers_to_preprints():
    from osf.models import PreprintService

    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        ezid_response = request_identifiers_from_ezid(preprint)
        preprint.set_preprint_identifiers(ezid_response)
        preprint.save()

        doi = preprint.get_identifier('doi')
        subdomain = get_top_level_domain(preprint.provider.external_url)
        assert subdomain.upper() in doi.value
        assert preprint._id.upper() in doi.value

        logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))


if __name__ == '__main__':
    init_app(routes=False)
    add_identifiers_to_preprints()
import logging
import time

from website.app import init_app
from website.identifiers.utils import request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def add_identifiers_to_preprints():
    from osf.models import PreprintService

    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        ezid_response = request_identifiers_from_ezid(preprint)
        preprint.set_preprint_identifiers(ezid_response)
        preprint.save()

        doi = preprint.get_identifier('doi')
        assert preprint._id.upper() in doi.value

        logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))


if __name__ == '__main__':
    init_app(routes=False)
    add_identifiers_to_preprints()
Remove check for domain in DOI
Python
apache-2.0
mattclark/osf.io,crcresearch/osf.io,aaxelb/osf.io,saradbowman/osf.io,adlius/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,mfraezz/osf.io,chrisseto/osf.io,pattisdr/osf.io,felliott/osf.io,cslzchen/osf.io,binoculars/osf.io,caneruguz/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,icereval/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,cwisecarver/osf.io,baylee-d/osf.io,erinspace/osf.io
import logging
import time

from website.app import init_app
- from website.identifiers.utils import get_top_level_domain, request_identifiers_from_ezid
+ from website.identifiers.utils import request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def add_identifiers_to_preprints():
    from osf.models import PreprintService

    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        ezid_response = request_identifiers_from_ezid(preprint)
        preprint.set_preprint_identifiers(ezid_response)
        preprint.save()

        doi = preprint.get_identifier('doi')
-         subdomain = get_top_level_domain(preprint.provider.external_url)
-         assert subdomain.upper() in doi.value
        assert preprint._id.upper() in doi.value

-         logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))
+         logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))


if __name__ == '__main__':
    init_app(routes=False)
    add_identifiers_to_preprints()
## Code Before:
import logging
import time

from website.app import init_app
from website.identifiers.utils import get_top_level_domain, request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def add_identifiers_to_preprints():
    from osf.models import PreprintService

    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        ezid_response = request_identifiers_from_ezid(preprint)
        preprint.set_preprint_identifiers(ezid_response)
        preprint.save()

        doi = preprint.get_identifier('doi')
        subdomain = get_top_level_domain(preprint.provider.external_url)
        assert subdomain.upper() in doi.value
        assert preprint._id.upper() in doi.value

        logger.info('Created DOI {} for Preprint from service {}'.format(doi.value, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))


if __name__ == '__main__':
    init_app(routes=False)
    add_identifiers_to_preprints()

## Instruction:
Remove check for domain in DOI

## Code After:
import logging
import time

from website.app import init_app
from website.identifiers.utils import request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def add_identifiers_to_preprints():
    from osf.models import PreprintService

    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        ezid_response = request_identifiers_from_ezid(preprint)
        preprint.set_preprint_identifiers(ezid_response)
        preprint.save()

        doi = preprint.get_identifier('doi')
        assert preprint._id.upper() in doi.value

        logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))


if __name__ == '__main__':
    init_app(routes=False)
    add_identifiers_to_preprints()
# ... existing code ...
import time

from website.app import init_app
from website.identifiers.utils import request_identifiers_from_ezid

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# ... modified code ...
        preprint.save()

        doi = preprint.get_identifier('doi')
        assert preprint._id.upper() in doi.value

        logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
        time.sleep(1)

    logger.info('Finished adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
# ... rest of the code ...
3d91950735d8b42e030f6f479a32369804e90ac0
gaphas/picklers.py
import copyreg
import types

import cairo
from future import standard_library

standard_library.install_aliases()


# Allow instancemethod to be pickled:
def construct_instancemethod(funcname, self, clazz):
    func = getattr(clazz, funcname)
    return types.MethodType(func, self)


def reduce_instancemethod(im):
    return (
        construct_instancemethod,
        (im.__func__.__name__, im.__self__, im.__self__.__class__),
    )


copyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)


# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
    return cairo.Matrix(*args)


def reduce_cairo_matrix(m):
    return construct_cairo_matrix, tuple(m)


copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)
import copyreg
import types

import cairo
from future import standard_library

standard_library.install_aliases()


# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
    return cairo.Matrix(*args)


def reduce_cairo_matrix(m):
    return construct_cairo_matrix, tuple(m)


copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)
Remove unused pickle code for instance methods
Python
lgpl-2.1
amolenaar/gaphas
import copyreg
import types

import cairo
from future import standard_library

standard_library.install_aliases()
-
-
- # Allow instancemethod to be pickled:
- def construct_instancemethod(funcname, self, clazz):
-     func = getattr(clazz, funcname)
-     return types.MethodType(func, self)
-
-
- def reduce_instancemethod(im):
-     return (
-         construct_instancemethod,
-         (im.__func__.__name__, im.__self__, im.__self__.__class__),
-     )
-
-
- copyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)
-
# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
    return cairo.Matrix(*args)


def reduce_cairo_matrix(m):
    return construct_cairo_matrix, tuple(m)


copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)
## Code Before:
import copyreg
import types

import cairo
from future import standard_library

standard_library.install_aliases()


# Allow instancemethod to be pickled:
def construct_instancemethod(funcname, self, clazz):
    func = getattr(clazz, funcname)
    return types.MethodType(func, self)


def reduce_instancemethod(im):
    return (
        construct_instancemethod,
        (im.__func__.__name__, im.__self__, im.__self__.__class__),
    )


copyreg.pickle(types.MethodType, reduce_instancemethod, construct_instancemethod)


# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
    return cairo.Matrix(*args)


def reduce_cairo_matrix(m):
    return construct_cairo_matrix, tuple(m)


copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)

## Instruction:
Remove unused pickle code for instance methods

## Code After:
import copyreg
import types

import cairo
from future import standard_library

standard_library.install_aliases()


# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
    return cairo.Matrix(*args)


def reduce_cairo_matrix(m):
    return construct_cairo_matrix, tuple(m)


copyreg.pickle(cairo.Matrix, reduce_cairo_matrix, construct_cairo_matrix)
// ... existing code ...
from future import standard_library

standard_library.install_aliases()


# Allow cairo.Matrix to be pickled:
def construct_cairo_matrix(*args):
// ... rest of the code ...
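A small usage sketch of what the remaining hook provides: once this module has been imported (the module path below is inferred from the file's location in the repository), cairo.Matrix instances round-trip through pickle via the registered reducer.

import pickle

import cairo
import gaphas.picklers  # noqa: F401 -- importing it registers the copyreg hook

m = cairo.Matrix(2, 0, 0, 2, 5, 5)        # scale by 2, translate by (5, 5)
restored = pickle.loads(pickle.dumps(m))  # works only because of the hook
assert tuple(restored) == tuple(m)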
f275c8cc020119b52ed01bc6b56946279853d854
src/mmw/apps/bigcz/clients/cuahsi/details.py
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from datetime import date, timedelta

from rest_framework.exceptions import ValidationError

DATE_FORMAT = '%m/%d/%Y'


def details(wsdl, site):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_site_info(wsdl, site)


def values(wsdl, site, variable, from_date=None, to_date=None):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})
    if not variable:
        raise ValidationError({
            'error': 'Required argument: variable'})

    if not to_date:
        # Set to default value of today
        to_date = date.today().strftime(DATE_FORMAT)

    if not from_date:
        # Set to default value of one week ago
        from_date = (date.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_values(wsdl, site, variable, from_date, to_date)
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from datetime import date, timedelta

from rest_framework.exceptions import ValidationError

DATE_FORMAT = '%m/%d/%Y'


def details(wsdl, site):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_site_info(wsdl, site, None)


def values(wsdl, site, variable, from_date=None, to_date=None):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})
    if not variable:
        raise ValidationError({
            'error': 'Required argument: variable'})

    if not to_date:
        # Set to default value of today
        to_date = date.today().strftime(DATE_FORMAT)

    if not from_date:
        # Set to default value of one week ago
        from_date = (date.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_values(wsdl, site, variable, from_date, to_date, None)
Stop ulmo caching for suds-jurko compliance
Stop ulmo caching for suds-jurko compliance

Previously we were using ulmo with suds-jurko 0.6, which is the current latest release, but it is 4 years old. Most recent work on suds-jurko has been done on the development branch, including optimizations to memory use (which we need). Unfortunately, the development branch also includes some breaking changes, including one which "cleans up" the caching module:

https://bitbucket.org/jurko/suds/commits/6b24afe3206fc648605cc8d19f7c58c605d9df5f?at=default

This change renames .setduration() to .__set_duration(), which is called by ulmo here:

https://github.com/emiliom/ulmo/blob/90dbfe31f38a72ea4cee9a52e572cfa8f8484adc/ulmo/cuahsi/wof/core.py#L290

By explicitly setting the caching to None, we ensure that line isn't executed and those errors don't crop up. The performance improvements we get from using the development branch of suds-jurko outweigh the benefits of caching for one day, since it is unlikely we will be accessing the exact same content repeatedly.
Python
apache-2.0
WikiWatershed/model-my-watershed
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from datetime import date, timedelta

from rest_framework.exceptions import ValidationError

DATE_FORMAT = '%m/%d/%Y'


def details(wsdl, site):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
-     return wof.get_site_info(wsdl, site)
+     return wof.get_site_info(wsdl, site, None)


def values(wsdl, site, variable, from_date=None, to_date=None):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})
    if not variable:
        raise ValidationError({
            'error': 'Required argument: variable'})

    if not to_date:
        # Set to default value of today
        to_date = date.today().strftime(DATE_FORMAT)

    if not from_date:
        # Set to default value of one week ago
        from_date = (date.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
-     return wof.get_values(wsdl, site, variable, from_date, to_date)
+     return wof.get_values(wsdl, site, variable, from_date, to_date, None)
## Code Before:
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from datetime import date, timedelta

from rest_framework.exceptions import ValidationError

DATE_FORMAT = '%m/%d/%Y'


def details(wsdl, site):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_site_info(wsdl, site)


def values(wsdl, site, variable, from_date=None, to_date=None):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})
    if not variable:
        raise ValidationError({
            'error': 'Required argument: variable'})

    if not to_date:
        # Set to default value of today
        to_date = date.today().strftime(DATE_FORMAT)

    if not from_date:
        # Set to default value of one week ago
        from_date = (date.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_values(wsdl, site, variable, from_date, to_date)

## Instruction:
Stop ulmo caching for suds-jurko compliance

## Code After:
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from datetime import date, timedelta

from rest_framework.exceptions import ValidationError

DATE_FORMAT = '%m/%d/%Y'


def details(wsdl, site):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_site_info(wsdl, site, None)


def values(wsdl, site, variable, from_date=None, to_date=None):
    if not wsdl:
        raise ValidationError({
            'error': 'Required argument: wsdl'})
    if not site:
        raise ValidationError({
            'error': 'Required argument: site'})
    if not variable:
        raise ValidationError({
            'error': 'Required argument: variable'})

    if not to_date:
        # Set to default value of today
        to_date = date.today().strftime(DATE_FORMAT)

    if not from_date:
        # Set to default value of one week ago
        from_date = (date.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if not wsdl.upper().endswith('?WSDL'):
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_values(wsdl, site, variable, from_date, to_date, None)
// ... existing code ...
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_site_info(wsdl, site, None)


def values(wsdl, site, variable, from_date=None, to_date=None):
// ... modified code ...
        wsdl += '?WSDL'

    from ulmo.cuahsi import wof
    return wof.get_values(wsdl, site, variable, from_date, to_date, None)
// ... rest of the code ...
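For context on the trailing None: as the commit message explains, ulmo forwards that argument as its suds cache setting, so passing None keeps ulmo from ever calling the cache-duration API that the suds-jurko development branch renamed. The same idea expressed directly against suds, as a sketch (the WSDL URL is an illustrative placeholder):

from suds.client import Client

# cache=None disables suds's on-disk WSDL cache entirely, so no
# cache-duration call is ever made.
client = Client('http://hydroportal.cuahsi.org/ipswich/cuahsi_1_1.asmx?WSDL',
                cache=None)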
f1c09bc9969cf9d66179baef80b5cbb3d28d5596
app/report/views.py
from flask import render_template

from app import app


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/report/<path:repository>')
def report():
    pass
from flask import flash, g, redirect, render_template, request, url_for

from app import app
from vcs.repository import is_valid_github_repository, parse_url_and_get_repo


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/check', methods=['POST'])
def check():
    url = request.form['url']
    if not is_valid_github_repository(url):
        flash('Given repository url is not valid')
        return redirect(url_for('index'))
    return redirect(url_for('report', repo_url=url))


@app.route('/report/<path:repo_url>', methods=['GET'])
def report(repo_url):
    repo = parse_url_and_get_repo(repo_url)
    if repo is None:
        flash('Given repository does not exist')
        return redirect(url_for('index'))
    results = {}
    # Analysis processing
    return render_template('report/results.html', results=results)
Create default behaviour for all routers
Python
mit
mingrammer/pyreportcard
- from flask import render_template
+ from flask import flash, g, redirect, render_template, request, url_for

from app import app
+ from vcs.repository import is_valid_github_repository, parse_url_and_get_repo


@app.route('/')
def index():
    return render_template('index.html')


- @app.route('/report/<path:repository>')
- def report():
-     pass
+ @app.route('/about')
+ def about():
+     return render_template('about.html')
+
+
+ @app.route('/check', methods=['POST'])
+ def check():
+     url = request.form['url']
+     if not is_valid_github_repository(url):
+         flash('Given repository url is not valid')
+         return redirect(url_for('index'))
+     return redirect(url_for('report', repo_url=url))
+
+
+ @app.route('/report/<path:repo_url>', methods=['GET'])
+ def report(repo_url):
+     repo = parse_url_and_get_repo(repo_url)
+     if repo is None:
+         flash('Given repository does not exist')
+         return redirect(url_for('index'))
+     results = {}
+     # Analysis processing
+     return render_template('report/results.html', results=results)
## Code Before:
from flask import render_template

from app import app


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/report/<path:repository>')
def report():
    pass

## Instruction:
Create default behaviour for all routers

## Code After:
from flask import flash, g, redirect, render_template, request, url_for

from app import app
from vcs.repository import is_valid_github_repository, parse_url_and_get_repo


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/check', methods=['POST'])
def check():
    url = request.form['url']
    if not is_valid_github_repository(url):
        flash('Given repository url is not valid')
        return redirect(url_for('index'))
    return redirect(url_for('report', repo_url=url))


@app.route('/report/<path:repo_url>', methods=['GET'])
def report(repo_url):
    repo = parse_url_and_get_repo(repo_url)
    if repo is None:
        flash('Given repository does not exist')
        return redirect(url_for('index'))
    results = {}
    # Analysis processing
    return render_template('report/results.html', results=results)
# ... existing code ...
from flask import flash, g, redirect, render_template, request, url_for

from app import app
from vcs.repository import is_valid_github_repository, parse_url_and_get_repo


@app.route('/')
def index():
# ... modified code ...
    return render_template('index.html')


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/check', methods=['POST'])
def check():
    url = request.form['url']
    if not is_valid_github_repository(url):
        flash('Given repository url is not valid')
        return redirect(url_for('index'))
    return redirect(url_for('report', repo_url=url))


@app.route('/report/<path:repo_url>', methods=['GET'])
def report(repo_url):
    repo = parse_url_and_get_repo(repo_url)
    if repo is None:
        flash('Given repository does not exist')
        return redirect(url_for('index'))
    results = {}
    # Analysis processing
    return render_template('report/results.html', results=results)
# ... rest of the code ...
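One caveat on the flash() calls introduced here: Flask raises a RuntimeError from flash() unless a session secret is configured, and the messages must be read back somewhere, typically via get_flashed_messages() in a template. A minimal self-contained sketch (the key value is a placeholder; a real app should load it from configuration):

from flask import Flask, flash, get_flashed_messages

app = Flask(__name__)
app.secret_key = 'change-me'  # placeholder; flash() requires a session secret

with app.test_request_context('/'):
    flash('Given repository url is not valid')
    assert get_flashed_messages() == ['Given repository url is not valid']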