Dataset columns: diff (string, 139 to 3.65k characters), message (string, 8 to 627 characters), diff_languages (1 class: py)
diff --git a/gimmemotifs/maelstrom.py b/gimmemotifs/maelstrom.py index <HASH>..<HASH> 100644 --- a/gimmemotifs/maelstrom.py +++ b/gimmemotifs/maelstrom.py @@ -185,6 +185,11 @@ def visualize_maelstrom(outdir, sig_cutoff=3, pwmfile=None): def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=True, score_table=None, count_table=None): + df = pd.read_table(infile, index_col=0) + # Check for duplicates + if df.index.duplicated(keep=False).any(): + raise ValueError("Index contains duplicate regions! Please remove them.") + if not os.path.exists(outdir): os.mkdir(outdir) @@ -200,11 +205,7 @@ def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=True, score_table = os.path.join(outdir, "motif.score.txt.gz") scores.to_csv(score_table, sep="\t", float_format="%.3f", compression="gzip") - - df = pd.read_table(infile, index_col=0) - # Drop duplicate indices, doesn't work very well downstream - df = df.loc[df.index.drop_duplicates(keep=False)] exps = [] clusterfile = infile if df.shape[1] != 1:
Check for duplicate regions and raise Exception
py
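The pattern in the commit above is fail-fast validation: detect duplicated index labels up front and raise, rather than silently dropping rows downstream. A minimal sketch of that check, assuming a pandas DataFrame indexed by region (names and data below are illustrative, not from the original repo):

import pandas as pd

def require_unique_regions(df):
    # keep=False flags every occurrence of a duplicated index label,
    # so .any() is True if any region appears more than once.
    if df.index.duplicated(keep=False).any():
        raise ValueError("Index contains duplicate regions! Please remove them.")
    return df

df = pd.DataFrame({"score": [1, 2, 3]}, index=["chr1:100", "chr1:200", "chr1:100"])
try:
    require_unique_regions(df)
except ValueError as exc:
    print(exc)  # fails fast instead of silently dropping rows downstream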
diff --git a/datasketch/experimental/aio/storage.py b/datasketch/experimental/aio/storage.py index <HASH>..<HASH> 100644 --- a/datasketch/experimental/aio/storage.py +++ b/datasketch/experimental/aio/storage.py @@ -150,6 +150,8 @@ if motor is not None and ReturnDocument is not None: self._batch_size = 1000 self._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(dsn, **additional_args) self._collection = self._mongo_client.get_default_database(db_lsh).get_collection(self._collection_name) + self._collection.create_index("key", background=True) + self._initialized = True self._buffer = AsyncMongoBuffer(self._collection, self._batch_size)
Fixes #<I>; MinhashLSH creates mongo index key. (#<I>)
py
diff --git a/src/rudiments/reamed/click.py b/src/rudiments/reamed/click.py index <HASH>..<HASH> 100644 --- a/src/rudiments/reamed/click.py +++ b/src/rudiments/reamed/click.py @@ -39,7 +39,7 @@ __all__ = [encode_filename(_) for _ in __all__] def pretty_path(path, _home_re=re.compile('^' + re.escape(os.path.expanduser('~') + os.sep))): """Prettify path for humans, and make it Unicode.""" path = format_filename(path) - path = _home_re.sub('~' + os.sep, path) + path = _home_re.sub('~' + re.escape(os.sep), path) return path
fix re.sub for backslashed Windows paths
py
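The underlying issue in the commit above: in a re.sub replacement string a backslash starts an escape sequence, so substituting in Windows's os.sep ('\') without escaping it yields a bad-escape error or a mangled path. A small sketch of the fixed helper, assuming a modern Python 3 where re.escape leaves '/' untouched:

import os
import re

_home_re = re.compile('^' + re.escape(os.path.expanduser('~') + os.sep))

def pretty_path(path):
    # re.escape makes the separator literal inside the replacement string:
    # a no-op for '/' on POSIX, a doubled backslash on Windows.
    return _home_re.sub('~' + re.escape(os.sep), path)

print(pretty_path(os.path.join(os.path.expanduser('~'), 'projects', 'demo')))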
diff --git a/perceval/backends/stackexchange.py b/perceval/backends/stackexchange.py index <HASH>..<HASH> 100644 --- a/perceval/backends/stackexchange.py +++ b/perceval/backends/stackexchange.py @@ -89,8 +89,9 @@ class StackExchange(Backend): for whole_page in whole_pages: self._push_cache_queue(whole_page) self._flush_cache_queue() - question = self.parse_questions(whole_page) - return question + questions = self.parse_questions(whole_page) + for question in questions: + yield question @metadata(get_update_time) def fetch_from_cache(self): @@ -107,11 +108,12 @@ class StackExchange(Backend): cache_items = self.cache.retrieve() for items in cache_items: - question = self.parse_questions(items) - return question + questions = self.parse_questions(items) + for question in questions: + yield question @staticmethod - def parse_questions(items): + def parse_questions(raw_page): """Parse a StackExchange API raw response. The method parses the API response retrieving the @@ -121,7 +123,7 @@ class StackExchange(Backend): :returns: a generator of questions """ - raw_questions = json.loads(items) + raw_questions = json.loads(raw_page) questions = raw_questions['items'] for question in questions: yield question
[stackexchange] Return element instead of iterator
py
diff --git a/openfisca_survey_manager/scenarios.py b/openfisca_survey_manager/scenarios.py index <HASH>..<HASH> 100644 --- a/openfisca_survey_manager/scenarios.py +++ b/openfisca_survey_manager/scenarios.py @@ -98,7 +98,10 @@ class AbstractSurveyScenario(object): # waiting for the new pandas version to hit Travis repo input_data_frame = input_data_frame.drop(column_name, axis = 1) # , inplace = True) # TODO: effet de bords ? + for column_name in input_data_frame: + if column_name in id_variables + role_variables: + continue if column_by_name[column_name].formula_class is not None: log.info('Column "{}" in survey set to be calculated, dropped from input table'.format(column_name)) input_data_frame = input_data_frame.drop(column_name, axis = 1)
Do not remove entity ids and roles from the input data frame
py
diff --git a/canvasapi/util.py b/canvasapi/util.py index <HASH>..<HASH> 100644 --- a/canvasapi/util.py +++ b/canvasapi/util.py @@ -1,7 +1,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals import os -from collections import Iterable from six import binary_type, string_types, text_type @@ -25,7 +24,11 @@ def is_multivalued(value): return False # general rule: multivalued if iterable - return isinstance(value, Iterable) + try: + iter(value) + return True + except TypeError: + return False def combine_kwargs(**kwargs):
Update iterable detection to use iter() instead of checking isinstance(Iterable) (#<I>) Resolves #<I>
py
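The fix above swaps an isinstance check for the EAFP idiom: iter() succeeds for anything the iteration protocol accepts, including classes that only implement __getitem__, which collections.Iterable-based checks can miss. A self-contained sketch (the string/bytes guard mirrors the surrounding helper):

def is_multivalued(value):
    # Strings and bytes are iterable but represent single values.
    if isinstance(value, (str, bytes)):
        return False
    # EAFP: iter() accepts anything the iteration protocol accepts,
    # including classes that only define __getitem__, which an
    # isinstance(value, Iterable) check would miss.
    try:
        iter(value)
        return True
    except TypeError:
        return False

assert is_multivalued([1, 2]) and is_multivalued({"a": 1})
assert not is_multivalued("abc") and not is_multivalued(42)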
diff --git a/pytodoist/test/api.py b/pytodoist/test/api.py index <HASH>..<HASH> 100644 --- a/pytodoist/test/api.py +++ b/pytodoist/test/api.py @@ -1,7 +1,6 @@ #!/usr/bin/env python """This module contains unit tests for the pytodoist.api module.""" -from __future__ import print_function import sys import unittest from pytodoist.api import TodoistAPI @@ -166,7 +165,6 @@ class TodoistAPITest(unittest.TestCase): response = self.t.archive_project(self.user.token, project['id']) self.assertEqual(response.status_code, 200) archived_ids = response.json() - print(archived_ids) self.assertEqual(len(archived_ids), 1) def test_get_archived_projects(self): @@ -174,7 +172,6 @@ class TodoistAPITest(unittest.TestCase): self.t.archive_project(self.user.token, project['id']) response = self.t.archive_project(self.user.token, project['id']) archived_projects = response.json() - print(response.json()) self.assertEqual(len(archived_projects), 1) def test_unarchive_project(self):
Removed print statements from two tests.
py
diff --git a/rest_framework_json_api/metadata.py b/rest_framework_json_api/metadata.py index <HASH>..<HASH> 100644 --- a/rest_framework_json_api/metadata.py +++ b/rest_framework_json_api/metadata.py @@ -42,11 +42,19 @@ class JSONAPIMetadata(SimpleMetadata): serializers.Serializer: 'Serializer', }) - relation_type_lookup = ClassLookupDict({ - related.ManyToManyDescriptor: 'ManyToMany', - related.ReverseManyToOneDescriptor: 'OneToMany', - related.ForwardManyToOneDescriptor: 'ManyToOne', - }) + try: + relation_type_lookup = ClassLookupDict({ + related.ManyToManyDescriptor: 'ManyToMany', + related.ReverseManyToOneDescriptor: 'ManyToOne', + related.ForwardManyToOneDescriptor: 'OneToMany', + }) + except AttributeError: + relation_type_lookup = ClassLookupDict({ + related.ManyRelatedObjectsDescriptor: 'ManyToMany', + related.ReverseManyRelatedObjectsDescriptor: 'ManyToMany', + related.ForeignRelatedObjectsDescriptor: 'OneToMany', + related.ReverseSingleRelatedObjectDescriptor: 'ManyToOne', + }) def determine_metadata(self, request, view): metadata = OrderedDict()
relation_type_lookup on <=<I>
py
diff --git a/src/sagemaker/sklearn/estimator.py b/src/sagemaker/sklearn/estimator.py index <HASH>..<HASH> 100644 --- a/src/sagemaker/sklearn/estimator.py +++ b/src/sagemaker/sklearn/estimator.py @@ -80,7 +80,7 @@ class SKLearn(Framework): and values, but ``str()`` will be called to convert them before training. py_version (str): Python version you want to use for executing your - model training code (default: 'py2'). One of 'py2' or 'py3'. + model training code (default: 'py3'). One of 'py2' or 'py3'. image_name (str): If specified, the estimator will use this image for training and hosting, instead of selecting the appropriate SageMaker official image based on framework_version and
documentation: Correct comment in SKLearn Estimator about default Python version (#<I>)
py
diff --git a/hypercorn/base.py b/hypercorn/base.py index <HASH>..<HASH> 100644 --- a/hypercorn/base.py +++ b/hypercorn/base.py @@ -1,10 +1,10 @@ import asyncio -from email.utils import formatdate from enum import auto, Enum from socket import AF_INET, AF_INET6 from ssl import SSLObject, SSLSocket from time import time from typing import List, Optional, Tuple, Union +from wsgiref.handlers import format_date_time from .config import Config @@ -96,8 +96,8 @@ def suppress_body(method: str, status_code: int) -> bool: def response_headers(protocol: str) -> List[Tuple[bytes, bytes]]: return [ - (b'date', formatdate(time(), usegmt=True).encode()), - (b'server', f"hypercorn-{protocol}".encode()), + (b'date', format_date_time(time()).encode('ascii')), + (b'server', f"hypercorn-{protocol}".encode('ascii')), ]
Bugfix use the wsgiref datetime formatter This exists for servers to set the date header, and is hence correct and likely to remain so.
py
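For context, wsgiref.handlers.format_date_time exists precisely so servers can render RFC-compliant Date headers, which is the rationale the commit gives for preferring it over email.utils.formatdate. A quick demonstration of the call (the exact output depends on the current time):

from time import time
from wsgiref.handlers import format_date_time

# e.g. (b'date', b'Mon, 23 May 2022 22:38:34 GMT'), ready for a header list.
header = (b'date', format_date_time(time()).encode('ascii'))
print(header)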
diff --git a/examples/plot_fashion-mnist_example.py b/examples/plot_fashion-mnist_example.py index <HASH>..<HASH> 100644 --- a/examples/plot_fashion-mnist_example.py +++ b/examples/plot_fashion-mnist_example.py @@ -41,7 +41,6 @@ pal = [ '#5e4fa2' ] color_key = {str(d):c for d,c in enumerate(pal)} -color_key = {str(d):c for d,c in enumerate(pal)} reducer = umap.UMAP(random_state=42) embedding = reducer.fit_transform(data)
dedupe line in Fashion mnist example
py
diff --git a/pronto/synonym.py b/pronto/synonym.py index <HASH>..<HASH> 100644 --- a/pronto/synonym.py +++ b/pronto/synonym.py @@ -13,7 +13,7 @@ if typing.TYPE_CHECKING: from .ontology import Ontology -_SCOPES = frozenset({"EXACT", "RELATED", "BROAD", "NARROW"}) +_SCOPES = frozenset({"EXACT", "RELATED", "BROAD", "NARROW", None}) @functools.total_ordering @@ -30,7 +30,7 @@ class SynonymType(object): @typechecked() def __init__(self, id: str, description: str, scope: Optional[str] = None): - if scope is not None and scope not in _SCOPES: + if scope not in _SCOPES: raise ValueError(f"invalid synonym scope: {scope}") self.id = id self.description = description @@ -97,7 +97,7 @@ class SynonymData(object): type: Optional[str] = None, xrefs: Optional[Iterable[Xref]] = None, ): - if scope is not None and scope not in _SCOPES: + if scope not in _SCOPES: raise ValueError(f"invalid synonym scope: {scope}") self.description = description self.scope = scope
Don't have a special case for `None` synonym scope
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ If you get errors, check the following things: """ setup(name='CleanerVersion', - version='1.4.3', + version='1.4.4', description='A versioning solution for relational data models using the Django ORM', long_description='CleanerVersion is a solution that allows you to read and write multiple versions of an entry ' 'to and from your relational database. It allows to keep track of modifications on an object '
Bumped version number to <I>; M2M self-references have been added
py
diff --git a/indra/util/statement_presentation.py b/indra/util/statement_presentation.py index <HASH>..<HASH> 100644 --- a/indra/util/statement_presentation.py +++ b/indra/util/statement_presentation.py @@ -554,9 +554,13 @@ def group_and_sort_statements(stmt_list, sort_by='default', stmt_data=None, # Return the sorted statements, if that's all you want. if grouping_level == 'statement': - return sorted(stmt_list, - key=lambda s: _sort_func(stmt_data[s.get_hash()] - .get_dict())) + sorted_stmts = sorted( + ((_sort_func(stmt_data[s.get_hash()].get_dict()), s, + stmt_data[s.get_hash()].get_dict()) + for s in stmt_list), + key=lambda t: t[0] + ) + return sorted_stmts # Create gathering metrics from the statement data. relation_metrics = stmt_data.get_new_instance()
Refactor return for statements to be more like other output.
py
diff --git a/yolk/cli.py b/yolk/cli.py index <HASH>..<HASH> 100755 --- a/yolk/cli.py +++ b/yolk/cli.py @@ -499,7 +499,7 @@ class Yolk(object): #Search for source, egg, and svn self.print_download_uri(version, True) self.print_download_uri(version, False) - self.print_download_uri("dev", True, True) + self.print_download_uri("dev", True) else: if self.options.file_type == "source": source = True
Fix problem with -D svn
py
diff --git a/satpy/writers/cf_writer.py b/satpy/writers/cf_writer.py index <HASH>..<HASH> 100644 --- a/satpy/writers/cf_writer.py +++ b/satpy/writers/cf_writer.py @@ -149,7 +149,10 @@ def make_time_bounds(dataarray, start_times, end_times): if start_time is not None) end_time = min(end_time for end_time in end_times if end_time is not None) - dtnp64 = dataarray['time'].data[0] + try: + dtnp64 = dataarray['time'].data[0] + except IndexError: + dtnp64 = dataarray['time'].data time_bnds = [(np.datetime64(start_time) - dtnp64), (np.datetime64(end_time) - dtnp64)] return xr.DataArray(np.array(time_bnds) / np.timedelta64(1, 's'),
Fix the cf_writer to accept single-valued time coordinate variable
py
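The try/except above handles the two shapes a time coordinate can take: a length-1 array (indexable) or a 0-d scalar array (indexing raises IndexError). A minimal sketch of the same fallback, with illustrative NumPy values:

import numpy as np

def first_time(values):
    # Length-1 arrays support values[0]; 0-d arrays raise IndexError,
    # in which case the scalar itself is the value we want.
    try:
        return values[0]
    except IndexError:
        return values

print(first_time(np.array(['2020-01-01'], dtype='datetime64[ns]')))  # 1-element array
print(first_time(np.array('2020-01-01', dtype='datetime64[ns]')))    # 0-d scalar array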
diff --git a/esper/world.py b/esper/world.py index <HASH>..<HASH> 100644 --- a/esper/world.py +++ b/esper/world.py @@ -11,6 +11,10 @@ class World: self._next_entity_id = 0 self._database = {} + def clear_database(self): + """Remove all entities and components from the world.""" + self._database.clear() + def add_processor(self, processor_instance, priority=0): """Add a Processor instance to the world.
Add method to clear database (used by benchmark)
py
diff --git a/satpy/readers/virr_l1b.py b/satpy/readers/virr_l1b.py index <HASH>..<HASH> 100644 --- a/satpy/readers/virr_l1b.py +++ b/satpy/readers/virr_l1b.py @@ -53,7 +53,7 @@ LOG = logging.getLogger(__name__) class VIRR_L1B(HDF5FileHandler): """VIRR_L1B reader.""" - def __init__(self, filename, filename_info, filetype_info, **kwargs): + def __init__(self, filename, filename_info, filetype_info): super(VIRR_L1B, self).__init__(filename, filename_info, filetype_info) LOG.debug('day/night flag for {0}: {1}'.format(filename, self['/attr/Day Or Night Flag'])) self.geolocation_prefix = filetype_info['geolocation_prefix']
Remove kwargs from init
py
diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py index <HASH>..<HASH> 100644 --- a/superset/db_engine_specs.py +++ b/superset/db_engine_specs.py @@ -1426,6 +1426,10 @@ class ImpalaEngineSpec(BaseEngineSpec): ) @classmethod + def epoch_to_dttm(cls): + return 'from_unixtime({col})' + + @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE':
impala support for epoch timestamps (#<I>)
py
diff --git a/src/cs50/__init__.py b/src/cs50/__init__.py index <HASH>..<HASH> 100644 --- a/src/cs50/__init__.py +++ b/src/cs50/__init__.py @@ -1,10 +1,5 @@ import sys -from os.path import abspath, join -from site import getsitepackages, getusersitepackages -from termcolor import cprint -from traceback import extract_tb, format_list, format_exception_only - from .cs50 import *
removed unneeded packages from __init__.py
py
diff --git a/tests/testcipher.py b/tests/testcipher.py index <HASH>..<HASH> 100644 --- a/tests/testcipher.py +++ b/tests/testcipher.py @@ -85,8 +85,6 @@ class TestEncryptDecrypt(unittest.TestCase): c=cipher.new("bf-ofb",encryptkey,iv=iv) ciphertext=c.update(data)+c.finish() decryptkey=encryptkey[0:5]+encryptkey[5:] - with open("cipher.txt","w") as f: - f.write(repr(ciphertext)+"\n") d=cipher.new("bf-ofb",decryptkey,encrypt=False,iv=iv) deciph=d.update(ciphertext)+d.finish() self.assertEqual(deciph,data)
Remove writing of cipher.txt
py
diff --git a/postmark_inbound/__init__.py b/postmark_inbound/__init__.py index <HASH>..<HASH> 100644 --- a/postmark_inbound/__init__.py +++ b/postmark_inbound/__init__.py @@ -126,6 +126,8 @@ class Attachment(object): raise Exception('Postmark Inbound Error: the file type %s is not allowed' % self.content_type()) try: + if 'b' not in mode: + mode += 'b' attachment = open('%s%s' % (directory, self.name()), mode) attachment.write(self.read()) except IOError:
add binary to write mode if it isn't present
py
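Attachment payloads are bytes, and writing bytes to a text-mode file raises TypeError on Python 3, hence the mode normalization above. A sketch of the trick with a hypothetical save helper (the Attachment class itself is not reproduced here):

def save_bytes(payload, path, mode='w'):
    # Normalize to binary mode: 'w' becomes 'wb', 'a' becomes 'ab', etc.
    if 'b' not in mode:
        mode += 'b'
    with open(path, mode) as f:
        f.write(payload)

save_bytes(b'\x89PNG...', 'attachment.png')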
diff --git a/tools/profiling/microbenchmarks/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff.py index <HASH>..<HASH> 100755 --- a/tools/profiling/microbenchmarks/bm_diff.py +++ b/tools/profiling/microbenchmarks/bm_diff.py @@ -101,7 +101,7 @@ argp.add_argument('-t', '--track', help='Which metrics to track') argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq']) argp.add_argument('-d', '--diff_base', type=str) -argp.add_argument('-r', '--repetitions', type=int, default=7) +argp.add_argument('-r', '--repetitions', type=int, default=4) argp.add_argument('-p', '--p_threshold', type=float, default=0.05) args = argp.parse_args()
Update bm_diff.py
py
diff --git a/bcbio/distributed/multitasks.py b/bcbio/distributed/multitasks.py index <HASH>..<HASH> 100644 --- a/bcbio/distributed/multitasks.py +++ b/bcbio/distributed/multitasks.py @@ -4,7 +4,7 @@ from bcbio import structural, utils, chipseq from bcbio.bam import callable from bcbio.ngsalign import alignprep, tophat, star from bcbio.pipeline import (disambiguate, lane, qcsummary, sample, shared, variation, - rnaseq, alignment) + rnaseq) from bcbio.variation import (bamprep, coverage, realign, genotype, ensemble, multi, population, recalibrate, validate, vcfutils) @@ -19,8 +19,11 @@ def trim_lane(*args): @utils.map_wrap def process_alignment(*args): return lane.process_alignment(*args) -process_alignment.metadata = {"resources": ["novoalign", "bwa", "bowtie2", "tophat2"], - "ensure": {"tophat2": tophat.job_requirements}} +process_alignment.metadata = {"resources": ["star", "novoalign", "bwa", "bowtie2", + "tophat2", "bowtie", "tophat"], + "ensure": {"tophat": tophat.job_requirements, + "tophat2": tophat.job_requirements, + "star": star.job_requirements}} @utils.map_wrap def postprocess_alignment(*args):
Update process_alignment resources to match ipython specifications
py
diff --git a/aiortc/codecs/g711.py b/aiortc/codecs/g711.py index <HASH>..<HASH> 100644 --- a/aiortc/codecs/g711.py +++ b/aiortc/codecs/g711.py @@ -51,16 +51,16 @@ class PcmEncoder: class PcmaEncoder(PcmEncoder): - _convert = audioop.lin2alaw + _convert = staticmethod(audioop.lin2alaw) class PcmaDecoder(PcmDecoder): - _convert = audioop.alaw2lin + _convert = staticmethod(audioop.alaw2lin) class PcmuDecoder(PcmDecoder): - _convert = audioop.ulaw2lin + _convert = staticmethod(audioop.ulaw2lin) class PcmuEncoder(PcmEncoder): - _convert = audioop.lin2ulaw + _convert = staticmethod(audioop.lin2ulaw)
[codecs] wrap G<I> converters in staticmethod CPython seems to tolerate the lack of staticmethod, PyPy does not.
py
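The pitfall behind this commit: a plain Python function stored as a class attribute becomes a bound method on access, so calls receive the instance as an extra first argument. CPython's audioop converters are builtins and escape that binding, but PyPy implements them as regular functions, hence the staticmethod wrapper. A sketch of the failure mode with an ordinary function standing in for the converter:

class Broken:
    # A plain function stored on a class becomes a bound method on access:
    # self is silently passed as the first argument.
    _convert = lambda data: data.upper()

class Fixed:
    # staticmethod disables descriptor binding, so the call signature is
    # exactly the wrapped function's.
    _convert = staticmethod(lambda data: data.upper())

try:
    Broken()._convert("abc")
except TypeError as exc:
    print("binding broke the call:", exc)

print(Fixed()._convert("abc"))  # -> 'ABC'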
diff --git a/scikits/umfpack/tests/test_interface.py b/scikits/umfpack/tests/test_interface.py index <HASH>..<HASH> 100644 --- a/scikits/umfpack/tests/test_interface.py +++ b/scikits/umfpack/tests/test_interface.py @@ -67,10 +67,12 @@ class TestSolvers(object): lu = um.splu(A) - Pr = csc_matrix((4, 4)) + Pr = np.zeros((4, 4)) Pr[lu.perm_r, np.arange(4)] = 1 - Pc = csc_matrix((4, 4)) + Pr = csc_matrix(Pr) + Pc = np.zeros((4, 4)) Pc[np.arange(4), lu.perm_c] = 1 + Pc = csc_matrix(Pc) R = csc_matrix((4, 4)) R.setdiag(lu.R)
BUG: make test work on older scipy versions
py
diff --git a/python/ray/services.py b/python/ray/services.py index <HASH>..<HASH> 100644 --- a/python/ray/services.py +++ b/python/ray/services.py @@ -246,6 +246,14 @@ def get_node_ip_address(address="8.8.8.8:53"): node_ip_address = s.getsockname()[0] except Exception as e: node_ip_address = "127.0.0.1" + # [Errno 101] Network is unreachable + if e.errno == 101: + try: + # try get node ip address from host name + host_name = socket.getfqdn(socket.gethostname()) + node_ip_address = socket.gethostbyname(host_name) + except Exception: + pass return node_ip_address
The function get_node_ip_address will catch an exception and return '<I>' when we forbid the external network. Instead we can get the IP address from the hostname. (#<I>) <URL>
py
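The connect-to-8.8.8.8 trick only selects a local interface (UDP connect sends no packets), but it raises 'Network is unreachable' when external routes are blocked, so the commit falls back to resolving the machine's own hostname. A standalone sketch of the same two-step lookup:

import socket

def get_node_ip_address(address="8.8.8.8:53"):
    ip, port = address.split(":")
    try:
        # connect() on a UDP socket sends no packets; it only selects the
        # local interface that would route toward `address`.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((ip, int(port)))
        return s.getsockname()[0]
    except OSError:
        # errno 101 (network unreachable) and friends: fall back to
        # resolving our own fully-qualified hostname.
        try:
            return socket.gethostbyname(socket.getfqdn(socket.gethostname()))
        except OSError:
            return "127.0.0.1"

print(get_node_ip_address())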
diff --git a/tests/test_.py b/tests/test_.py index <HASH>..<HASH> 100755 --- a/tests/test_.py +++ b/tests/test_.py @@ -40,9 +40,11 @@ True >>> test.testBAS('typecast1.bas') typecast1.bas:5: Cannot convert value to string. Use STR() function True +>>> test.testBAS('typecast2.bas') +typecast2.bas:10: Cannot convert string to a value. Use VAL() function +True ''' - import test if __name__ == '__main__':
Test updated. Updated na_th_an typecast test.
py
diff --git a/salt/transport/tcp.py b/salt/transport/tcp.py index <HASH>..<HASH> 100644 --- a/salt/transport/tcp.py +++ b/salt/transport/tcp.py @@ -4,8 +4,6 @@ TCP transport classes Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})" """ - - import errno import logging import os
Drop Py2 and six on salt/transport/tcp.py
py
diff --git a/tensor2tensor/layers/common_attention.py b/tensor2tensor/layers/common_attention.py index <HASH>..<HASH> 100644 --- a/tensor2tensor/layers/common_attention.py +++ b/tensor2tensor/layers/common_attention.py @@ -299,7 +299,7 @@ def get_standardized_layers(hparams, dp=None): def add_standard_attention_hparams(hparams): - """Adds the hparams used by get_standadized_layers.""" + """Adds the hparams used by get_standardized_layers.""" # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode.
fix get_standardized_layers spelling (#<I>)
py
diff --git a/inginious/agent/docker_agent.py b/inginious/agent/docker_agent.py index <HASH>..<HASH> 100644 --- a/inginious/agent/docker_agent.py +++ b/inginious/agent/docker_agent.py @@ -575,7 +575,8 @@ class DockerAgent(object): self._containers_ending[container_id] = (message, container_path, retval, future_results) # Close sub containers - for student_container_id in self._student_containers_for_job[message.job_id]: + for student_container_id_loop in self._student_containers_for_job[message.job_id]: + student_container_id = student_container_id_loop def close_and_delete(): try: self._docker.kill_container(student_container_id)
Fix possible race condition in docker_agent
py
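The race above is the classic late-binding closure bug: a nested function captures the loop variable itself, so every callback sees its final value. The commit rebinds it to a per-iteration name; the equivalent textbook idiom is a default argument, sketched here with illustrative IDs:

callbacks = []
for container_id in ["a", "b", "c"]:
    # A bare `lambda: print(container_id)` would capture the variable, not
    # its current value, so every callback would print "c" after the loop.
    # Binding the value as a default argument freezes it per iteration.
    callbacks.append(lambda cid=container_id: print("closing", cid))

for cb in callbacks:
    cb()  # closing a / closing b / closing c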
diff --git a/moderngl/texture.py b/moderngl/texture.py index <HASH>..<HASH> 100644 --- a/moderngl/texture.py +++ b/moderngl/texture.py @@ -310,6 +310,11 @@ class Texture: ''' Read the pixel data as bytes into system memory. + The texture can also be attached to a :py:class:`Framebuffer` + to gain access to :py:meth:`Framebuffer.read` for additional + features such ad reading a subsection or converting to + another ``dtype``. + Keyword Args: level (int): The mipmap level. alignment (int): The byte alignment of the pixels.
Texture.read: Mention Framebuffer.read in docstring
py
diff --git a/gtdoit/logbook.py b/gtdoit/logbook.py index <HASH>..<HASH> 100644 --- a/gtdoit/logbook.py +++ b/gtdoit/logbook.py @@ -101,7 +101,7 @@ class IdGenerator: return key -class _LogBookRunner(object): +class LogBookRunner(object): """ Helper class for splitting the runnable part of Logbook into logical parts. @@ -240,8 +240,8 @@ def run(args): # things in the correct order, particularly also if we have an exception # or similar. - runner = _LogBookRunner(eventstore, incoming_socket, query_socket, - streaming_socket, args.exit_message) + runner = LogBookRunner(eventstore, incoming_socket, query_socket, + streaming_socket, args.exit_message) runner.run() return 0
Changing the name of class `_LogBookRunner` to `LogBookRunner`. The new name fits better with the naming scheme of logbook as a whole. Possibly all non-public methods/classes should have an initial underscore in the future. I'll sleep on that...
py
diff --git a/tests/test_hdate.py b/tests/test_hdate.py index <HASH>..<HASH> 100644 --- a/tests/test_hdate.py +++ b/tests/test_hdate.py @@ -3,7 +3,6 @@ import hdate import hdate.hdate_julian as hj import datetime -import random HEBREW_YEARS_INFO = { @@ -183,9 +182,9 @@ class TestHDate(object): random_hdate.hdate_set_hdate(day, 9, random_hdate._h_year) assert random_hdate.get_omer_day() == day + 44 - @pytest.mark.parametrize('execution_number', range(10)) + @pytest.mark.parametrize('execution_number', range(40)) def test_get_holyday_type(self, execution_number): - holyday = random.randint(0, 37) + holyday = execution_number # regular day if holyday == 0: assert hdate.get_holyday_type(holyday) == 0
Test all the different holidays for get_holyday_type
py
diff --git a/{{cookiecutter.project_slug}}/config/settings/test.py b/{{cookiecutter.project_slug}}/config/settings/test.py index <HASH>..<HASH> 100644 --- a/{{cookiecutter.project_slug}}/config/settings/test.py +++ b/{{cookiecutter.project_slug}}/config/settings/test.py @@ -48,10 +48,6 @@ TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405 # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend" -# https://docs.djangoproject.com/en/dev/ref/settings/#email-host -EMAIL_HOST = "localhost" -# https://docs.djangoproject.com/en/dev/ref/settings/#email-port -EMAIL_PORT = 1025 # Your stuff... # ------------------------------------------------------------------------------
Remove EMAIL_HOST & EMAIL_PORT with locmem backend These settings should not be required since Django never connects to an external component when sending email. Instead it's stored in memory. <URL>
py
diff --git a/poloniex/poloniex.py b/poloniex/poloniex.py index <HASH>..<HASH> 100644 --- a/poloniex/poloniex.py +++ b/poloniex/poloniex.py @@ -38,8 +38,11 @@ def _api_wrapper(fn): self.semaphore.acquire() resp = fn(self, command, **params).json(object_hook=_AutoCastDict) + # check for 'error' then check for status due to Poloniex inconsistency if 'error' in resp: raise PoloniexCommandException(resp['error']) + else: + resp.raise_for_status() return resp return _fn @@ -71,7 +74,6 @@ class PoloniexPublic(object): """Invoke the 'command' public API with optional params.""" params['command'] = command response = self.session.get(self._public_url, params=params) - response.raise_for_status() return response def returnTicker(self): @@ -167,7 +169,6 @@ class Poloniex(PoloniexPublic): response = self.session.post( self._private_url, data=params, auth=Poloniex._PoloniexAuth(self._apikey, self._secret)) - response.raise_for_status() return response def returnBalances(self):
always include error message from poloniex in exception
py
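The ordering matters because Poloniex is inconsistent: errors may arrive as an HTTP error status or as a 200 with an 'error' key in the JSON body. Checking the body first preserves the API's own message; raise_for_status() then covers the plain HTTP failures. A sketch of the combined check, assuming a requests-style Response object:

def checked_json(resp):
    payload = resp.json()
    # Body-level error first: it carries Poloniex's own message even when
    # the HTTP status is an error code.
    if 'error' in payload:
        raise RuntimeError(payload['error'])
    # Then surface any remaining 4xx/5xx responses with no error body.
    resp.raise_for_status()
    return payload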
diff --git a/polyaxon/polypod/notebook.py b/polyaxon/polypod/notebook.py index <HASH>..<HASH> 100644 --- a/polyaxon/polypod/notebook.py +++ b/polyaxon/polypod/notebook.py @@ -130,6 +130,7 @@ class NotebookSpawner(ProjectJobSpawner): "--port={port} " "--ip=0.0.0.0 " "--allow-root " + "--NotebookApp.allow_origin='*' " "--NotebookApp.token={token} " "--NotebookApp.trust_xheaders=True " "--NotebookApp.base_url={base_url} "
Update notebook spawner: set allow_origin to "*" fixes #<I>
py
diff --git a/examples/iti_21/server.py b/examples/iti_21/server.py index <HASH>..<HASH> 100644 --- a/examples/iti_21/server.py +++ b/examples/iti_21/server.py @@ -85,14 +85,16 @@ class PDQSupplier(AbstractHandler): def reply(self): print('Received a message') - query_params = dict((self.FIELD_NAMES[q.qip_1], q.qip_2) + print(repr(self.incoming_message.to_er7())) + query_params = dict((self.FIELD_NAMES[q.qip_1.value], q.qip_2.value) for q in self.incoming_message.qpd.qpd_3 - if q.qip_1 in self.FIELD_NAMES) - if '' in query_params.values() or not check_date(query_params.get('DOB', '')): + if q.qip_1.value in self.FIELD_NAMES) + print("Extracted query params: {}".format(query_params)) + if '' in query_params.values(): return self._create_error(1) else: patients = [('0001', 'John', 'Smith')] - return self._create_response('AA', 'NF', patients) + return self._create_response('AA', 'OK', patients) class HL7ErrorHandler(AbstractHandler):
Changed to send a response with results
py
diff --git a/lastfm.py b/lastfm.py index <HASH>..<HASH> 100644 --- a/lastfm.py +++ b/lastfm.py @@ -14,6 +14,7 @@ class Scrobbler(Module): self.running = True self.authenticated = False self.queue = list() + self.osc_creds() def osc_creds(self): if (not hasattr(self, 'username') or not hasattr(self, 'password')):
lastfm: fix assumption about initialization order
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ install_requires = [ "django-classification-banner>=0.1.4", "django-maploom==1.2.1", "psycopg2==2.4.5", - "django-tilebundler==0.1-alpha5" + "django-tilebundler==0.1-alpha6" ] tests_requires = [
bumped up tilebundler version
py
diff --git a/src/util/fileUtils.py b/src/util/fileUtils.py index <HASH>..<HASH> 100644 --- a/src/util/fileUtils.py +++ b/src/util/fileUtils.py @@ -63,6 +63,7 @@ def openFD(fd): """ if type(fd).__name__ == "str" : return open(fd) if type(fd).__name__ == "file" : return open(fd.name) + if type(fd).__name__ == "mmap" : return fd nfd = copy(fd) nfd.reset() return nfd
optimised memory usage for wig elements
py
diff --git a/tests/test_cache.py b/tests/test_cache.py index <HASH>..<HASH> 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -172,6 +172,18 @@ class CacheTest(PeruTest): await self.cache.export_tree( self.content_tree, export_dir, previous_tree=self.content_tree) assert_contents(export_dir, self.content) + # This should work even if we run it from a subdirectory of + # content_tree. (Past buggy behavior here: + # https://github.com/buildinspace/peru/issues/210.) + prev_dir = os.getcwd() + try: + os.remove(os.path.join(export_dir, 'a')) + os.chdir(os.path.join(export_dir, 'b')) + await self.cache.export_tree( + self.content_tree, export_dir, previous_tree=self.content_tree) + assert_contents(export_dir, self.content) + finally: + os.chdir(prev_dir) @make_synchronous async def test_merge_trees(self):
add a failing test for restoring missing files from a subdir Reported by @Tweakbert at <URL>
py
diff --git a/tests/test.py b/tests/test.py index <HASH>..<HASH> 100644 --- a/tests/test.py +++ b/tests/test.py @@ -7,13 +7,16 @@ def test_sum(): eq_(2+2,4) def test_get_html_200(): - import requests from image_scraper.utils import get_html page_html, url=get_html('http://ananth.co.in/test.html', False) actual_html=u'<html>\n\n<head>\n \n</head>\n\n<body>\n<img src="images/test1.jpg"/>\n<img src="images/test.png"/>\n<img src="images/test4.gif"/>\n</body>\n \n</html>\n' eq_(page_html, actual_html) def test_get_html_404(): - import requests - r=requests.get('http://ananth.co.in/test404.html') - eq_(r.status_code, 404) + from image_scraper.utils import get_html + from image_scraper.exceptions import PageLoadError + try: + page_html, url=get_html('http://ananth.co.in/test404.html', False) + except PageLoadError as e: + eq_(e.status_code, 404) + eq_(1,2) #Fails if page loads. \ No newline at end of file
Added <I> test for get_html.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ deps = { 'eth': [ "cached-property>=1.5.1,<2", "eth-bloom>=1.0.3,<2.0.0", - "eth-keys>=0.2.1,<1.0.0", + "eth-keys>=0.2.1,<0.3.0", "eth-typing>=2.0.0,<3.0.0", "eth-utils>=1.5.2,<2.0.0", "lru-dict>=1.1.6",
eth-keys shouldn't be allowed to minor bump It's causing dependency conflicts with other libraries that are requiring eth-keys <<I> -- since semver allows breaking changes in minor releases in a v0 major release, it makes more sense to constrain at the minor version here.
py
diff --git a/zipline/finance/risk.py b/zipline/finance/risk.py index <HASH>..<HASH> 100644 --- a/zipline/finance/risk.py +++ b/zipline/finance/risk.py @@ -615,10 +615,10 @@ class RiskMetricsIterative(RiskMetricsBase): def update(self, dt, algorithm_returns, benchmark_returns): self.algorithm_returns_cont[dt] = algorithm_returns - self.algorithm_returns = self.algorithm_returns_cont[:dt] + self.algorithm_returns = self.algorithm_returns_cont.valid() self.benchmark_returns_cont[dt] = benchmark_returns - self.benchmark_returns = self.benchmark_returns_cont[:dt] + self.benchmark_returns = self.benchmark_returns_cont.valid() self.num_trading_days = len(self.algorithm_returns)
MAINT: Revert slice into returns containers instead of using .valid() Backing out slice vs. valid(), because of an incompatibility with starting a minutely emitted session mid-day, since the midday start date is not yet wired through SimulationParameters.
py
diff --git a/utils/json2csv.py b/utils/json2csv.py index <HASH>..<HASH> 100755 --- a/utils/json2csv.py +++ b/utils/json2csv.py @@ -52,7 +52,6 @@ def get_headings(): 'user_listed_count', 'user_location', 'user_name', - 'user_screen_name', 'user_statuses_count', 'user_time_zone', 'user_urls', @@ -92,7 +91,6 @@ def get_row(t): user('listed_count'), user('location'), user('name'), - user('screen_name'), user('statuses_count'), user('time_zone'), user_urls(t),
remove duplicate user_screen_name The get_row() and get_headings() functions had the "user_screen_name" variable twice, which can cause problems when importing csv to pandas later.
py
diff --git a/eve_docs/config.py b/eve_docs/config.py index <HASH>..<HASH> 100644 --- a/eve_docs/config.py +++ b/eve_docs/config.py @@ -1,7 +1,7 @@ from flask import current_app as capp from eve.utils import home_link from .labels import LABELS - +import re def get_cfg(): cfg = {} @@ -67,12 +67,13 @@ def schema(resource, field=None): def paths(domain, resource): ret = {} - path = '/{0}'.format(domain) + path = '/{0}'.format(resource.get('url', domain)) + path = re.sub(r'<(?:[^>]+:)?([^>]+)>', '{\\1}', path) pathtype = 'resource' ret[path] = methods(domain, resource, pathtype) primary = identifier(resource) - path = '/{0}/{1}'.format(domain, pathparam(primary['name'])) + path = '{0}/{1}'.format(path, pathparam(primary['name'])) pathtype = 'item' ret[path] = methods(domain, resource, pathtype)
Add support for sub-resources
py
diff --git a/astroid/brain/brain_six.py b/astroid/brain/brain_six.py index <HASH>..<HASH> 100644 --- a/astroid/brain/brain_six.py +++ b/astroid/brain/brain_six.py @@ -51,7 +51,7 @@ from sys import intern map = map range = range from importlib import reload -reload_module = reload +reload_module = lambda module: reload(module) from functools import reduce from shlex import quote as shlex_quote from io import StringIO
Transform reload_module() into a lambda to prevent it being marked as a bound method
py
diff --git a/irc/tests/test_client.py b/irc/tests/test_client.py index <HASH>..<HASH> 100644 --- a/irc/tests/test_client.py +++ b/irc/tests/test_client.py @@ -55,15 +55,16 @@ class TestThrottler(object): while time.time() < deadline: limited_next(counter) # ensure the counter was advanced about 30 times - last_count = next(counter) - assert 29 <= last_count <= 31 - # ensure that another burst of calls will also get throttled - last_count += 1 + assert 29 <= next(counter) <= 31 + + # ensure that another burst of calls after some idle period will also + # get throttled time.sleep(1) deadline = time.time() + 1 + counter = itertools.count() while time.time() < deadline: limited_next(counter) - assert 29 <= next(counter) - last_count <= 31 + assert 29 <= next(counter) <= 31 def test_reconstruct_unwraps(self): """
Reset the counter in the test for matching technique
py
diff --git a/cmd2/parsing.py b/cmd2/parsing.py index <HASH>..<HASH> 100644 --- a/cmd2/parsing.py +++ b/cmd2/parsing.py @@ -57,21 +57,20 @@ class Statement(str): :type output_to: str or None """ - def __new__( - cls, - obj: object, - *, - raw: str = None, - command: str = None, - args: str = None, - argv: List[str] = None, - multiline_command: str = None, - terminator: str = None, - suffix: str = None, - pipe_to: str = None, - output: str = None, - output_to:str = None, - ): + def __new__(cls, + obj: object, + *, + raw: str = None, + command: str = None, + args: str = None, + argv: List[str] = None, + multiline_command: str = None, + terminator: str = None, + suffix: str = None, + pipe_to: str = None, + output: str = None, + output_to:str = None + ): """Create a new instance of Statement We must override __new__ because we are subclassing `str` which is
Fixed issue with parsing.py on Python <I> and <I> Apparently versions of Python prior to <I> don't allow a comma after the last argument being passed to a method.
py
diff --git a/sos/jupyter/sos_step.py b/sos/jupyter/sos_step.py index <HASH>..<HASH> 100755 --- a/sos/jupyter/sos_step.py +++ b/sos/jupyter/sos_step.py @@ -59,7 +59,7 @@ class Interactive_Step_Executor(Step_Executor): return host.retrieve_results(tasks) # no pending elif not env.config['wait_for_task']: - raise PendingTasks([x for x,y in zip(tasks, res) if y in ('pending', 'submitted', 'running')]) + raise PendingTasks([(queue, x) for x,y in zip(tasks, res) if y in ('pending', 'submitted', 'running')]) time.sleep(1)
Fix jupyter notebook for recent change
py
diff --git a/pyemma/coordinates/io/featurizer.py b/pyemma/coordinates/io/featurizer.py index <HASH>..<HASH> 100644 --- a/pyemma/coordinates/io/featurizer.py +++ b/pyemma/coordinates/io/featurizer.py @@ -122,14 +122,14 @@ class BackboneTorsionFeature: self.dim = len(self._phi_inds) + len(self._psi_inds) def describe(self): - labels = [] - for ires in self._phi_inds: - labels.append("PHI: %s %i" % - (self.topology.residue(ires).name, ires)) - for ires in self._psi_inds: - labels.append("PSI: %s %i" % - (self.topology.residue(ires).name, ires)) - return labels + top = self.topology + labels_phi = ["PHI %s %i" % (top.atom(ires[0]).residue.name, ires[0]) + for ires in self._phi_inds] + + labels_psi = ["PHI %s %i" % (top.atom(ires[0]).residue.name, ires[0]) + for ires in self._psi_inds] + + return labels_phi + labels_psi def map(self, traj): y1 = compute_dihedrals(traj, self._phi_inds).astype(np.float32)
[featurizer] fix describe in backbone torsions: show only first residue index/name.
py
diff --git a/netmiko/base_connection.py b/netmiko/base_connection.py index <HASH>..<HASH> 100644 --- a/netmiko/base_connection.py +++ b/netmiko/base_connection.py @@ -78,7 +78,7 @@ class BaseSSHConnection(object): try: self.remote_conn_pre.connect(hostname=self.ip, port=self.port, username=self.username, password=self.password, - look_for_keys=False, timeout=timeout) + look_for_keys=False, allow_agent=False, timeout=timeout) except socket.error as e: msg = "Connection to device timed-out: {device_type} {ip}:{port}".format( device_type=self.device_type, ip=self.ip, port=self.port)
Set allow_agent=False for the client connection Authentication on Cisco IOS fails when using the ssh agent.
py
diff --git a/python-package/lightgbm/engine.py b/python-package/lightgbm/engine.py index <HASH>..<HASH> 100644 --- a/python-package/lightgbm/engine.py +++ b/python-package/lightgbm/engine.py @@ -188,7 +188,7 @@ def train(params, train_set, num_boost_round=100, train_data_name = valid_names[i] continue if not isinstance(valid_data, Dataset): - raise TypeError("Traninig only accepts Dataset object") + raise TypeError("Training only accepts Dataset object") reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set)) if valid_names is not None and len(valid_names) > i: name_valid_sets.append(valid_names[i]) @@ -488,7 +488,7 @@ def cv(params, train_set, num_boost_round=100, ...}. """ if not isinstance(train_set, Dataset): - raise TypeError("Traninig only accepts Dataset object") + raise TypeError("Training only accepts Dataset object") params = copy.deepcopy(params) if fobj is not None:
[Python] Fix typo in engine.py (#<I>) Replace "Traninig" with "Training"
py
diff --git a/epab/cli.py b/epab/cli.py index <HASH>..<HASH> 100644 --- a/epab/cli.py +++ b/epab/cli.py @@ -18,7 +18,7 @@ import epab.utils from epab import __version__ with open('epab.yml') as config_file: - CONFIG = yaml.load(config_file) + CONFIG = yaml.safe_load(config_file) def _install_pyinstaller(ctx: click.Context, force: bool = False):
fix: fix unsafe YAML loading
py
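yaml.load with the default loader can construct arbitrary Python objects from tagged documents, which is why linters flag it as unsafe; safe_load only builds plain scalars, lists, and dicts. A quick sketch (PyYAML assumed installed; the config keys are illustrative, not epab's real schema):

import yaml

doc = """
changelog_disable: false
artifacts: ['*.whl']
"""

# safe_load refuses python-object tags such as !!python/object/apply,
# so untrusted or semi-trusted config files cannot execute code on load.
config = yaml.safe_load(doc)
print(config)  # {'changelog_disable': False, 'artifacts': ['*.whl']}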
diff --git a/pyqode/core/__init__.py b/pyqode/core/__init__.py index <HASH>..<HASH> 100644 --- a/pyqode/core/__init__.py +++ b/pyqode/core/__init__.py @@ -78,7 +78,7 @@ def getRcDirectory(): "rc") # import the core rc modules -if os.environ["QT_API"] == "PyQt4": +if os.environ["QT_API"] == "PyQt": from pyqode.core.ui import pyqode_icons_pyqt_rc else: from pyqode.core.ui import pyqode_icons_pyside_rc
Fix issues on linux (due to previous commits on windows)
py
diff --git a/asset/synth.py b/asset/synth.py index <HASH>..<HASH> 100644 --- a/asset/synth.py +++ b/asset/synth.py @@ -29,15 +29,13 @@ excludes = ["setup.py", "nox*.py", "README.rst", "docs/conf.py", "docs/index.rst for version in versions: if version == "v1p1beta1": config_path = "/google/cloud/asset/v1p1beta1/artman_cloudasset_v1p1beta1.yaml" - artman_output_name = f"cloudasset-{version}" else: config_path = f"/google/cloud/asset/artman_cloudasset_{version}.yaml" - artman_output_name=f"asset-{version}" library = gapic.py_library( "asset", version, config_path=config_path, - artman_output_name=artman_output_name, + artman_output_name=f"asset-{version}", include_protos=True, )
fix(asset): correct asset synthfile (#<I>)
py
diff --git a/tests/calculators/hazard/event_based/core_test.py b/tests/calculators/hazard/event_based/core_test.py index <HASH>..<HASH> 100644 --- a/tests/calculators/hazard/event_based/core_test.py +++ b/tests/calculators/hazard/event_based/core_test.py @@ -253,7 +253,6 @@ class EventBasedHazardCalculatorTestCase(unittest.TestCase): # check that the parameters are read correctly from the files self.assertEqual(hc.ses_per_logic_tree_path, 5) - self.assertEqual(job.calc.n_sources, 4) # Check that we have the right number of gmf_sets. # The correct number is (num_real * ses_per_logic_tree_path).
tests/calcs/hazard/event_based/core_test: Removed an assertion referencing the `n_sources` variable, which has been deleted. Former-commit-id: bc7e<I>d<I>ca<I>acc<I>c8bd<I>ad<I>c4c1
py
diff --git a/atomicpress/models.py b/atomicpress/models.py index <HASH>..<HASH> 100644 --- a/atomicpress/models.py +++ b/atomicpress/models.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import datetime -from app import db +from .app import db from sqlalchemy.orm import relationship
Fixed python3 path issue
py
diff --git a/spatialist/vector.py b/spatialist/vector.py index <HASH>..<HASH> 100644 --- a/spatialist/vector.py +++ b/spatialist/vector.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- ################################################################ # OGR wrapper for convenient vector data handling and processing -# John Truckenbrodt 2015-2018 +# John Truckenbrodt 2015-2019 ################################################################
[vector] updated year in file description
py
diff --git a/bf/styles.py b/bf/styles.py index <HASH>..<HASH> 100644 --- a/bf/styles.py +++ b/bf/styles.py @@ -70,12 +70,6 @@ class Styles(Dict): indent is how much to indent indented lines (such as inside braces). """ from unum import Unum - def render_dict(d): - return ('{\n' - + c.render(styles[k], - margin=margin+indent, # add indent to margin - indent=indent) - + '}\n') s = "" # render the css text for k in styles.keys(): @@ -93,7 +87,9 @@ class Styles(Dict): if type(i) == bytes: s += str(i, 'utf-8') + ' ' elif type(i) in [dict, Dict]: - s += render_dict(i) + s += '{\n' + c.render(i, # recurse + margin=margin+indent, # add indent to margin + indent=indent) + '}\n' else: s += ';' s += '\n'
simplified Styles.render() by removing the nested method and simply doing its work of recursion inline.
py
diff --git a/pycbc/ahope/datafind_utils.py b/pycbc/ahope/datafind_utils.py index <HASH>..<HASH> 100644 --- a/pycbc/ahope/datafind_utils.py +++ b/pycbc/ahope/datafind_utils.py @@ -483,16 +483,11 @@ def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs, # Now need to convert each frame file into an AhopeFile for cache in datafindcaches: + curr_ifo = cache.ifo for frame in cache: # Why does datafind not return the ifo as the "observatory" # like every other code!? - ifo = frame.description[0:2] - if ifo[0] != frame.observatory: - # HACK TO USE V1 S6 FRAMES - # BECAUSE THE FRAME-TYPE DOES NOT START WITH "V1_" - ifo = "V1" - # raise ValueError("Cannot determine ifo of frame.") - currFile = AhopeFile(ifo, frame.description, + currFile = AhopeFile(curr_ifo, frame.description, frame.segment, file_url=frame.url) datafindouts.append(currFile)
Get the ifo of the frames correctly, not from a hack
py
diff --git a/src/accounts/search_indexes.py b/src/accounts/search_indexes.py index <HASH>..<HASH> 100644 --- a/src/accounts/search_indexes.py +++ b/src/accounts/search_indexes.py @@ -47,16 +47,11 @@ class UserIndex(indexes.SearchIndex, indexes.Indexable): def prepare(self, obj): prepared_data = super(UserIndex, self).prepare(obj) - message_count = self.prepared_data['message_count'] - changeset_count = self.prepared_data['changeset_count'] - ticket_count = self.prepared_data['ticket_count'] - wiki_count = self.prepared_data['wiki_count'] - prepared_data['contribution_count'] = sum(( - message_count, - changeset_count, - ticket_count, - wiki_count + self.prepared_data['message_count'], + self.prepared_data['changeset_count'], + self.prepared_data['ticket_count'], + self.prepared_data['wiki_count'] )) return prepared_data
Removing a few variables on accounts search_indexes
py
diff --git a/src/pythonfinder/models/windows.py b/src/pythonfinder/models/windows.py index <HASH>..<HASH> 100644 --- a/src/pythonfinder/models/windows.py +++ b/src/pythonfinder/models/windows.py @@ -34,7 +34,10 @@ class WindowsFinder(BaseFinder): path = Path(version_object.info.install_path.__getattr__('')) version = version_object.info.sys_version py_version = PythonVersion.from_windows_launcher(version_object) - exe_path = version_object.info.install_path.executable_path + default_path = path / 'python.exe' + if not default_path.exists(): + default_path = path / 'Scripts' / 'python.exe' + exe_path = getattr(version_object.info.install_path, 'executable_path', default_path) path_entry_dict = { 'path': path, 'only_python': True,
Use fallback paths for windows lookups
py
diff --git a/nbtools/form.py b/nbtools/form.py index <HASH>..<HASH> 100644 --- a/nbtools/form.py +++ b/nbtools/form.py @@ -396,7 +396,7 @@ class InteractiveForm(interactive): elif param_type == 'number' and self.is_float(default_value): return FloatFormInput(spec, value=default_value) elif param_type == 'number' and (default_value is None or default_value == ''): - return FloatFormInput(spec, value=0) + return TextFormInput(spec, value='') elif param_type == 'file': return FileFormInput(spec, value=unicode_type(default_value), parent=self.parent, upload_callback=self.upload_callback)
Render optional number parameters as text inputs so that the empty string isn't cast to 0
py
diff --git a/test/test_SimulatedLinkamT95.py b/test/test_SimulatedLinkamT95.py index <HASH>..<HASH> 100644 --- a/test/test_SimulatedLinkamT95.py +++ b/test/test_SimulatedLinkamT95.py @@ -186,10 +186,23 @@ class TestSimulatedLinkamT95(unittest.TestCase): self.assertEqual(status_bytes[0], '\x50') # Manually holding self.assertNotEqual(status_bytes[6:10], '0028') # Temp != 4.0 C - # Finish cooling + # Cool some more linkam.cool() + linkam.process(15) + status_bytes = linkam.getStatus() + self.assertNotEqual(status_bytes[6:10], '0028') # Temp != 4.0 C + + # Hold again + linkam.hold() linkam.process(30) status_bytes = linkam.getStatus() + self.assertEqual(status_bytes[0], '\x50') # Manually holding + self.assertNotEqual(status_bytes[6:10], '0028') # Temp != 4.0 C + + # Finish cooling via heat command (should also work) + linkam.heat() + linkam.process(15) + status_bytes = linkam.getStatus() self.assertEqual(status_bytes[6:10], '0028') # Temp == 4.0 C # Make sure transitions to auto-holding
Also test that heat command can be used to stop holding, even while cooling
py
diff --git a/userprofile/backends/simple/__init__.py b/userprofile/backends/simple/__init__.py index <HASH>..<HASH> 100644 --- a/userprofile/backends/simple/__init__.py +++ b/userprofile/backends/simple/__init__.py @@ -39,11 +39,18 @@ class SimpleBackend(SimpleBackend): def user_registered(sender, user, request, *args, **kwargs): profile = user.profile - for field in request.POST: + + # Build from from post + form = utils.get_profile_model().registration_form(request.POST) + # Username causes clean to fail, remove it. + del form.fields['username'] + form.full_clean() + # Assign cleaned values to user or profile objects. + for field, value in form.cleaned_data.items(): if hasattr(user, field): - setattr(user, field, request.POST.get(field)) + setattr(user, field, value) if hasattr(profile, field): - setattr(profile, field, request.POST.get(field)) + setattr(profile, field, value) user.save() profile.save()
build field values from form instead of post
py
diff --git a/pandas/io/tests/parser/dtypes.py b/pandas/io/tests/parser/dtypes.py index <HASH>..<HASH> 100644 --- a/pandas/io/tests/parser/dtypes.py +++ b/pandas/io/tests/parser/dtypes.py @@ -241,6 +241,9 @@ one,two result = self.read_csv(StringIO(data), header=0, dtype='category') tm.assert_frame_equal(result, expected) + result = self.read_csv(StringIO(data), header=0, + dtype={'a': 'category', 'b': 'category'}) + tm.assert_frame_equal(result, expected) expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]') result = self.read_csv(StringIO(data), header=0,
TST: add test to confirm GH<I> (specify category dtype for empty) (#<I>) Issue #<I> was fixed by PR #<I>, adding one more specific test to confirm this
py
diff --git a/tests/test_blockchain.py b/tests/test_blockchain.py index <HASH>..<HASH> 100644 --- a/tests/test_blockchain.py +++ b/tests/test_blockchain.py @@ -1,8 +1,8 @@ -from provider_backend.blockchain.ocean_contracts import OceanContracts +from provider_backend.blockchain.OceanContractsWrapper import OceanContractsWrapper from provider_backend.acl.acl import generate_encription_keys,decode,encode,generate_encoding_pair -ocean = OceanContracts() +ocean = OceanContractsWrapper() ocean.init_contracts() acl_concise = ocean.concise_contracts['Auth.json']
#<I>: some restructuring and clean up (needed for last commit).
py
diff --git a/src/python/test/test_dxclient.py b/src/python/test/test_dxclient.py index <HASH>..<HASH> 100755 --- a/src/python/test/test_dxclient.py +++ b/src/python/test/test_dxclient.py @@ -217,7 +217,9 @@ class TestDXClient(DXTestCase): run(u'dx rm -r mkdirtest') def test_dxpy_session_isolation(self): - del os.environ["DX_PROJECT_CONTEXT_ID"], os.environ["DX_PROJECT_CONTEXT_NAME"], os.environ['DX_CLI_WD'] + for var in 'DX_PROJECT_CONTEXT_ID', 'DX_PROJECT_CONTEXT_NAME', 'DX_CLI_WD': + if var in os.environ: + del os.environ[var] shell1 = pexpect.spawn("bash") shell2 = pexpect.spawn("bash") shell1.logfile = shell2.logfile = sys.stdout
Tweak test to be more robust
py
diff --git a/eth/tools/fixtures/helpers.py b/eth/tools/fixtures/helpers.py index <HASH>..<HASH> 100644 --- a/eth/tools/fixtures/helpers.py +++ b/eth/tools/fixtures/helpers.py @@ -154,7 +154,7 @@ def chain_vm_configuration(fixture: Dict[str, Any]) -> Iterable[Tuple[int, Type[ elif network == 'ByzantiumToConstantinopleFixAt5': return ( (0, ByzantiumVM), - (5, ConstantinopleVM), + (5, PetersburgVM), ) else: raise ValueError(f"Network {network} does not match any known VM rules")
tests: transition to correct VM (Petersburg, not Constantinople) in helper.
py
diff --git a/pyqode/core/widgets/outline.py b/pyqode/core/widgets/outline.py index <HASH>..<HASH> 100644 --- a/pyqode/core/widgets/outline.py +++ b/pyqode/core/widgets/outline.py @@ -23,6 +23,7 @@ class OutlineTreeWidget(QtWidgets.QTreeWidget): """ def __init__(self, parent=None): super(OutlineTreeWidget, self).__init__(parent) + self._definitions = None self._editor = None self._outline_mode = None self._folding_panel = None @@ -205,7 +206,7 @@ class OutlineTreeWidget(QtWidgets.QTreeWidget): ret_val += flatten(sub_d.children) return ret_val - if self._editor is None: + if self._editor is None or not self._definitions: return to_select = None @@ -225,4 +226,10 @@ class OutlineTreeWidget(QtWidgets.QTreeWidget): if previous: to_select = previous.tree_item - self.setCurrentItem(to_select) + if to_select: + try: + self.setCurrentItem(to_select) + except RuntimeError: + # RuntimeError: wrapped C/C++ object of type QTreeWidgetItem + # has been deleted + pass
Fix bugs preventing the OpenCobolIDE test suite from succeeding
py
diff --git a/test/test_request.py b/test/test_request.py index <HASH>..<HASH> 100644 --- a/test/test_request.py +++ b/test/test_request.py @@ -55,6 +55,10 @@ class RequestTest(unittest.TestCase): r = ice.Request({}) self.assertEqual(r.path, '/') + def test_empty_path(self): + r = ice.Request({'PATH_INFO': ''}) + self.assertEqual(r.path, '/') + def test_query_with_two_names(self): r = ice.Request({'QUERY_STRING': 'a=foo&b=bar'}) self.assertEqual(r.query.data, {'a': ['foo'], 'b': ['bar']})
Add test for empty PATH_INFO in environ There is a check in ice.Request that tests if the value for PATH_INFO key in environ dictionary is an empty string. If it is found to be an empty string, the value is set to '/'. This check has not been tested in the tests for this class. As a result, the ice module does not have <I>% test coverage. Fix this by adding a test for this check.
py
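Per the WSGI spec a request for the root URL may arrive with PATH_INFO set to the empty string, so the normalization to '/' needs its own test. A compact sketch of both the check and the test pattern (the class shape is illustrative, not the real ice.Request):

class Request:
    def __init__(self, environ):
        # Missing key and empty string both mean the root URL.
        self.path = environ.get('PATH_INFO', '/') or '/'

assert Request({}).path == '/'
assert Request({'PATH_INFO': ''}).path == '/'
assert Request({'PATH_INFO': '/a'}).path == '/a'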
diff --git a/tests/unit_tests/test_climate.py b/tests/unit_tests/test_climate.py index <HASH>..<HASH> 100644 --- a/tests/unit_tests/test_climate.py +++ b/tests/unit_tests/test_climate.py @@ -231,9 +231,8 @@ async def test_set_preset_mode_invalid_modes(monkeypatch): await _climate.async_update() - preset_modes = _climate.preset_modes - with pytest.raises(UnknownPresetMode): - bad_modes = ["UKNOWN_MODE", "home", "auto", "away", "hot"] - for mode in bad_modes: - assert mode not in preset_modes + bad_modes = ["UKNOWN_MODE", "home", "auto", "away", "hot"] + for mode in bad_modes: + assert mode not in _climate.preset_modes + with pytest.raises(UnknownPresetMode): await _climate.set_preset_mode(mode)
test: fix bad mode preset_test
py
diff --git a/plexapi/server.py b/plexapi/server.py index <HASH>..<HASH> 100644 --- a/plexapi/server.py +++ b/plexapi/server.py @@ -366,15 +366,9 @@ class PlexServer(PlexObject): def optimizedItems(self): """ Returns list of all :class:`~plexapi.media.Optimized` objects connected to server. """ - items = [] - - backgroundProcessing = self.query('/playlists?type=42') - for elem in backgroundProcessing: - key = elem.attrib.get('key') - for elem in self.query(key): - items.append(Optimized(server=self, data=elem)) - return items + backgroundProcessing = self.fetchItem('/playlists?type=42') + return self.fetchItems('%s/items' % backgroundProcessing.key, cls=Optimized) def conversions(self): """ Returns list of all :class:`~plexapi.media.Conversion` objects connected to server. """
use fetchItem and fetchItems instead of query
py
diff --git a/jobstamps/jobstamp.py b/jobstamps/jobstamp.py index <HASH>..<HASH> 100644 --- a/jobstamps/jobstamp.py +++ b/jobstamps/jobstamp.py @@ -101,7 +101,11 @@ class HashMethod(object): def check_dependency(self, dependency_path): """Check if mtime of dependency_path is greater than stored mtime.""" stored_hash = self._stamp_file_hashes.get(dependency_path) - assert stored_hash is not None + + # This file was newly added, or we don't have a file + # with stored hashes yet. Assume out of date. + if not stored_hash: + return False return stored_hash == _sha1_for_file(dependency_path)
Don't assume hashes have been stored
py
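Returning False for an unknown dependency makes the stamp check degrade gracefully: a newly added file simply reads as 'changed' and triggers a rebuild. A runnable sketch with a hypothetical hash helper mirroring the patched logic:

import hashlib

def _sha1_for_file(path):
    with open(path, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest()

def check_dependency(stored_hashes, dependency_path):
    stored = stored_hashes.get(dependency_path)
    # A missing stored hash means the dependency is newly added (or no
    # stamp file exists yet): report "changed" instead of crashing.
    if not stored:
        return False
    return stored == _sha1_for_file(dependency_path)

with open('dep.txt', 'w') as f:
    f.write('v1')
print(check_dependency({}, 'dep.txt'))                                      # False: nothing stored yet
print(check_dependency({'dep.txt': _sha1_for_file('dep.txt')}, 'dep.txt'))  # True: up to date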
diff --git a/bakery/tasks.py b/bakery/tasks.py index <HASH>..<HASH> 100644 --- a/bakery/tasks.py +++ b/bakery/tasks.py @@ -56,7 +56,11 @@ def run(command, cwd = None, log = None): raise ValueError def prun(command, cwd, log=None): - """ Wrapper for subprocess.Popen that capture output and return as result + """ + + THIS METHOD IS DEPRECATED + + Wrapper for subprocess.Popen that capture output and return as result :param command: shell command to run :param cwd: current working dir @@ -266,7 +270,8 @@ def lint_process(project, log): def ttfautohint_process(project, log): """ Run ttfautohint with project command line settings for each - ttf file in result folder + ttf file in result src folder, outputting them in the _out root, + or just copy the ttfs there. """ # $ ttfautohint -l 7 -r 28 -G 0 -x 13 -w "" -W -c original_font.ttf final_font.ttf config = project.config
Improving tasks.py comments (minor)
py
diff --git a/slackclient/_channel.py b/slackclient/_channel.py index <HASH>..<HASH> 100644 --- a/slackclient/_channel.py +++ b/slackclient/_channel.py @@ -6,7 +6,7 @@ class Channel(object): self.members = members def __eq__(self, compare_str): - if self.name == compare_str or self.id == compare_str: + if self.name == compare_str or self.name == "#" + compare_str or self.id == compare_str: return True else: return False
allow channels to be found with leading #
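Given how `__eq__` is written, the extra clause only helps when the stored channel name carries the leading `#`: the bare name then matches too. A quick illustration with a pared-down class (constructor shape assumed):

```py
class Channel(object):
    def __init__(self, name, channel_id):
        self.name = name
        self.id = channel_id

    def __eq__(self, compare_str):
        return (self.name == compare_str
                or self.name == "#" + compare_str
                or self.id == compare_str)


general = Channel("#general", "C024BE91L")
assert general == "#general"    # matched before the change
assert general == "general"     # now also matches without the '#'
assert general == "C024BE91L"   # lookup by ID still works
```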
```py
diff --git a/horizon/setup.py b/horizon/setup.py
index <HASH>..<HASH> 100644
--- a/horizon/setup.py
+++ b/horizon/setup.py
@@ -41,7 +41,7 @@ setup(
                 + findall('horizon/dashboards/nova/templates') \
                 + findall('horizon/dashboards/syspanel/templates') \
                 + findall('horizon/dashboards/settings/templates')]},
-    install_requires = ['setuptools', 'mox>=0.5.3', 'django_nose'],
+    install_requires = [],
     classifiers = [
         'Development Status :: 4 - Beta',
         'Framework :: Django',
```
Removes unmaintained requirements from setup.py Horizon maintains a requirements list in tools/pip-requires. This patch removes the unmaintained and incomplete list from setup.py, to avoid misleading naive users. Change-Id: I1e<I>f3c0dca<I>c<I>ee<I>d<I>abac<I>a<I>
```py
diff --git a/installers/Windows/installer.py b/installers/Windows/installer.py
index <HASH>..<HASH> 100644
--- a/installers/Windows/installer.py
+++ b/installers/Windows/installer.py
@@ -99,7 +99,6 @@ packages=
     ntsecuritycon
 {packages}
 files={package_dist_info} > $INSTDIR/pkgs
-    black-20.8b1.dist-info > $INSTDIR/pkgs
     __main__.py > $INSTDIR/pkgs/jedi/inference/compiled/subprocess
     __init__.py > $INSTDIR/pkgs/pylint
     lib
@@ -340,13 +339,6 @@ def run(python_version, bitness, repo_root, entrypoint, package, icon_path,
                                      prefix="installer-pynsist-") as work_dir:
         print("Temporary working directory at", work_dir)
 
-        # NOTE: SHOULD BE TEMPORAL (until black has wheels available).
-        # See the 'files' section on the pynsist template config too.
-        print("Copying dist.info for black-20.8b1")
-        shutil.copytree(
-            "installers/Windows/assets/black/black-20.8b1.dist-info",
-            os.path.join(work_dir, "black-20.8b1.dist-info"))
-
         # NOTE: SHOULD BE TEMPORAL (until jedi has the fix available).
         # See the 'files' section on the pynsist template config too.
         print("Copying patched CompiledSubprocess __main__.py for jedi")
```
Installer: Stop copying black dist-info when building Windows installers
```py
diff --git a/src/bidi/algorithm.py b/src/bidi/algorithm.py
index <HASH>..<HASH> 100644
--- a/src/bidi/algorithm.py
+++ b/src/bidi/algorithm.py
@@ -275,19 +275,22 @@ def resolve_neutral_types(sor, eor, extended_chars):
     # used at level run boundaries.
 
+    #TODO seems buggy for now
     dummy = [{'biditype':sor}] + extended_chars + [{'biditype':eor}]
 
     for i in range(1, len(dummy)-1):
         prev_type, curr_type, next_type = dummy[i-1:i+2]
+        print prev_type, curr_type, next_type
 
         if prev_type in ('EN', 'AN'):
             prev_type = 'R'
 
         if next_type in ('EN', 'AN'):
             next_type = 'R'
 
-        if curr_type == 'ON' and prev_type == next_type:
-            dummy[i]['biditype'] = next_type
-        else:
-            dummy[i]['biditype'] = ['L', 'R'][dummy[i]['level'] % 2]
+        if curr_type == 'ON':
+            if prev_type == next_type:
+                dummy[i]['biditype'] = next_type
+            else:
+                dummy[i]['biditype'] = ['L', 'R'][dummy[i]['level'] % 2]
 
     return sor, eor, dummy[1:-1]
```
Intermediate commit, before moving to a class-based implementation
```py
diff --git a/script/update-external-binaries.py b/script/update-external-binaries.py
index <HASH>..<HASH> 100755
--- a/script/update-external-binaries.py
+++ b/script/update-external-binaries.py
@@ -8,7 +8,7 @@ from lib.config import get_target_arch
 from lib.util import safe_mkdir, rm_rf, extract_zip, tempdir, download
 
 
-VERSION = 'v1.0.0'
+VERSION = 'v1.1.0'
 SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
 FRAMEWORKS_URL = 'http://github.com/electron/electron-frameworks/releases' \
                  '/download/' + VERSION
```
Update external frameworks version Bump to [<I>](<URL>) which upgraded Squirrel.Mac.
```py
diff --git a/ibis/backends/pyspark/compiler.py b/ibis/backends/pyspark/compiler.py
index <HASH>..<HASH> 100644
--- a/ibis/backends/pyspark/compiler.py
+++ b/ibis/backends/pyspark/compiler.py
@@ -486,7 +486,11 @@ def compile_group_concat(t, expr, scope, timecontext, context=None, **kwargs):
     sep = expr.op().sep.op().value
 
     def fn(col):
-        return F.concat_ws(sep, F.collect_list(col))
+        collected = F.collect_list(col)
+        return F.array_join(
+            F.when(F.size(collected) == 0, F.lit(None)).otherwise(collected),
+            sep,
+        )
 
     return compile_aggregator(
         t, expr, scope, timecontext, fn=fn, context=context
```
fix(pyspark): use empty check for collect_list in GroupConcat rule
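The motivation: `concat_ws` over an empty `collect_list` yields an empty string, whereas group-concat semantics call for NULL when a group has nothing to aggregate. A sketch of the guarded expression in isolation (needs a local PySpark install; the example data is made up):

```py
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([("a", None)], "g string, v string")

collected = F.collect_list("v")  # [] when every value in the group is null
guarded = F.array_join(
    F.when(F.size(collected) == 0, F.lit(None)).otherwise(collected),
    ",",
)
# F.concat_ws(",", collected) would give '' for this group;
# the NULL-guarded array_join gives NULL instead.
df.groupBy("g").agg(guarded.alias("vs")).show()
```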
```py
diff --git a/rash/cli.py b/rash/cli.py
index <HASH>..<HASH> 100644
--- a/rash/cli.py
+++ b/rash/cli.py
@@ -99,7 +99,7 @@ def version_add_arguments(parser):
     pass
 
 
-def locate_run(output, target):
+def locate_run(output, target, no_newline):
     """
     Print location of RASH related file.
     """
@@ -107,6 +107,8 @@
     cfstore = ConfigStore()
     path = getattr(cfstore, "{0}_path".format(target))
     output.write(path)
+    if not no_newline:
+        output.write("\n")
 
 
 def locate_add_arguments(parser):
@@ -115,6 +117,9 @@
         choices=['base', 'config', 'db', 'daemon_pid', 'daemon_log'],
         help='Name of file to show the path (e.g., config).')
     parser.add_argument(
+        '--no-newline', '-n', action='store_true',
+        help='do not output the trailing newline.')
+    parser.add_argument(
         '--output', default='-', type=argparse.FileType('w'),
         help="""
         Output file to write the results in. Default is stdout.
```
Make "rash locate" bash friendly
```py
diff --git a/infoblox_client/objects.py b/infoblox_client/objects.py
index <HASH>..<HASH> 100644
--- a/infoblox_client/objects.py
+++ b/infoblox_client/objects.py
@@ -281,8 +281,9 @@ class InfobloxObject(BaseObject):
             fields = [field for field in self._fields
                       if field in self._updateable_search_fields or
                       field not in self._search_for_update_fields]
+
         elif search_fields == 'extra':
-            fields = [field for field in self._fields 
+            fields = [field for field in self._fields
                       if field not in update_fields]
 
         return {field: self.field_to_dict(field) for field in fields
@@ -419,7 +420,8 @@ class InfobloxObject(BaseObject):
 
     def update(self):
         update_fields = self.to_dict(search_fields='exclude')
-        fields = self.to_dict(search_fields='extra', update_fields=update_fields)
+        fields = self.to_dict(search_fields='extra',
+                              update_fields=update_fields)
         for key in fields:
             LOG.info(
                 "Field is not allowed for update: %s - ignoring",
```
Changes as per linting
```py
diff --git a/slackminion/plugin/base.py b/slackminion/plugin/base.py
index <HASH>..<HASH> 100644
--- a/slackminion/plugin/base.py
+++ b/slackminion/plugin/base.py
@@ -57,11 +57,11 @@ class BasePlugin(object):
             if channel[0] == '@':
                 self._bot.send_im(channel[1:], text)
             elif channel[0] == '#':
-                self._bot.send_message(channel[1:], text, thread)
+                self._bot.send_message(channel[1:], text, thread, reply_broadcast)
             else:
-                self._bot.send_message(channel, text, thread)
+                self._bot.send_message(channel, text, thread, reply_broadcast)
         else:
-            self._bot.send_message(channel, text, thread)
+            self._bot.send_message(channel, text, thread, reply_broadcast)
 
     def start_timer(self, duration, func, *args):
         """
```
add reply_broadcast to send_message calls
```py
diff --git a/ontrack-delivery/publish.py b/ontrack-delivery/publish.py
index <HASH>..<HASH> 100755
--- a/ontrack-delivery/publish.py
+++ b/ontrack-delivery/publish.py
@@ -2,6 +2,7 @@
 
 import argparse
 import os
+import re
 
 import github
 import ontrack
@@ -74,9 +75,11 @@ def github_publish(options):
 
 def get_release_name(branch):
     """Extracts the release name from the name of the branch"""
-    # TODO get_release_name
-    # TODO Checks this is actually a release branch
-    return '2.0-rc'
+    matcher = re.match('release/(.*)', branch)
+    if matcher is not None:
+        return matcher.group(1)
+    else:
+        raise Exception('Can only release... releases.')
 
 
 # Publication main method
```
Release: Release name extraction from the branch
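So `release/2.0-rc` now yields `2.0-rc`, and anything that is not a release branch fails loudly instead of silently publishing under a hard-coded name:

```py
import re


def get_release_name(branch):
    matcher = re.match('release/(.*)', branch)
    if matcher is not None:
        return matcher.group(1)
    raise Exception('Can only release... releases.')


assert get_release_name('release/2.0-rc') == '2.0-rc'
# get_release_name('feature/login') raises: not a release branch
```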
```py
diff --git a/geomdl/abstract.py b/geomdl/abstract.py
index <HASH>..<HASH> 100644
--- a/geomdl/abstract.py
+++ b/geomdl/abstract.py
@@ -887,6 +887,7 @@ class Curve(SplineGeometry):
         max_k = self.knotvector[-1]
         new_kv = [max_k - k for k in self.knotvector]
         self._knot_vector[0] = list(reversed(new_kv))
+        self.reset(evalpts=True)
 
     def set_ctrlpts(self, ctrlpts, *args, **kwargs):
         """ Sets control points and checks if the data is consistent.
```
Don't forget to reset the curve after reversing
```py
diff --git a/src/oidcservice/oauth2/service.py b/src/oidcservice/oauth2/service.py
index <HASH>..<HASH> 100644
--- a/src/oidcservice/oauth2/service.py
+++ b/src/oidcservice/oauth2/service.py
@@ -173,6 +173,10 @@ class RefreshAccessToken(Service):
         _args = self.extend_request_args({}, oauth2.AccessTokenResponse,
                                          'token_response', _state, parameters)
 
+        _args = self.extend_request_args({}, oauth2.AccessTokenResponse,
+                                         'refresh_token_response', _state,
+                                         parameters)
+
         if request_args is None:
             request_args = _args
         else:
```
Refresh token likely in refresh access token response.
```py
diff --git a/uiautomator.py b/uiautomator.py
index <HASH>..<HASH> 100644
--- a/uiautomator.py
+++ b/uiautomator.py
@@ -197,6 +197,8 @@ class Adb(object):
                 "Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
         else:
             import distutils
+            if "spawn" not in dir(distutils):
+                import distutils.spawn
             adb_cmd = distutils.spawn.find_executable("adb")
             if adb_cmd:
                 adb_cmd = os.path.realpath(adb_cmd)
```
fix distutils.spawn import issue in some python distributions
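The subtlety is that `import distutils` does not import the `spawn` submodule; whether `distutils.spawn` is already bound depends on what the interpreter happened to import earlier, which varied across distributions. The guard makes the binding explicit. (Note distutils was removed entirely in Python 3.12; `shutil.which` is the modern replacement.) The pattern in isolation:

```py
import distutils

# 'import distutils' alone does not bind the submodule; some
# distributions pre-import it as a side effect, others do not.
if "spawn" not in dir(distutils):
    import distutils.spawn

adb_cmd = distutils.spawn.find_executable("adb")  # None if not on PATH
print(adb_cmd)
```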
```py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -63,8 +63,14 @@ setup(
     classifiers=[
         "Development Status :: 3 - Alpha",
         "Intended Audience :: Developers",
-        "License :: OSI Approved :: Apache License 2.0",
+        "License :: OSI Approved :: Apache Software License",
         "Programming Language :: Python",
+        "Programming Language :: Python :: 2",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
         "Topic :: Database",
         "Topic :: Database :: Database Engines/Servers",
         "Operating System :: OS Independent"
```
Fix PyPI classifiers (#<I>) * Add PyPI classifiers for supported python versions * Fix PyPI classifiers for Apache License
```py
diff --git a/plaid/version.py b/plaid/version.py
index <HASH>..<HASH> 100644
--- a/plaid/version.py
+++ b/plaid/version.py
@@ -1 +1 @@
-__version__ = '2.3.0'
+__version__ = '2.3.1'
```
plaid-python@<I>
```py
diff --git a/raiden/ui/cli.py b/raiden/ui/cli.py
index <HASH>..<HASH> 100644
--- a/raiden/ui/cli.py
+++ b/raiden/ui/cli.py
@@ -140,13 +140,13 @@ def options(func):
 
 @options
 @click.command()
-def app(address,  # pylint: disable=too-many-arguments,too-many-locals
+def app(address,
         keystore_path,
         eth_rpc_endpoint,
         registry_contract_address,
         discovery_contract_address,
         listen_address,
-        rpccorsdomain,
+        rpccorsdomain,  # pylint: disable=unused-argument
         socket,
         logging,
         logfile,
```
rpccorsdomain is in run but unconditionally given
```py
diff --git a/cassandra/io/libevreactor.py b/cassandra/io/libevreactor.py
index <HASH>..<HASH> 100644
--- a/cassandra/io/libevreactor.py
+++ b/cassandra/io/libevreactor.py
@@ -123,10 +123,8 @@ class LibevLoop(object):
             conn.close()
             if conn._write_watcher:
                 conn._write_watcher.stop()
-                del conn._write_watcher
             if conn._read_watcher:
                 conn._read_watcher.stop()
-                del conn._read_watcher
 
         self.notify()  # wake the timer watcher
         log.debug("Waiting for event loop thread to join...")
```
Do not del libev IO wrappers while the thread is still running. Fixes an issue where the runtime would occasionally segfault when exiting without cluster shutdown: the IO object was deallocated while the event loop thread was still running and could still enter the IO callback.
```py
diff --git a/bin/check_mutations.py b/bin/check_mutations.py
index <HASH>..<HASH> 100644
--- a/bin/check_mutations.py
+++ b/bin/check_mutations.py
@@ -61,7 +61,7 @@ def start_logging(log_file='', log_level='INFO'):
 
 
 def parse_arguments():
-    info = 'Extracts gene sequences from a genomic FASTA file'
+    info = 'Checks mutations to see what strand they are reported on and for unmapped mutations.'
     parser = argparse.ArgumentParser(description=info)
 
     # logging arguments
```
Corrected CLI description for check_mutations script
```py
diff --git a/pynailgun/test_ng.py b/pynailgun/test_ng.py
index <HASH>..<HASH> 100644
--- a/pynailgun/test_ng.py
+++ b/pynailgun/test_ng.py
@@ -130,5 +130,7 @@ class TestNailgunConnection(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    for i in range(10):
-        unittest.main(exit=False)
+    for i in range(50):
+        was_successful = unittest.main(exit=False).result.wasSuccessful()
+        if not was_successful:
+            sys.exit(1)
```
test_ng.py: running <I> tests in a row
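`unittest.main(exit=False)` returns a `TestProgram` whose `.result` records the run, which is what makes the early bail-out possible; note the diff's `sys.exit(1)` relies on `sys` already being imported in that file. The pattern as a standalone sketch:

```py
import sys
import unittest


class SmokeTest(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


if __name__ == '__main__':
    for i in range(50):
        # exit=False stops unittest from calling sys.exit itself,
        # so the result can be inspected and the loop aborted early.
        if not unittest.main(exit=False).result.wasSuccessful():
            sys.exit(1)
```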
```py
diff --git a/astrocats/catalog/catalog.py b/astrocats/catalog/catalog.py
index <HASH>..<HASH> 100644
--- a/astrocats/catalog/catalog.py
+++ b/astrocats/catalog/catalog.py
@@ -714,7 +714,7 @@ class Catalog:
         # Write it all out!
         # NOTE: this needs to use a `list` wrapper to allow modification of
         # dict
-        for name in self.entries:
+        for name in list(self.entries.keys()):
             if self.args.write_entries:
                 # If this is a stub and we aren't writing stubs, skip
                 if self.entries[name]._stub and not write_stubs:
```
MAINT: reverted for loop change
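The list wrapper matters because the loop body may add or drop entries while writing; mutating a dict during direct iteration raises `RuntimeError: dictionary changed size during iteration` on Python 3. A minimal reproduction of the hazard and the fix:

```py
entries = {"sn1987a": 1, "sn2011fe": 2}

# for name in entries:              # RuntimeError if the body adds keys
#     entries[name + "-stub"] = 0

for name in list(entries.keys()):   # snapshot: safe to mutate below
    entries[name + "-stub"] = 0

print(sorted(entries))
```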
```py
diff --git a/src/Gelatin/compiler/SyntaxCompiler.py b/src/Gelatin/compiler/SyntaxCompiler.py
index <HASH>..<HASH> 100644
--- a/src/Gelatin/compiler/SyntaxCompiler.py
+++ b/src/Gelatin/compiler/SyntaxCompiler.py
@@ -97,9 +97,9 @@ class SyntaxCompiler(DispatchProcessor):
         matcher.statements = self._suite(sublist[1], buffer)
         return matcher
 
-    def _when_stmt(self, (tag, left, right, sublist), buffer):
+    def _when_stmt(self, (tag, left, right, sublist), buffer, flags = 0):
         matcher = WhenStatement()
-        matcher.matchlist = self._match_list(sublist[0], buffer, None)
+        matcher.matchlist = self._match_list(sublist[0], buffer, flags)
         matcher.statements = self._suite(sublist[1], buffer)
         return matcher
```
fix: last commit broke the 'when' statement.
```py
diff --git a/moto/route53/models.py b/moto/route53/models.py
index <HASH>..<HASH> 100644
--- a/moto/route53/models.py
+++ b/moto/route53/models.py
@@ -2,11 +2,20 @@ from __future__ import unicode_literals
 
 from collections import defaultdict
 
+import string
+import random
 import uuid
 from jinja2 import Template
 
 from moto.core import BaseBackend, BaseModel
-from moto.core.utils import get_random_hex
+
+
+ROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits
+
+
+def create_route53_zone_id():
+    # New ID's look like this Z1RWWTK7Y8UDDQ
+    return ''.join([random.choice(ROUTE53_ID_CHOICE) for _ in range(0, 15)])
 
 
 class HealthCheck(BaseModel):
@@ -247,7 +256,7 @@ class Route53Backend(BaseBackend):
         self.resource_tags = defaultdict(dict)
 
     def create_hosted_zone(self, name, private_zone, comment=None):
-        new_id = get_random_hex()
+        new_id = create_route53_zone_id()
         new_zone = FakeZone(
             name, new_id, private_zone=private_zone, comment=comment)
         self.zones[new_id] = new_zone
```
Updated R<I> IDs to match what AWS does now
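The moto generator just samples fifteen characters from `[A-Z0-9]`, mimicking `Z1RWWTK7Y8UDDQ`-style zone IDs instead of the old hex strings:

```py
import random
import string

ROUTE53_ID_CHOICE = string.ascii_uppercase + string.digits


def create_route53_zone_id():
    # New IDs look like Z1RWWTK7Y8UDDQ
    return ''.join(random.choice(ROUTE53_ID_CHOICE) for _ in range(15))


print(create_route53_zone_id())  # e.g. 'ZK3QI80PLBW2J7M'
```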
```py
diff --git a/bokeh/__init__.py b/bokeh/__init__.py
index <HASH>..<HASH> 100644
--- a/bokeh/__init__.py
+++ b/bokeh/__init__.py
@@ -1,11 +1,14 @@
 from __future__ import absolute_import, print_function
 
+import logging
 import warnings
 
 from . import utils
 from . import sampledata
 from ._version import get_versions
 from .settings import settings
 
+log = logging.getLogger(__name__)
+
 try:
     from .__conda_version__ import conda_version
     __version__ = conda_version.replace("'","")
@@ -87,7 +90,9 @@ try:
         if not skip_load:
             load_notebook(resources=resources, verbose=verbose, hide_banner=hide_banner)
 except ImportError:
-    pass
+    log.debug("You don't have IPython/Jupyter installed.")
+except IOError:
+    log.debug("You don't have the static files available.")
 
 
 def _print_versions():
     import platform as pt
```
Add logging and catch IOError when you don't have the static files.
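Swallowing the ImportError with `pass` hid genuine problems; logging at debug level keeps optional-dependency failures quiet by default but visible when debugging. The pattern in isolation:

```py
import logging

log = logging.getLogger(__name__)

try:
    import IPython  # optional dependency; absence is not an error
except ImportError:
    log.debug("You don't have IPython/Jupyter installed.")
```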