Dataset schema:
- diff: string (lengths 139 to 3.65k)
- message: string (lengths 8 to 627)
- diff_languages: stringclasses (1 distinct value: "py")
diff --git a/django_distill/management/commands/distill-local.py b/django_distill/management/commands/distill-local.py
index <HASH>..<HASH> 100644
--- a/django_distill/management/commands/distill-local.py
+++ b/django_distill/management/commands/distill-local.py
@@ -42,7 +42,7 @@ class Command(BaseCommand):
             raise CommandError(e)
         if collectstatic:
             run_collectstatic(stdout)
-        if not os.path.isdir(settings.STATIC_ROOT):
+        if not exclude_staticfiles and not os.path.isdir(settings.STATIC_ROOT):
             e = 'Static source directory does not exist, run collectstatic'
             raise CommandError(e)
         output_dir = os.path.abspath(os.path.expanduser(output_dir))
only check STATIC_ROOT exists when not using --exclude-staticfiles, resolves #<I>
py
diff --git a/spacy/lang/char_classes.py b/spacy/lang/char_classes.py
index <HASH>..<HASH> 100644
--- a/spacy/lang/char_classes.py
+++ b/spacy/lang/char_classes.py
@@ -9,6 +9,8 @@
 _bengali = r"\u0980-\u09FF"
 _hebrew = r"\u0591-\u05F4\uFB1D-\uFB4F"
 
+_hindi = r"\u0900-\u097F"
+
 # Latin standard
 _latin_u_standard = r"A-Z"
 _latin_l_standard = r"a-z"
@@ -193,7 +195,7 @@
 _ukrainian = r"а-щюяіїєґА-ЩЮЯІЇЄҐ"
 
 _upper = LATIN_UPPER + _russian_upper + _tatar_upper + _greek_upper + _ukrainian_upper
 _lower = LATIN_LOWER + _russian_lower + _tatar_lower + _greek_lower + _ukrainian_lower
 
-_uncased = _bengali + _hebrew + _persian + _sinhala
+_uncased = _bengali + _hebrew + _persian + _sinhala + _hindi
 
 ALPHA = group_chars(LATIN + _russian + _tatar + _greek + _ukrainian + _uncased)
 ALPHA_LOWER = group_chars(_lower + _uncased)
Fix default punctuation rules for Hindi text (#<I> explosion)
py
diff --git a/osmnx/io.py b/osmnx/io.py
index <HASH>..<HASH> 100644
--- a/osmnx/io.py
+++ b/osmnx/io.py
@@ -377,15 +377,15 @@ def _stringify_nonnumeric_cols(gdf):
 
 
 def save_graph_xml(
-        data,
-        filepath=None,
-        node_tags=settings.osm_xml_node_tags,
-        node_attrs=settings.osm_xml_node_attrs,
-        edge_tags=settings.osm_xml_way_tags,
-        edge_attrs=settings.osm_xml_way_attrs,
-        oneway=False,
-        merge_edges=True,
-        edge_tag_aggs=None,
+    data,
+    filepath=None,
+    node_tags=settings.osm_xml_node_tags,
+    node_attrs=settings.osm_xml_node_attrs,
+    edge_tags=settings.osm_xml_way_tags,
+    edge_attrs=settings.osm_xml_way_attrs,
+    oneway=False,
+    merge_edges=True,
+    edge_tag_aggs=None,
 ):
     """
     Save graph to disk as an OSM-formatted XML .osm file.
Minor parameter edits (6)

PyCharm's formatting was overzealous and modified a function not touched in this PR. This should undo the change in indentation.
py
diff --git a/holoviews/core/util.py b/holoviews/core/util.py
index <HASH>..<HASH> 100644
--- a/holoviews/core/util.py
+++ b/holoviews/core/util.py
@@ -4,6 +4,7 @@ import itertools
 import string, fnmatch
 import unicodedata
 from collections import defaultdict
+from functools import reduce
 
 import numpy as np
 import param
Added missing import in core.util
py
diff --git a/sentrylogs/helpers.py b/sentrylogs/helpers.py
index <HASH>..<HASH> 100644
--- a/sentrylogs/helpers.py
+++ b/sentrylogs/helpers.py
@@ -1,7 +1,7 @@
 """ Helper functions for Sentry Logs """
 
-from sentry_sdk import capture_message, configure_scope
+from sentry_sdk import capture_message, push_scope
 
 from .conf.settings import SENTRY_LOG_LEVEL, SENTRY_LOG_LEVELS
 
@@ -12,7 +12,7 @@ def send_message(message, level, data):
     if (SENTRY_LOG_LEVELS.index(level)
             < SENTRY_LOG_LEVELS.index(SENTRY_LOG_LEVEL)):
         return
-    with configure_scope() as scope:
+    with push_scope() as scope:
         for key, value in data.items():
             scope.set_extra(key, value)
         capture_message(message, level)
Ensure the Sentry scope doesn't have old data
py
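Background note on the row above (editor's gloss, not part of the dataset): `sentry_sdk.push_scope()` pushes a fresh scope that is popped when the `with` block exits, so extras set for one event cannot leak into the next, whereas `configure_scope()` mutates the current scope in place. A minimal sketch of the pattern, assuming an initialized SDK:

    import sentry_sdk
    from sentry_sdk import capture_message, push_scope

    sentry_sdk.init(dsn="")  # empty DSN: events are discarded, fine for a demo

    def send_with_extras(message, level, data):
        # the pushed scope is discarded on exit, so `data` from this call
        # cannot bleed into the next capture_message()
        with push_scope() as scope:
            for key, value in data.items():
                scope.set_extra(key, value)
            capture_message(message, level)

    send_with_extras('first', 'info', {'request_id': 'abc'})
    send_with_extras('second', 'info', {})  # no stale 'request_id' extra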
diff --git a/theanets/main.py b/theanets/main.py
index <HASH>..<HASH> 100644
--- a/theanets/main.py
+++ b/theanets/main.py
@@ -172,6 +172,8 @@ class Experiment:
     def _build_trainers(self, **kwargs):
         '''Build trainers from command-line arguments.
         '''
+        if not hasattr(self.args, 'optimize'):
+            self.args.optimize = 'nag'
         if isinstance(self.args.optimize, str):
             self.args.optimize = self.args.optimize.strip().split()
         for factory in self.args.optimize:
Set a default optimizer if none is defined. Fixes issue #<I>.
py
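Background note (editor's gloss): the guard above simply backfills a default before the attribute is first read; `getattr` with a default is the same idea. A tiny self-contained illustration using a hypothetical namespace:

    import argparse

    args = argparse.Namespace()          # parsed args without --optimize

    if not hasattr(args, 'optimize'):    # fall back to a default trainer
        args.optimize = 'nag'
    if isinstance(args.optimize, str):
        args.optimize = args.optimize.strip().split()

    print(args.optimize)  # ['nag']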
diff --git a/django_plotly_dash/version.py b/django_plotly_dash/version.py
index <HASH>..<HASH> 100644
--- a/django_plotly_dash/version.py
+++ b/django_plotly_dash/version.py
@@ -23,4 +23,4 @@ SOFTWARE.
 
 '''
 
-__version__ = "0.9.9"
+__version__ = "0.9.10"
Increase version number (#<I>)
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -40,7 +40,7 @@ extensions = [
     "sphinx.ext.extlinks",
     "sphinx.ext.intersphinx",
     "sphinx.ext.todo",
-    "sphinx_ansible_theme.ext.pygments_lexer",
+    "sphinx_ansible_theme",
     "notfound.extension",
 ]
Fix docs jobs (#<I>)

The docs jobs are broken because the docs configuration is using a module that no longer exists. This change corrects that problem.
py
diff --git a/img2txt.py b/img2txt.py
index <HASH>..<HASH> 100644
--- a/img2txt.py
+++ b/img2txt.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 """
-Usage: imgtxt.py <imgfile> [--size=<size>] [--level=<level>]
+Usage: img2txt.py <imgfile> [--size=<size>] [--level=<level>]
 """
oh img2txt
py
diff --git a/tests/render/test_render.py b/tests/render/test_render.py
index <HASH>..<HASH> 100644
--- a/tests/render/test_render.py
+++ b/tests/render/test_render.py
@@ -139,7 +139,7 @@ def test_render_profiled_fixture_expectations_with_distribution(titanic_dataset_
     rendered_json = PrescriptivePageRenderer.render(titanic_dataset_profiler_expectations_with_distribution)
     rendered_page = DefaultJinjaPageView.render(rendered_json)
 
-    with open('./tests/render/output/titanic_dataset_profiler_expectations_with_distribution.html', 'w') as f:
+    with open('./tests/render/output/titanic_dataset_profiler_expectations_with_distribution.html', 'wb') as f:
         f.write(rendered_page.encode("utf-8"))
 
     assert rendered_page[:15] == "<!DOCTYPE html>"
Improve unicode handling in tests for py2 support
py
diff --git a/pysc2/lib/point_flag.py b/pysc2/lib/point_flag.py
index <HASH>..<HASH> 100644
--- a/pysc2/lib/point_flag.py
+++ b/pysc2/lib/point_flag.py
@@ -18,6 +18,7 @@ from __future__ import division
 from __future__ import print_function
 
 from absl import flags
+import six
 
 from pysc2.lib import point
 
@@ -33,7 +34,7 @@ class PointParser(flags.ArgumentParser):
       args = [argument]
     elif isinstance(argument, (list, tuple)):
      args = argument
-    elif isinstance(argument, str):
+    elif isinstance(argument, six.string_types):
      args = argument.split(",")
     else:
      raise ValueError(
Use six.string_types to get a py2/py3 equivalent of `isinstance(arg, (str, unicode))`.

This fixes the revert in 1dde<I>.

PiperOrigin-RevId: <I>
py
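Background note (editor's gloss): on Python 2, `isinstance(argument, str)` rejects `unicode` values; `six.string_types` is `(basestring,)` on Python 2 and `(str,)` on Python 3, giving one portable check. A minimal sketch with a hypothetical parser:

    import six

    def parse_point(argument):
        # matches str and unicode on Py2, str on Py3
        if isinstance(argument, six.string_types):
            return [part.strip() for part in argument.split(",")]
        raise ValueError("expected a comma-separated string")

    print(parse_point(u"84,84"))  # ['84', '84'] on both Python 2 and 3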
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index <HASH>..<HASH> 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -173,6 +173,10 @@ _apply_docs = {
     The resulting dtype will reflect the return value of the passed ``func``,
     see the examples below.
 
+    Functions that mutate the passed object can produce unexpected
+    behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
+    for more details.
+
     Examples
     --------
     {examples}
GH<I> (#<I>) DOC
py
diff --git a/fragman/precisecodevillemerge.py b/fragman/precisecodevillemerge.py
index <HASH>..<HASH> 100644
--- a/fragman/precisecodevillemerge.py
+++ b/fragman/precisecodevillemerge.py
@@ -104,7 +104,7 @@ def recurse_matches(a, b, ahi, bhi, answer, maxrecursion):
     for i in xrange(ahi - nahi):
         answer.append((nahi + i, nbhi + i))
 
-class Weave:
+class Weave(object):
     def __init__(self):
         # [(lineid, line)]
         self.weave = []
this should not be an old-style class
py
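Background note (editor's gloss): in Python 2, `class Weave:` creates an old-style class; `type()` reports `instance` for it and `super()` rejects it, while inheriting from `object` opts into new-style semantics. Illustration (Python 2 behavior noted in comments):

    class Old:            # old-style on Python 2
        pass

    class New(object):    # new-style on Python 2 and 3
        pass

    # Python 2:
    #   type(Old())        -> <type 'instance'>
    #   type(New())        -> <class '__main__.New'>
    #   super(Old, Old())  -> TypeError: must be type, not classobj
    #   super(New, New())  -> works
    print(type(New()))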
diff --git a/pyinfra/api/state.py b/pyinfra/api/state.py
index <HASH>..<HASH> 100644
--- a/pyinfra/api/state.py
+++ b/pyinfra/api/state.py
@@ -333,7 +333,7 @@ class State(object):
 
         if self.config.FAIL_PERCENT is not None:
             percent_failed = (
-                1 - len(active_hosts) / len(self.connected_hosts)
+                1 - len(active_hosts) / self.inventory.len_all_hosts()
             ) * 100
 
             if percent_failed > self.config.FAIL_PERCENT:
Use `Inventory.len_all_hosts`, not `State.connected_hosts` for calculating failed percentages (so it works before connect).
py
diff --git a/tests/resources/test_resources.py b/tests/resources/test_resources.py
index <HASH>..<HASH> 100644
--- a/tests/resources/test_resources.py
+++ b/tests/resources/test_resources.py
@@ -188,3 +188,13 @@ def test_resource_load_proxy_href_inner(simplemm):
 
 # def test_fileuridecoder():
 #     assert File_URI_decoder.can_resolve('file://simple.ecore#//test') is True
+
+
+def test_resource_mmregistry_isolation():
+    global_registry['cdef'] = None
+    rset1 = ResourceSet()
+    rset2 = ResourceSet()
+    rset1.metamodel_registry['abcd'] = None
+    assert 'abcd' not in rset2.metamodel_registry
+    assert 'cdef' in rset2.metamodel_registry
+    assert 'cdef' in rset1.metamodel_registry
Add small unit test for rset new ChainMap

The test only checks that the per-resource-set 'metamodel_registry' is well isolated.
py
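Background note (editor's gloss): the isolation being tested is what `collections.ChainMap` provides when each resource set layers a private dict over the shared global registry; writes land in the private layer while global entries remain visible. A minimal model of that behavior (the class below is a hypothetical stand-in, not pyecore's implementation):

    from collections import ChainMap

    global_registry = {}

    class ResourceSet(object):
        def __init__(self):
            # private dict first: writes stay local, global entries show through
            self.metamodel_registry = ChainMap({}, global_registry)

    global_registry['cdef'] = None
    rset1, rset2 = ResourceSet(), ResourceSet()
    rset1.metamodel_registry['abcd'] = None   # lands in rset1's own layer

    assert 'abcd' not in rset2.metamodel_registry
    assert 'cdef' in rset1.metamodel_registry
    assert 'cdef' in rset2.metamodel_registry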
diff --git a/littlechef.py b/littlechef.py
index <HASH>..<HASH> 100644
--- a/littlechef.py
+++ b/littlechef.py
@@ -480,6 +480,8 @@ def _print_node(node):
 def _get_recipes_in_cookbook(name):
     '''Gets the name of all recipes present in a cookbook'''
     recipes = []
+    if not os.path.exists('cookbooks/' + name):
+        abort('Cookbook "%s" not found' % name)
     path = 'cookbooks/' + name + '/metadata.json'
     try:
         with open(path, 'r') as f:
Don't say metadata.json not found when cookbook doesn't exist
py
diff --git a/djcelery/admin_utils.py b/djcelery/admin_utils.py
index <HASH>..<HASH> 100644
--- a/djcelery/admin_utils.py
+++ b/djcelery/admin_utils.py
@@ -4,6 +4,11 @@ from pprint import pformat
 
 from django.utils.html import escape
 
+FIXEDWIDTH_STYLE = '''\
+<span title="%s", style="font-size: %spt; \
+font-family: Menlo, Courier; ">%s</span> \
+'''
+
 
 def attrs(**kwargs):
     def _inner(fun):
@@ -38,7 +43,7 @@ def fixedwidth(field, name=None, pt=6, width=16, maxlen=64, pretty=False):
         if len(shortval) > maxlen:
             shortval = shortval[:maxlen] + "..."
 
-        return """<span title="%s", style="font-size: %spt;\
-            font-family: Menlo, Courier; ">%s</span>""" % (
-            escape(val[:255]), pt, escape(shortval)).replace("|br/|", "<br/>")
+        styled = FIXEDWIDTH_STYLE % (escape(val[:255]), pt,
+                                     escape(shortval))
+        return styled.replace("|br/|", "<br/>")
     return f
Fixes fields vanishing from admin task view. Closes #<I>
py
diff --git a/lightning/types/utils.py b/lightning/types/utils.py
index <HASH>..<HASH> 100644
--- a/lightning/types/utils.py
+++ b/lightning/types/utils.py
@@ -235,7 +235,7 @@ def polygon_to_mask(coords, dims, z=None):
     return mask
 
 
-def polygon_to_points(coords):
+def polygon_to_points(coords, z=None):
     """
     Given a list of pairs of points which define a polygon,
     return a list of points interior to the polygon
@@ -255,6 +255,9 @@ def polygon_to_points(coords):
     points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
     points = where(points)
     points = vstack([points[0], points[1]]).T + bmin[-1::-1]
-    points = points.tolist()
+    if z is not None:
+        points = map(lambda p: (p[0], p[1], z), points)
+    else:
+        points = map(lambda p: tuple(p), points)
 
     return points
\ No newline at end of file
Handle z-index for points
py
diff --git a/ansi2html/converter.py b/ansi2html/converter.py
index <HASH>..<HASH> 100755
--- a/ansi2html/converter.py
+++ b/ansi2html/converter.py
@@ -293,14 +293,14 @@ def main():
 
     # Produce only the headers and quit
     if opts.headers:
-        print(conv.produce_headers())
+        _print(conv.produce_headers())
         return
 
     # Process input line-by-line. Produce no headers.
     if opts.partial or opts.inline:
         line = sys.stdin.readline()
         while line:
-            print(conv.convert(ansi=line, full=False)[:-1], end='\n')
+            _print(conv.convert(ansi=line, full=False)[:-1])
             line = sys.stdin.readline()
         return
Use correct output encoding for --partial.

The problem was introduced in <I>ade7a9cebfa<I>b2ca<I>c<I>e5e
py
diff --git a/bcbio/structural/plot.py b/bcbio/structural/plot.py
index <HASH>..<HASH> 100644
--- a/bcbio/structural/plot.py
+++ b/bcbio/structural/plot.py
@@ -6,6 +6,7 @@ variants.
 """
 from bcbio.pipeline import datadict as dd
 from bcbio.variation import vcfutils
+from bcbio.bam.coverage import plot_multiple_regions_coverage
 
 def _sort_by_type(x):
     """Simple prioritization to identify 'lead' items within a batch.
@@ -16,6 +17,9 @@ def _sort_by_type(x):
         priority = 1
     return [priority, dd.get_sample_name(x)]
 
+def _merge_sv_calls(calls):
+    pass
+
 def by_regions(items):
     """Plot for a union set of combined ensemble regions across all of the data items.
     """
@@ -30,6 +34,9 @@ def by_regions(items):
     # Merge SV calls into a union set
     # Make summary level plots
     # Add plots to SV information for lead item
+    # merged_bed = _merge_sv_calls(calls)
+    # plot = plot_multiple_regions_coverage(items, plot_file,
+    #                                       region_bed=merged_bed)
     pass
     print [x["description"] for x in items]
     return items
Add example code for how to make the plots.
py
diff --git a/python/thunder/utils/context.py b/python/thunder/utils/context.py
index <HASH>..<HASH> 100644
--- a/python/thunder/utils/context.py
+++ b/python/thunder/utils/context.py
@@ -405,6 +405,11 @@ class ThunderContext():
         import os
         path = os.path.dirname(os.path.realpath(__file__))
 
+        # this path might actually be inside an .egg file (appears to happen with Spark 1.2)
+        # check whether data/ directory actually exists on the filesystem, and if not, try
+        # a hardcoded path that should work on ec2 clusters launched via the thunder-ec2 script
+        if not os.path.isdir(os.path.join(path, 'data')):
+            path = "/root/thunder/python/thunder/utils"
+
         if dataset == "iris":
             return self.loadSeries(os.path.join(path, 'data/iris/iris.bin'))
@@ -413,7 +418,8 @@ class ThunderContext():
         elif dataset == "fish-images":
             return self.loadImages(os.path.join(path, 'data/fish/tif-stack'), inputformat="tif-stack")
         else:
-            raise NotImplementedError("Dataset '%s' not found" % dataset)
+            raise NotImplementedError("Dataset '%s' not known; should be one of 'iris', 'fish-series', 'fish-images'"
+                                      % dataset)
 
     def loadExampleEC2(self, dataset):
         """
workaround to find local example data on ec2 under Spark <I>
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,12 +2,20 @@ import os.path
 
 from setuptools import setup
 
+# get __version__
+__version__ = None
+exec(open(os.path.join(NAME, "about.py")).read())
+VERSION = __version__
+
+
 NAME = "pyscreenshot"
 URL = "https://github.com/ponty/pyscreenshot"
 DESCRIPTION = "python screenshot"
 LONG_DESCRIPTION = """The pyscreenshot module can be used to copy
 the contents of the screen to a Pillow image memory using various
 back-ends. Replacement for the ImageGrab Module.
-home: https://github.com/ponty/pyscreenshot"""
+Documentation: https://github.com/ponty/pyscreenshot/tree/"""
+LONG_DESCRIPTION += VERSION
+
 PACKAGES = [
     NAME,
     NAME + ".plugins",
@@ -16,10 +24,6 @@ PACKAGES = [
     NAME + ".examples",
     ]
 
-# get __version__
-__version__ = None
-exec(open(os.path.join(NAME, "about.py")).read())
-VERSION = __version__
 
 # extra = {}
 # if sys.version_info >= (3,):
setup: versioned link to home
py
diff --git a/pyvista/jupyter/pv_pythreejs.py b/pyvista/jupyter/pv_pythreejs.py
index <HASH>..<HASH> 100644
--- a/pyvista/jupyter/pv_pythreejs.py
+++ b/pyvista/jupyter/pv_pythreejs.py
@@ -496,7 +496,7 @@ def convert_renderer(pv_renderer):
     # replace inf with a real value here due to changes in
     # ipywidges==6.4.0 see
     # https://github.com/ipython/ipykernel/issues/771
-    inf = 1E9
+    inf = 1E20
 
     orbit_controls = tjs.OrbitControls(
         controlling=camera,
         maxAzimuthAngle=inf,
use larger infinity (#<I>)
py
diff --git a/maintain/commands/release.py b/maintain/commands/release.py
index <HASH>..<HASH> 100644
--- a/maintain/commands/release.py
+++ b/maintain/commands/release.py
@@ -25,15 +25,28 @@ def release(version, dry_run, bump, pull_request):
 
     releaser = AggregateReleaser()
 
+    git_releasers = filter(lambda releaser: isinstance(releaser, GitReleaser), releaser.releasers)
+    github_releasers = filter(lambda releaser: isinstance(releaser, GitHubReleaser), releaser.releasers)
+
     try:
-        git_releaser = next(filter(lambda releaser: isinstance(releaser, GitReleaser), releaser.releasers))
+        git_releaser = next(git_releasers)
     except StopIteration:
         git_releaser = None
+    except TypeError:
+        if len(git_releasers) > 0:
+            git_releaser = git_releasers[0]
+        else:
+            git_releaser = None
 
     try:
-        github_releaser = next(filter(lambda releaser: isinstance(releaser, GitHubReleaser), releaser.releasers))
+        github_releaser = next(github_releasers)
     except StopIteration:
         github_releaser = None
+    except TypeError:
+        if len(github_releasers) > 0:
+            github_releaser = github_releasers[0]
+        else:
+            github_releaser = None
 
     if pull_request and not github_releaser:
         raise Exception('Used --pull-request and no GitHub remote')
fix: Filter doesn't return an iterator on Py <I>

Restores Python <I> compatibility
py
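Background note (editor's gloss): on Python 3, `filter()` returns a lazy iterator, so `next()` works and `StopIteration` signals "no match"; on Python 2 it returns a list, and `next(a_list)` raises `TypeError`, which is what the added handler catches. Sketch of the dispatch:

    releasers = ['git', 'github', 'pypi']

    matches = filter(lambda r: r.startswith('git'), releasers)
    try:
        first = next(matches)       # Python 3: lazy iterator
    except StopIteration:           # Python 3: no element matched
        first = None
    except TypeError:               # Python 2: filter() returned a list
        first = matches[0] if matches else None

    print(first)  # 'git'

A simpler portable spelling would be `next(iter(filter(...)), None)`, which behaves the same on both versions.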
diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py
index <HASH>..<HASH> 100644
--- a/src/_pytest/fixtures.py
+++ b/src/_pytest/fixtures.py
@@ -31,6 +31,7 @@ from _pytest.compat import safe_getattr
 from _pytest.compat import TYPE_CHECKING
 from _pytest.deprecated import FIXTURE_POSITIONAL_ARGUMENTS
 from _pytest.deprecated import FUNCARGNAMES
+from _pytest.mark import ParameterSet
 from _pytest.outcomes import fail
 from _pytest.outcomes import TEST_OUTCOME
 
@@ -1263,8 +1264,6 @@ class FixtureManager:
         This things are done later as well when dealing with parametrization
         so this could be improved
         """
-        from _pytest.mark import ParameterSet
-
         parametrize_argnames = []
         for marker in node.iter_markers(name="parametrize"):
             if not marker.kwargs.get("indirect", False):
fixtures: move import of ParameterSet to top level

This is typically always used (via `getfixtureinfo`).
py
diff --git a/models/msm.py b/models/msm.py
index <HASH>..<HASH> 100644
--- a/models/msm.py
+++ b/models/msm.py
@@ -173,9 +173,9 @@ class MSM(object):
 
         if self._reversible:
             # TODO: this should be using reversible eigenvalue decomposition!
-            self._eigenvalues = anaeig(self._T, k=neig, ncv=self._ncv)
+            self._eigenvalues = anaeig(self._T, k=neig, ncv=self._ncv, reversible=True, mu=self.stationary_distribution)
         else:
-            self._eigenvalues = anaeig(self._T, k=neig, ncv=self._ncv)
+            self._eigenvalues = anaeig(self._T, k=neig, ncv=self._ncv, reversible=False)
 
     def _ensure_eigenvalues(self, neig=None):
         """ Ensures that at least neig eigenvalues have been computed """
[msm.models.msm]: added reversible eigenvalue computation
py
diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py
index <HASH>..<HASH> 100644
--- a/pandas/core/reshape/pivot.py
+++ b/pandas/core/reshape/pivot.py
@@ -446,7 +446,18 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None,
     >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
     >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
     >>> crosstab(foo, bar)  # 'c' and 'f' are not represented in the data,
-    ...                     # but they still will be counted in the output
+    ...                     # and will not be shown in the output because
+    ...                     # dropna is True by default. Set 'dropna=False'
+    ...                     # to preserve categories with no data
+    ...                     # doctest: +SKIP
+    col_0  d  e
+    row_0
+    a      1  0
+    b      0  1
+
+    >>> crosstab(foo, bar, dropna=False)  # 'c' and 'f' are not represented
+    ...                                   # in the data, but they still will be counted
+    ...                                   # and shown in the output
     ...                     # doctest: +SKIP
     col_0  d  e  f
     row_0
Reapply all patches by @testvinder against master (#<I>)
py
diff --git a/master/contrib/git_buildbot.py b/master/contrib/git_buildbot.py
index <HASH>..<HASH> 100755
--- a/master/contrib/git_buildbot.py
+++ b/master/contrib/git_buildbot.py
@@ -35,22 +35,22 @@ from twisted.internet import reactor, defer
 from optparse import OptionParser
 
 
-# Modify this to fit your setup, or pass in --master server:host on the
+# Modify this to fit your setup, or pass in --master server:port on the
 # command line
 master = "localhost:9989"
 
-# When sending the notification, send this category iff
+# When sending the notification, send this category if
 # it's set (via --category)
 category = None
 
-# When sending the notification, send this repository iff
+# When sending the notification, send this repository if
 # it's set (via --repository)
 repository = None
 
-# When sending the notification, send this project iff
+# When sending the notification, send this project if
 # it's set (via --project)
 project = None
Fixed a few comments (server:port instead of server:host, s/iff/if)
py
diff --git a/HydraLib/python/HydraLib/hydra_dateutil.py b/HydraLib/python/HydraLib/hydra_dateutil.py
index <HASH>..<HASH> 100644
--- a/HydraLib/python/HydraLib/hydra_dateutil.py
+++ b/HydraLib/python/HydraLib/hydra_dateutil.py
@@ -56,7 +56,7 @@ def get_datetime(timestamp):
     """
     #First try to use date util. Failing that, continue
    try:
-        parsed_dt = parse(timestamp, dayfirst=True)
+        parsed_dt = parse(timestamp, dayfirst=False)
        if parsed_dt.tzinfo is None:
            return parsed_dt
        else:
change day first parameter to false when parsing dates so that it picks up european dates by default. A longer-term solution should be found for this
py
diff --git a/tests/integration/shell/matcher.py b/tests/integration/shell/matcher.py
index <HASH>..<HASH> 100644
--- a/tests/integration/shell/matcher.py
+++ b/tests/integration/shell/matcher.py
@@ -312,6 +312,11 @@ class MatchTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
         '''
         Test to see if we're not auto-adding '*' and 'sys.doc' to the call
         '''
+        os_family = self.run_call('--local grains.get os_family')[1].strip()
+        if os_family == 'Arch':
+            self.skipTest('This test is failing in Arch due to a bug in salt-testing. '
+                          'Skipping until salt-testing can be upgraded. For more information, '
+                          'see https://github.com/saltstack/salt-jenkins/issues/324.')
         data = self.run_salt('-d -t 20')
         if data:
             self.assertIn('user.add:', data)
Skip the correct test for the matcher tests in Arch

I mixed up the test that was actually failing for Arch. I originally submitted #<I> to skip the test_salt_documentation test in the shell matcher tests. This is the wrong test to skip. I reverted the previous commit, and applied the skipTest to the correct test that is failing on Arch, which is the test_salt_documentation_arguments_not_assumed test.
py
diff --git a/txtorcon/torconfig.py b/txtorcon/torconfig.py
index <HASH>..<HASH> 100644
--- a/txtorcon/torconfig.py
+++ b/txtorcon/torconfig.py
@@ -463,6 +463,8 @@ class EphemeralHiddenService(object):
         self.hostname = ans['ServiceID'] + '.onion'
         if self._key_blob.startswith('NEW:'):
             self.private_key = ans['PrivateKey']
+        else:
+            self.private_key = self._key_blob
 
         log.msg('Created hidden-service at', self.hostname)
Persist the Onion Service private key provided by the user
py
diff --git a/vexbot/adapters/shell/observers.py b/vexbot/adapters/shell/observers.py
index <HASH>..<HASH> 100644
--- a/vexbot/adapters/shell/observers.py
+++ b/vexbot/adapters/shell/observers.py
@@ -39,13 +39,14 @@ def _super_parse(string: str) -> [list, dict]:
 class AuthorObserver(Observer):
     def __init__(self, add_callback=None, delete_callback=None):
         super().__init__()
-        self._authors = _LRUCache(100, add_callback, delete_callback)
+        self.authors = _LRUCache(100, add_callback, delete_callback)
 
     def on_next(self, msg: Message):
         author = msg.contents.get('author')
         if author is None:
             return
-        self._authors[author] = msg.source
+
+        self.authors[author] = msg.source
 
     def on_error(self, *args, **kwargs):
         pass
fixed name of subclass so it makes more sense
py
diff --git a/tldap/query.py b/tldap/query.py
index <HASH>..<HASH> 100644
--- a/tldap/query.py
+++ b/tldap/query.py
@@ -177,13 +177,13 @@ class QuerySet(object):
             else:
                 name,value = child
 
-                if name == "pk":
-                    name = self._cls._meta.pk
-
                 name, _, operation = name.rpartition("__")
                 if name == "":
                     name, operation = operation, None
 
+                if name == "pk":
+                    name = self._cls._meta.pk
+
                 try:
                     field = self._cls._meta.get_field_by_name(name)
                 except KeyError:
Move pk substitution to under the split.
py
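Background note (editor's gloss): `rpartition('__')` splits a lookup such as `pk__exact` into field and operation, so the `pk` alias must be resolved only after the split; done before it, `pk__exact` would never match. Self-contained sketch (the `uid` field name is a hypothetical example):

    def split_lookup(name, pk_field='uid'):
        # 'pk__exact' -> ('pk', 'exact'); 'pk' -> ('pk', None)
        name, _, operation = name.rpartition("__")
        if name == "":
            name, operation = operation, None
        if name == "pk":              # substitute after the split
            name = pk_field
        return name, operation

    print(split_lookup("pk"))         # ('uid', None)
    print(split_lookup("pk__exact"))  # ('uid', 'exact')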
diff --git a/spanner/unit_tests/test_client.py b/spanner/unit_tests/test_client.py
index <HASH>..<HASH> 100644
--- a/spanner/unit_tests/test_client.py
+++ b/spanner/unit_tests/test_client.py
@@ -108,7 +108,8 @@ class TestClient(unittest.TestCase):
     def test_instance_admin_api(self):
         from google.cloud._testing import _Monkey
         from google.cloud.spanner import client as MUT
-        client = self._makeOne(project=self.PROJECT)
+        creds = _make_credentials()
+        client = self._makeOne(project=self.PROJECT, credentials=creds)
 
         class _Client(object):
             pass
@@ -123,7 +124,8 @@ class TestClient(unittest.TestCase):
     def test_database_admin_api(self):
         from google.cloud._testing import _Monkey
         from google.cloud.spanner import client as MUT
-        client = self._makeOne(project=self.PROJECT)
+        creds = _make_credentials()
+        client = self._makeOne(project=self.PROJECT, credentials=creds)
 
         class _Client(object):
             pass
@@ -333,13 +335,6 @@ class TestClient(unittest.TestCase):
             [('google-cloud-resource-prefix', client.project_name)])
 
 
-class _Client(object):
-
-    def __init__(self, credentials, user_agent):
-        self.credentials = credentials
-        self.user_agent = user_agent
-
-
 class _Credentials(object):
 
     scopes = None
Properly mock out credentials for 'test_foo_api' tests.
py
diff --git a/sk_dsp_comm/sigsys.py b/sk_dsp_comm/sigsys.py
index <HASH>..<HASH> 100644
--- a/sk_dsp_comm/sigsys.py
+++ b/sk_dsp_comm/sigsys.py
@@ -1310,7 +1310,7 @@ def fs_approx(Xk,fk,t):
     return x_approx
 
 
-def FT_approx(x,t,Nfft):
+def ft_approx(x,t,Nfft):
     '''
     Approximate the Fourier transform of a finite duration
    signal using scipy.signal.freqz()
@@ -1368,7 +1368,7 @@ def FT_approx(x,t,Nfft):
     >>> plt.ylabel(r'$|X_0e(f)|$');
 
     >>> # FT Approximation Plot
-    >>> f,X0 = FT_approx(x0,t,4096)
+    >>> f,X0 = ft_approx(x0,t,4096)
     >>> plt.subplot(313)
     >>> plt.plot(f,abs(X0))
     >>> #plt.plot(f,angle(X0))
Changed FT_approx to ft_approx to be similar to fs_approx, etc.
py
diff --git a/udata/i18n.py b/udata/i18n.py
index <HASH>..<HASH> 100644
--- a/udata/i18n.py
+++ b/udata/i18n.py
@@ -44,6 +44,11 @@ class PluggableDomain(Domain):
 
         # Load plugins translations
         if isinstance(translations, Translations):
+            # Load core extensions translations
+            from wtforms.i18n import messages_path
+            wtforms_translations = Translations.load(messages_path(), locale, domain='wtforms')
+            translations.merge(wtforms_translations)
+
             for plugin_name in current_app.config['PLUGINS']:
                 module_name = 'udata.ext.{0}'.format(plugin_name)
                 module = import_module(module_name)
Properly handle i<I>n for wtforms
py
diff --git a/source/rafcon/mvc/controllers/state_machines_editor.py b/source/rafcon/mvc/controllers/state_machines_editor.py
index <HASH>..<HASH> 100644
--- a/source/rafcon/mvc/controllers/state_machines_editor.py
+++ b/source/rafcon/mvc/controllers/state_machines_editor.py
@@ -382,11 +382,14 @@ class StateMachinesEditorController(ExtendedController):
         notebook = self.view['notebook']
         active_state_machine_id = self.state_machine_manager_model.state_machine_manager.active_state_machine_id
 
-        page = self.get_page_for_state_machine_id(active_state_machine_id)
-
-        if page is None:
-            # logger.warning("No state machine open {0}".format(page_num))
+        if active_state_machine_id is None:
             return
+        else:
+            page = self.get_page_for_state_machine_id(active_state_machine_id)
+            if page is None:
+                # logger.warning("No state machine open {0}".format(page_num))
+                return
+
         label = notebook.get_tab_label(page).get_children()[0]
         if active:
             draw_for_all_gtk_states(label,
state machines editor: check if active_state_machine_id is not None
py
diff --git a/algoliasearch/index.py b/algoliasearch/index.py
index <HASH>..<HASH> 100644
--- a/algoliasearch/index.py
+++ b/algoliasearch/index.py
@@ -904,7 +904,7 @@ class Index(object):
 
         return self._req(False, '/batch', 'POST', data=requests)
 
-    def search_facet(self, facet_name, facet_query, query=None):
+    def search_for_facet_values(self, facet_name, facet_query, query=None):
         """
         Perform a search within a given facet's values
         @param facet_name name of the facet to search. It must have been
@@ -922,6 +922,11 @@ class Index(object):
         path = '/facets/%s/query' % safe(facet_name)
         return self._req(True, path, 'POST', data={'params' : urlencode(urlify(query))})
 
+    def search_facet(self, facet_name, facet_query, query=None):
+        return self.search_for_facet_values(facet_name, facet_query, query)
+
+
     def _req(self, is_search, path, meth, params=None, data=None):
         """Perform an HTTPS request with retry logic."""
         path = '%s%s' % (self._request_path, path)
rename search_facet in a backward-compatible way
py
diff --git a/categories/__init__.py b/categories/__init__.py
index <HASH>..<HASH> 100644
--- a/categories/__init__.py
+++ b/categories/__init__.py
@@ -2,7 +2,7 @@ __version_info__ = {
     'major': 1,
     'minor': 3,
     'micro': 0,
-    'releaselevel': 'final',
+    'releaselevel': 'beta',
     'serial': 1
 }
Version bump to <I>b1
py
diff --git a/imagemounter/volume.py b/imagemounter/volume.py
index <HASH>..<HASH> 100644
--- a/imagemounter/volume.py
+++ b/imagemounter/volume.py
@@ -47,6 +47,7 @@ class Volume(object):
         self.size = 0
         self.flag = 'alloc'
         self.fsdescription = None
+        self._internal_fstype = None
 
         # Should be filled by fill_stats
         self.lastmountpoint = None
@@ -135,7 +136,9 @@ class Volume(object):
         """
 
         # Determine fs type. If forced, always use provided type.
-        if str(self.index) in self.fstypes:
+        if self._internal_fstype is not None:
+            fstype = self._internal_fstype
+        elif str(self.index) in self.fstypes:
             fstype = self.fstypes[str(self.index)]
         elif self.fsforce:
             fstype = self.fsfallback
@@ -166,6 +169,7 @@ class Volume(object):
 
         if fstype:
             self._debug(" Detected {0} as {1}".format(fsdesc, fstype))
+            self._internal_fstype = fstype
         return fstype
 
     def get_raw_base_path(self):
Cache the internal FS type for efficiency.
py
diff --git a/tests/test_dnlds.py b/tests/test_dnlds.py
index <HASH>..<HASH> 100644
--- a/tests/test_dnlds.py
+++ b/tests/test_dnlds.py
@@ -29,10 +29,10 @@ def test_dnlds(prt=sys.stdout):
     dnld_ontology(file_dst)
     # Test downloading of associations from NCBI.
     file_assc = os.path.join(cwd, "gene2go")
-    os.system("rm -f {FILE}".format(FILE=file_assc))
-    download_ncbi_associations(file_assc, prt, loading_bar=None)
+    #os.system("rm -f {FILE}".format(FILE=file_assc))
     #download_ncbi_associations(file_assc, prt, loading_bar=None)
-    assert os.path.isfile(file_assc), "FILE({F}) EXPECTED TO EXIST".format(F=file_assc)
+    #download_ncbi_associations(file_assc, prt, loading_bar=None)
+    #assert os.path.isfile(file_assc), "FILE({F}) EXPECTED TO EXIST".format(F=file_assc)
 
 def dnld_ontology(filename):
     """Test downloading of ontologies."""
Comment out download of gene2go on NCBI's ftp server
py
diff --git a/test.py b/test.py
index <HASH>..<HASH> 100644
--- a/test.py
+++ b/test.py
@@ -181,7 +181,7 @@ class LiSETest(unittest.TestCase):
             self.engine._things_cache
         )
 
-    def test_roommate_collisions(self):
+    def testRoommateCollisions(self):
        """Test queries' ability to tell that all of the students that share rooms have been in the same place."""
        done = set()
        for chara in self.engine.character.values():
camelCase all method names, to match unittest module style
py
diff --git a/conf.py b/conf.py
index <HASH>..<HASH> 100644
--- a/conf.py
+++ b/conf.py
@@ -57,7 +57,7 @@ author = 'Martin Majlis'
 # The short X.Y version.
 version = "0.2"
 # The full version, including alpha/beta/rc tags.
-release = "0.2.2"
+release = "0.2.3"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
Update version to <I> in conf.py
py
diff --git a/meshio/vtk_io.py b/meshio/vtk_io.py
index <HASH>..<HASH> 100755
--- a/meshio/vtk_io.py
+++ b/meshio/vtk_io.py
@@ -278,7 +278,10 @@ def _read_sub_section(f, info):
             data_type = info.split[2].lower()
             d[info.section] = _read_coords(f, data_type, info.is_ascii, info.num_points)
     else:
-        d[info.section] = list(map(int, info.split[1:]))
+        if info.section == "DIMENSIONS":
+            d[info.section] = list(map(int, info.split[1:]))
+        else:
+            d[info.section] = list(map(float, info.split[1:]))
         assert (
             len(d[info.section]) == 3
         ), "Wrong number of info in section '{}'. Need 3, got {}.".format(
Bugfix: only DIMENSIONS are integer in DATASET
py
diff --git a/tests/risk_unittest.py b/tests/risk_unittest.py
index <HASH>..<HASH> 100644
--- a/tests/risk_unittest.py
+++ b/tests/risk_unittest.py
@@ -1403,7 +1403,7 @@ class RiskJobGeneralTestCase(unittest.TestCase):
     def _prepare_bcr_result(self):
         self.job.blocks_keys = [19, 20]
         kvs.set_value_json_encoded(kvs.tokens.bcr_block_key(self.job_id, 19), [
-            ((19.0, -1.1), [
+            ((-1.1, 19.0), [
                 ({'bcr': 35.1, 'eal_original': 12.34, 'eal_retrofitted': 4},
                  'assetID-191'),
                 ({'bcr': 35.2, 'eal_original': 2.5, 'eal_retrofitted': 2.2},
                  'assetID-192'),
             ])
         ])
@@ -1411,7 +1411,7 @@ class RiskJobGeneralTestCase(unittest.TestCase):
         kvs.set_value_json_encoded(kvs.tokens.bcr_block_key(self.job_id, 20), [
-            ((20.0, 2.3), [
+            ((2.3, 20.0), [
                 ({'bcr': 35.1, 'eal_original': 1.23, 'eal_retrofitted': 0.3},
                  'assetID-201'),
                 ({'bcr': 35.2, 'eal_original': 4, 'eal_retrofitted': 0.4},
flip canned lat/lon coords in test to reflect code changes
py
diff --git a/lyricfetch/cli.py b/lyricfetch/cli.py
index <HASH>..<HASH> 100644
--- a/lyricfetch/cli.py
+++ b/lyricfetch/cli.py
@@ -25,12 +25,8 @@ def load_from_file(filename):
         logger.error("Err: File '%s' does not exist", filename)
         return None
 
-    try:
-        with open(filename, 'r') as sourcefile:
-            songs = [line.strip() for line in sourcefile]
-    except IOError as error:
-        logger.exception(error)
-        return None
+    with open(filename) as sourcefile:
+        songs = [line.strip() for line in sourcefile]
 
     songs = set(Song.from_filename(song) for song in songs)
     return songs.difference({None})  # In case any were in the wrong format
Let IOErrors slip when reading filenames from file
py
diff --git a/tests/integration/config.py b/tests/integration/config.py
index <HASH>..<HASH> 100644
--- a/tests/integration/config.py
+++ b/tests/integration/config.py
@@ -8,6 +8,9 @@ class TestConfig(object):
     SQLALCHEMY_DATABASE_URI = "sqlite://"
     SQLALCHEMY_ECHO = False
 
+    CELERY_ALWAYS_EAGER = True  # run tasks locally, no async
+    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
+
     CSRF_ENABLED = False
     SECRET_KEY = "tototiti"
     SALT = "retwis"
celery config for tests: no async, propagate exceptions
py
diff --git a/salt/modules/apt.py b/salt/modules/apt.py
index <HASH>..<HASH> 100644
--- a/salt/modules/apt.py
+++ b/salt/modules/apt.py
@@ -844,8 +844,8 @@ def mod_repo(repo, refresh=False, **kwargs):
                 )
             )
     elif 'key_url' in kwargs:
-        key_url = kwargs.pop('key_url', None)
-        cmd = 'wget -q -O- "{0}" | apt-key add -'.format(key_url)
+        fn_ = __salt__['cp.cache_file'](kwargs['key_url'])
+        cmd = 'apt-key add {0}'.format(fn_)
         out = __salt__['cmd.run_stdout'](cmd)
         if not out.upper().startswith('OK'):
             error_str = 'Error: key retrieval failed: {0}'
Fix #<I>

The apt code here is raising an exception; this most likely should not be happening.
py
diff --git a/plaso/multi_processing/process_info.py b/plaso/multi_processing/process_info.py
index <HASH>..<HASH> 100644
--- a/plaso/multi_processing/process_info.py
+++ b/plaso/multi_processing/process_info.py
@@ -62,11 +62,17 @@ class ProcessInfo(object):
         lib, data, dirty, percent.
     """
     try:
-      external_information = self._process.get_ext_memory_info()
+      if self._psutil_pre_v2:
+        external_information = self._process.get_ext_memory_info()
+      else:
+        external_information = self._process.memory_info_ex()
     except psutil.NoSuchProcess:
      return
 
-    percent = self._process.get_memory_percent()
+    if self._psutil_pre_v2:
+      percent = self._process.get_memory_percent()
+    else:
+      percent = self._process.memory_percent()
 
     # Psutil will return different memory information depending on what is
     # available in that platform.
Pull request: <I>: Added psutil version 2 and later compatibility checks #<I>
py
diff --git a/ocrmypdf/pipeline.py b/ocrmypdf/pipeline.py
index <HASH>..<HASH> 100644
--- a/ocrmypdf/pipeline.py
+++ b/ocrmypdf/pipeline.py
@@ -787,6 +787,8 @@ def ocr_tesseract_textonly_pdf(
         tessconfig=options.tesseract_config,
         timeout=options.tesseract_timeout,
         pagesegmode=options.tesseract_pagesegmode,
+        user_words=options.user_words,
+        user_patterns=options.user_patterns,
         log=log)
Fix missing user_words/user_patterns from textonly_pdf case
py
diff --git a/core/dbt/rpc/logger.py b/core/dbt/rpc/logger.py
index <HASH>..<HASH> 100644
--- a/core/dbt/rpc/logger.py
+++ b/core/dbt/rpc/logger.py
@@ -89,6 +89,8 @@ class QueueTimeoutMessage(QueueMessage):
 
 class QueueLogHandler(logbook.queues.MultiProcessingHandler):
     def emit(self, record: logbook.LogRecord):
+        # trigger the cached properties here
+        record.pull_information()
         self.queue.put_nowait(QueueLogMessage.from_record(record))
 
     def emit_error(self, error: JSONRPCError):
call pull_information() before we enqueue the log record
py
diff --git a/nion/swift/test/Symbolic_test.py b/nion/swift/test/Symbolic_test.py
index <HASH>..<HASH> 100644
--- a/nion/swift/test/Symbolic_test.py
+++ b/nion/swift/test/Symbolic_test.py
@@ -342,7 +342,7 @@ class TestSymbolicClass(unittest.TestCase):
         computation = document_model.create_computation(Symbolic.xdata_expression("xd.fft(a.xdata)"))
         computation.create_object("a", document_model.get_object_specifier(data_item))
         data = DocumentModel.evaluate_data(computation).data
-        assert numpy.array_equal(data, scipy.fftpack.fftshift(scipy.fftpack.fft2(d) * 1.0 / numpy.sqrt(d.shape[1] * d.shape[0])))
+        assert numpy.array_equal(data, scipy.fftpack.fftshift(numpy.fft.fft2(d) * 1.0 / numpy.sqrt(d.shape[1] * d.shape[0])))
 
     def test_gaussian_blur_handles_scalar_argument(self):
         document_model = DocumentModel.DocumentModel()
Update test to match new fft call.
py
diff --git a/webstack_django_sorting/templatetags/sorting_tags.py b/webstack_django_sorting/templatetags/sorting_tags.py
index <HASH>..<HASH> 100644
--- a/webstack_django_sorting/templatetags/sorting_tags.py
+++ b/webstack_django_sorting/templatetags/sorting_tags.py
@@ -1,6 +1,6 @@
 from operator import attrgetter
 
-from django import template, VERSION as DJANGO_VERSION
+from django import template
 from django.http import Http404
 from django.utils.translation import ugettext_lazy as _
 
@@ -145,12 +145,7 @@ class SortedDataNode(template.Node):
         # Python sorting if not a field
         field = ordering[1:] if ordering[0] == "-" else ordering
-
-        if DJANGO_VERSION < (1, 8):
-            field_names = queryset.model._meta.get_all_field_names()
-        else:
-            field_names = [f.name for f in queryset.model._meta.get_fields()]
-
+        field_names = [f.name for f in queryset.model._meta.get_fields()]
         return field not in field_names
 
     def sort_queryset(self, queryset, ordering):
Remove Django <I> support
py
diff --git a/awkward/pandas/__init__.py b/awkward/pandas/__init__.py
index <HASH>..<HASH> 100644
--- a/awkward/pandas/__init__.py
+++ b/awkward/pandas/__init__.py
@@ -1,9 +1,9 @@
 from .base import (
-    AwkwardArray,
     AwkwardType,
+    AwkwardAccessor,
 )
 
 __all__ = [
     'AwkwardType',
-    'AwkwardArray',
+    'AwkwardAcessor',
 ]
[WIP] Removed 'AwkwardArray' instance from __init__ (not sure if __init__ is needed)
py
diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py
index <HASH>..<HASH> 100644
--- a/src/transformers/pipelines/question_answering.py
+++ b/src/transformers/pipelines/question_answering.py
@@ -228,8 +228,8 @@ class QuestionAnsweringPipeline(ChunkPipeline):
             max_answer_len (`int`, *optional*, defaults to 15):
                 The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
             max_seq_len (`int`, *optional*, defaults to 384):
-                The maximum length of the total sentence (context + question) after tokenization. The context will be
-                split in several chunks (using `doc_stride`) if needed.
+                The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
+                model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
             max_question_len (`int`, *optional*, defaults to 64):
                 The maximum length of the question after tokenization. It will be truncated if needed.
             handle_impossible_answer (`bool`, *optional*, defaults to `False`):
Updating the docs for `max_seq_len` in QA pipeline (#<I>)
py
diff --git a/telemetry/telemetry/page/page.py b/telemetry/telemetry/page/page.py
index <HASH>..<HASH> 100644
--- a/telemetry/telemetry/page/page.py
+++ b/telemetry/telemetry/page/page.py
@@ -6,6 +6,8 @@ import os
 import re
 import urlparse
 
+from telemetry import decorators
+
 
 class Page(object):
 
   def __init__(self, url, page_set, attributes=None, base_dir=None):
@@ -40,6 +42,17 @@ class Page(object):
       raise AttributeError(
          '%r object has no attribute %r' % (self.__class__, name))
 
+  @decorators.Cache
+  def GetSyntheticDelayCategories(self):
+    if not hasattr(self, 'synthetic_delays'):
+      return []
+    result = []
+    for delay, options in self.synthetic_delays.items():
+      options = '%f;%s' % (options.get('target_duration', 0),
+                           options.get('mode', 'static'))
+      result.append('DELAY(%s;%s)' % (delay, options))
+    return result
+
   def __lt__(self, other):
     return self.url < other.url
Convert smoothness to the new timeline based metric API. This is a reland of <URL>
py
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py
index <HASH>..<HASH> 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_flags_test.py
@@ -94,10 +94,10 @@ class _GenericHandler(grpc.GenericRpcHandler):
 
 
 def get_free_loopback_tcp_port():
-    tcp = socket.socket(socket.AF_INET6)
+    tcp = socket.socket(socket.AF_INET)
     tcp.bind(('', 0))
     address_tuple = tcp.getsockname()
-    return tcp, "[::1]:%s" % (address_tuple[1])
+    return tcp, "localhost:%s" % (address_tuple[1])
 
 
 def create_dummy_channel():
Kokoro is v4-only
py
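Background note (editor's gloss): binding an `AF_INET` socket to port 0 asks the kernel for a free IPv4 ephemeral port, and `getsockname()` reports which one was picked; keeping the socket open holds the reservation. Runnable sketch of the helper after the change:

    import socket

    def get_free_loopback_tcp_port():
        # port 0 = let the OS choose; the open socket holds the reservation
        tcp = socket.socket(socket.AF_INET)
        tcp.bind(('', 0))
        address_tuple = tcp.getsockname()
        return tcp, "localhost:%s" % (address_tuple[1],)

    sock, target = get_free_loopback_tcp_port()
    print(target)   # e.g. localhost:54321
    sock.close()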
diff --git a/koala/ast/graph.py b/koala/ast/graph.py
index <HASH>..<HASH> 100644
--- a/koala/ast/graph.py
+++ b/koala/ast/graph.py
@@ -994,7 +994,7 @@ def cell2code(named_ranges, cell, sheet):
         code = root.emit(ast, context=sheet)
     else:
         ast = None
-        code = str('"' + cell.value + '"' if isinstance(cell.value,unicode) else cell.value)
+        code = str('"' + cell.value.encode('utf-8') + '"' if isinstance(cell.value,unicode) else cell.value)
     return code,ast
 
 class ExcelCompiler(object):
@@ -1088,7 +1088,7 @@ class ExcelCompiler(object):
         deps = [x.tvalue.replace('$','') for x in ast.nodes() if isinstance(x,RangeNode)]
         # remove dupes
         deps = uniqueify(deps)
-        
+
         ###### 2) connect dependencies in cells in graph ####################
 
         # ### LOG
[bug] fixed encoding issue in cell2code
py
diff --git a/pydoop/pure/pipes.py b/pydoop/pure/pipes.py
index <HASH>..<HASH> 100644
--- a/pydoop/pure/pipes.py
+++ b/pydoop/pure/pipes.py
@@ -235,6 +235,11 @@ class StreamRunner(object):
     def run_map(self, input_split, n_reduces, piped_input):
         logger.debug('start run_map')
         factory, ctx = self.factory, self.ctx
+
+        cmd, args = self.cmd_stream.next()
+        if cmd == "setInputTypes" and piped_input:
+            ctx._input_key_class, ctx._input_value_class = args
+
         reader = factory.create_record_reader(ctx)
         if reader is None and piped_input:
             raise PydoopError('RecordReader not defined')
Bug <I> fix: the 'setInputTypes' is now correctly handled when piped_input is enabled
py
diff --git a/fedmsg/core.py b/fedmsg/core.py
index <HASH>..<HASH> 100644
--- a/fedmsg/core.py
+++ b/fedmsg/core.py
@@ -96,7 +96,7 @@ class FedMsgContext(object):
         are emitting detectable heartbeats.
         """
 
-        topic = self.c['topic_prefix'] + '.heartbeat'
+        topic = self.c['topic_prefix'] + '._heartbeat'
 
         # TODO - include endpoint name in the results dict
         results = dict(zip(endpoints.values(), [False] * len(endpoints)))
Fixed old reference heartbeat -> _heartbeat.
py
diff --git a/Lib/fontParts/base/normalizers.py b/Lib/fontParts/base/normalizers.py
index <HASH>..<HASH> 100644
--- a/Lib/fontParts/base/normalizers.py
+++ b/Lib/fontParts/base/normalizers.py
@@ -100,6 +100,10 @@ def normalizeKerningKey(value):
             raise FontPartsError("Kerning key items must be strings, not %s." % type(v).__name__)
         if len(v) < 1:
             raise FontPartsError("Kerning key items must be one character long")
+    if value[0].startswith("@") and not value[0].startswith("@public.kern1."):
+        raise FontPartsError("Left Kerning key group must start with @public.kern1.")
+    if value[1].startswith("@") and not value[1].startswith("@public.kern2."):
+        raise FontPartsError("Right Kerning key group must start with @public.kern2.")
     return tuple([unicode(v) for v in value])
Check that groups in the kerning keys contain the correct prefix.
py
diff --git a/ftr/version.py b/ftr/version.py
index <HASH>..<HASH> 100644
--- a/ftr/version.py
+++ b/ftr/version.py
@@ -1,2 +1,2 @@
 
-version = '0.6.4'
+version = '0.7'
version bump for <I>.
py
diff --git a/tests/basics/getitem.py b/tests/basics/getitem.py
index <HASH>..<HASH> 100644
--- a/tests/basics/getitem.py
+++ b/tests/basics/getitem.py
@@ -21,6 +21,12 @@ try:
 except StopIteration:
     pass
 
+# this class raises an IndexError to stop the iteration
+class A:
+    def __getitem__(self, i):
+        raise IndexError
+print(list(A()))
+
 # this class raises a non-StopIteration exception on iteration
 class A:
     def __getitem__(self, i):
tests: Add test where __getitem__ raises IndexError to stop iteration.
py
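Background note (editor's gloss): this exercises the old sequence-iteration protocol, where Python calls `__getitem__(0)`, `__getitem__(1)`, ... until an `IndexError` terminates the loop. Minimal demonstration:

    class Squares:
        def __getitem__(self, i):
            # iteration indexes from 0 upward until IndexError ends it
            if i >= 5:
                raise IndexError
            return i * i

    print(list(Squares()))  # [0, 1, 4, 9, 16]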
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -62,6 +62,10 @@ else:
     long_description = ""
 
 
+needs_pytest = set(['pytest', 'test', 'ptr']).intersection(sys.argv)
+pytest_runner = ['pytest-runner'] if needs_pytest else []
+
+
 def ensure_scripts(linux_scripts):
     """Creates the proper script names required for each platform
     (taken from 4Suite)
@@ -143,7 +147,7 @@ def install(**kwargs):
         extras_require=extras_require,
         test_suite="test",
         python_requires=">=3.5.*",
-        setup_requires=["pytest-runner"],
+        setup_requires=pytest_runner,
         tests_require=["pytest"],
         **kwargs
     )
Make installing pytest-runner contingent on running tests

This monkeypatches several changes from [PyCQA/mccabe](<URL>) over to the pylint repo, since the pattern was equivalent between the two projects.
py
diff --git a/src/unity/python/turicreate/test/test_activity_classifier.py b/src/unity/python/turicreate/test/test_activity_classifier.py
index <HASH>..<HASH> 100644
--- a/src/unity/python/turicreate/test/test_activity_classifier.py
+++ b/src/unity/python/turicreate/test/test_activity_classifier.py
@@ -184,7 +184,7 @@ class ActivityClassifierTest(unittest.TestCase):
         if _mac_ver() >= (10, 13):
             w = self.prediction_window
-            labels = map(str, sorted(self.model._target_id_map.keys()))
+            labels = list(map(str, sorted(self.model._target_id_map.keys())))
 
             data_list = [dataset[f].to_numpy()[:, np.newaxis] for f in self.features]
             np_data = np.concatenate(data_list, 1)[np.newaxis]
Python 3 fix in AC unit test

`labels` is a generator and gets consumed the first time it is used, leaving coreml_time1_values empty.
py
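Background note (editor's gloss): on Python 3, `map()` returns a one-shot iterator, so a second consumer sees nothing unless the result is materialized with `list()`. Demonstration:

    labels = map(str, sorted([3, 1, 2]))
    print(list(labels))  # ['1', '2', '3']
    print(list(labels))  # [] -- already exhausted

    labels = list(map(str, sorted([3, 1, 2])))
    print(list(labels))  # ['1', '2', '3'], reusable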
diff --git a/src/python/src/grpc/_adapter/_links_test.py b/src/python/src/grpc/_adapter/_links_test.py
index <HASH>..<HASH> 100644
--- a/src/python/src/grpc/_adapter/_links_test.py
+++ b/src/python/src/grpc/_adapter/_links_test.py
@@ -40,7 +40,7 @@ from grpc.framework.base import interfaces
 from grpc.framework.foundation import logging_pool
 
 _IDENTITY = lambda x: x
-_TIMEOUT = 2
+_TIMEOUT = 32
 
 # TODO(nathaniel): End-to-end metadata testing.
Increase test timeout

The previous timeout was short enough that it led to erroneous failures. Fixes #<I>.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -5,6 +5,12 @@ JSON Tokens
 """
 
 from setuptools import setup, find_packages
+import unittest
+
+def get_test_suite():
+    test_loader = unittest.TestLoader()
+    test_suite = test_loader.discover('.', pattern='unit_tests.py')
+    return test_suite
 
 setup(
     name='jsontokens',
@@ -17,6 +23,7 @@ setup(
     keywords='json web token sign verify encode decode signature',
     packages=find_packages(),
     zip_safe=False,
+    test_suite="setup.get_test_suite",
     install_requires=[
         'cryptography>=1.9',
         'keylib>=0.1.1',
add ./setup.py test support
py
diff --git a/ah_bootstrap.py b/ah_bootstrap.py
index <HASH>..<HASH> 100644
--- a/ah_bootstrap.py
+++ b/ah_bootstrap.py
@@ -91,6 +91,17 @@ except:
     use_setuptools()
 
 
+# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
+# initializing submodule with ah_boostrap.py
+# See discussion and references in
+# https://github.com/astropy/astropy-helpers/issues/302
+
+try:
+    import typing   # noqa
+except ImportError:
+    pass
+
+
 # Note: The following import is required as a workaround to
 # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
 # module now, it will get cleaned up after `run_setup` is called, but that will
Importing typing to avoid issues later
py
diff --git a/inginious/frontend/pages/course_admin/task_edit.py b/inginious/frontend/pages/course_admin/task_edit.py
index <HASH>..<HASH> 100644
--- a/inginious/frontend/pages/course_admin/task_edit.py
+++ b/inginious/frontend/pages/course_admin/task_edit.py
@@ -208,8 +208,11 @@ class CourseEditTask(INGIniousAdminPage):
                 if count > 1:
                     return json.dumps({"status": "error", "message": _("Some tags have the same id! The id of a tag must be unique.")})
 
-        data = {key: val for key, val in data.items() if not key.startswith("problem") and not key.startswith("limits") and not key.startswith("tags")}
+        data = {key: val for key, val in data.items() if
+                not key.startswith("problem")
+                and not key.startswith("limits")
+                and not key.startswith("tags")
+                and not key.startswith("/")}
         del data["@action"]
 
         # Determines the task filetype
Ensure that code is not pushed into yaml
py
diff --git a/src/crate/client/sqlalchemy/types.py b/src/crate/client/sqlalchemy/types.py
index <HASH>..<HASH> 100644
--- a/src/crate/client/sqlalchemy/types.py
+++ b/src/crate/client/sqlalchemy/types.py
@@ -47,6 +47,9 @@ class MutableList(Mutable, list):
         list.__setitem__(self, key, value)
         self.changed()
 
+    def __eq__(self, other):
+        return list.__eq__(self, other)
+
     def append(self, item):
         list.append(self, item)
         self.changed()
@@ -122,6 +125,9 @@ class MutableDict(Mutable, dict):
             return MutableDict(value, self.to_update, overwrite_key)
         return value
 
+    def __eq__(self, other):
+        return dict.__eq__(self, other)
+
 
 class _Craty(sqltypes.UserDefinedType):
add __eq__ operator to MutableDict and MutableList
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ exec(read('tinydb/version.py'))
 setup(
     name="tinydb",
     version=__version__,
-    packages=find_packages(),
+    packages=find_packages(exclude=['tests']),
 
     # development metadata
     zip_safe=True,
Avoid installing tests as a package.
py
diff --git a/tests/conftest.py b/tests/conftest.py
index <HASH>..<HASH> 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,7 +10,7 @@ import pytest
 import ipopt
 
 
[email protected](scope=module)
[email protected](scope="module")
 def hs071_fixture():
     """Return a default implementation of the hs071 test problem.
Fix missing quote marks denoting string in fixture decorator scope kwarg
py
diff --git a/pypsa/opt.py b/pypsa/opt.py
index <HASH>..<HASH> 100644
--- a/pypsa/opt.py
+++ b/pypsa/opt.py
@@ -146,6 +146,10 @@ def l_constraint(model,name,constraints,*args):
 
     where constraints is a dictionary of constraints of the form:
 
+    constraints[i] = LConstraint object
+
+    OR using the soon-to-be-deprecated list format:
+
     constraints[i] = [[(coeff1,var1),(coeff2,var2),...],sense,constant_term]
 
     i.e. the first argument is a list of tuples with the variables and their
Fix l_constraint docstring
py
diff --git a/tests/test_definitions.py b/tests/test_definitions.py
index <HASH>..<HASH> 100644
--- a/tests/test_definitions.py
+++ b/tests/test_definitions.py
@@ -208,6 +208,8 @@ def test_convert_magicc7_to_openscm_variables(magicc7, openscm):
             "Effective Radiative Forcing|Aerosols|Direct Effect|NH3|MAGICC Fossil and Industrial",
         ),
         ("TOTAL_INCLVOLCANIC_ERF", "Effective Radiative Forcing"),
+        ("TOTAL_INCLVOLCANIC_EFFRF", "TOTAL_INCLVOLCANIC_EFFRF"),
+        ("CH4_EFFRF", "CH4_EFFRF"),
         ("SLR_TOT", "Sea Level Rise"),
     ],
 )
Check that the old EFFRF values are not converted to Effective Radiative Forcing
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,10 +14,12 @@ class build_ext_first(build_py_orig):
 DEFAULT = ["mupdf", "mupdf-third"]
 ARCH_LINUX = DEFAULT + ["jbig2dec", "openjp2", "jpeg", "freetype"]
 OPENSUSE = ARCH_LINUX + ["harfbuzz", "png16"]
+FEDORA = ARCH_LINUX + ["harfbuzz"]
 LIBRARIES = {
     "default": DEFAULT,
     "arch": ARCH_LINUX,
     "opensuse": OPENSUSE,
+    "fedora": FEDORA,
 }
Load libraries on Fedora
py
diff --git a/usb1.py b/usb1.py
index <HASH>..<HASH> 100644
--- a/usb1.py
+++ b/usb1.py
@@ -43,6 +43,10 @@ class USBAsyncReaderBase(object):
         self._event_callback_dict = {}
         self._errorCallback = DEFAULT_ASYNC_TRANSFER_ERROR_CALLBACK
 
+    def _getTransfer(self, handle, endpoint, data, callbackDispatcher,
+                     user_data, timeout):
+        raise NotImplementedError
+
     def submit(self):
         self._submited = True
         self._handle.submitTransfer(self._transfer)
Make it clear USBAsyncReaderBase is an abstract class.
py
diff --git a/checkers/python3.py b/checkers/python3.py
index <HASH>..<HASH> 100644
--- a/checkers/python3.py
+++ b/checkers/python3.py
@@ -68,7 +68,7 @@ class Python3Checker(checkers.BaseChecker):
                   "instead of 'raise foo(bar)'.",
                   {'maxversion': (3, 0),
                    'old_names': [('W0121', 'old-raise-syntax')]}),
-        'E1606': ('Use of the `` operator',
+        'E1605': ('Use of the `` operator',
                   'backtick',
                   'Used when the deprecated "``" (backtick) operator is used '
                   'instead of the str() function.',
@@ -363,7 +363,7 @@ class Python3TokenChecker(checkers.BaseTokenChecker):
     __implements__ = interfaces.ITokenChecker
     name = 'python3'
     msgs = {
-        'E1605': ('Use of long suffix',
+        'E1606': ('Use of long suffix',
                   'long-suffix',
                   'Used when "l" or "L" is used to mark a long integer. '
                   'This will not work in Python 3, since `int` and `long` '
Change the message ids, for consistency.
py
diff --git a/gwpy/timeseries/io/gwf/lalframe.py b/gwpy/timeseries/io/gwf/lalframe.py
index <HASH>..<HASH> 100644
--- a/gwpy/timeseries/io/gwf/lalframe.py
+++ b/gwpy/timeseries/io/gwf/lalframe.py
@@ -147,6 +147,7 @@ def read(source, channels, start=None, end=None, series_class=TimeSeries,
     start = max(epoch, lalutils.to_lal_ligotimegps(start))
     if end is None:
         end = epoch + streamdur
+    end = min(epoch + streamdur, lalutils.to_lal_ligotimegps(end))
     duration = float(end - start)
 
     # read data
gwpy.timeseries: only read data we can when reading GWF data with lalframe

`TimeSeries.read` handles missing data on its own. Fixes #<I>
py
diff --git a/salt/config.py b/salt/config.py
index <HASH>..<HASH> 100644
--- a/salt/config.py
+++ b/salt/config.py
@@ -901,7 +901,7 @@ def syndic_config(master_config_path,
     opts.update(master_opts)
     opts.update(minion_opts)
     syndic_opts = {
-        'role': 'syndic',
+        '__role': 'syndic',
         'root_dir': opts.get('root_dir', salt.syspaths.ROOT_DIR),
         'pidfile': opts.get('syndic_pidfile', 'salt-syndic.pid'),
         'log_file': opts.get('syndic_log_file', 'salt-syndic.log'),
@@ -1917,7 +1917,7 @@ def apply_minion_config(overrides=None,
         defaults = DEFAULT_MINION_OPTS
 
     opts = defaults.copy()
-    opts['role'] = 'minion'
+    opts['__role'] = 'minion'
 
     if overrides:
         opts.update(overrides)
@@ -2023,7 +2023,7 @@ def apply_master_config(overrides=None, defaults=None):
         defaults = DEFAULT_MASTER_OPTS
 
     opts = defaults.copy()
-    opts['role'] = 'master'
+    opts['__role'] = 'master'
 
     if overrides:
         opts.update(overrides)
role --> __role
py
diff --git a/troposphere/utils.py b/troposphere/utils.py
index <HASH>..<HASH> 100644
--- a/troposphere/utils.py
+++ b/troposphere/utils.py
@@ -37,5 +37,5 @@ def tail(conn, stack_name, log_func=_tail_print, sleep_time=5,
         for e in events:
             if e.event_id not in seen:
                 log_func(e)
-            seen.add(e.event_id)
+                seen.add(e.event_id)
         time.sleep(sleep_time)
tail: only add unseen events

Since you already know whether the event was seen or not, there is no point in re-adding it.
py
diff --git a/visidata/vdtui.py b/visidata/vdtui.py
index <HASH>..<HASH> 100755
--- a/visidata/vdtui.py
+++ b/visidata/vdtui.py
@@ -1762,6 +1762,8 @@ class Sheet(BaseSheet):
         rowattrs = {}  # [rowidx] -> attr
         colattrs = {}  # [colidx] -> attr
 
+        isNull = isNullFunc()
+
         self.rowLayout = {}
         self.calcColLayout()
         vcolidx = 0
@@ -1784,7 +1786,10 @@ class Sheet(BaseSheet):
                     row = rows[rowidx]
                     cellval = col.getCell(row, colwidth-1)
-#
+                    if isNull(cellval.value):
+                        cellval.note = options.disp_note_none
+                        cellval.notecolor = 'color_note_type'
+
                     attr = self.colorize(col, row, cellval)
 
                     # sepattr is the attr between cell/columns
[vdtui null] show null note according to isNull #<I>
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -26,7 +26,7 @@ import twine
 # -- General configuration ----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-# needs_sphinx = '1.0'
+needs_sphinx = '1.7.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
Require Sphinx <I> Make docs/conf.py consistent with docs/requirements.txt.
py
diff --git a/tensorboard/main.py b/tensorboard/main.py index <HASH>..<HASH> 100644 --- a/tensorboard/main.py +++ b/tensorboard/main.py @@ -21,7 +21,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import logging as base_logging import os import socket import sys @@ -222,14 +221,10 @@ def run_simple_server(tb_app): # An error message was already logged # TODO(@jart): Remove log and throw anti-pattern. sys.exit(-1) - logger = base_logging.getLogger('tensorflow' + util.LogHandler.EPHEMERAL) - logger.setLevel(base_logging.INFO) - logger.info('TensorBoard %s at %s (Press CTRL+C to quit) ', - version.VERSION, url) - try: - server.serve_forever() - finally: - logger.info('') + sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' % + (version.VERSION, url)) + sys.stderr.flush() + server.serve_forever() def main(unused_argv=None):
Don't ephemerally log the server URL for now (#<I>) This needs to be fine-tuned a bit more, in order to ensure it always appears when being logged to TTYs that aren't proper TTYs.
py
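Writing the startup line straight to stderr with an explicit flush sidesteps the logging pipeline entirely, so the URL shows up even when stderr is piped or not a real TTY. The bare pattern (the banner text and values are placeholders):

import sys

def announce(url, version):
    """Print a startup banner unconditionally, bypassing logging config.

    Unlike logger.info(), this cannot be swallowed by log levels or
    ephemeral handlers, and flush() forces it out even when stderr is
    block-buffered (e.g. piped to a file).
    """
    sys.stderr.write('Serving %s at %s (Press CTRL+C to quit)\n'
                     % (version, url))
    sys.stderr.flush()

announce('http://localhost:6006', '1.0')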
diff --git a/superset/datasets/api.py b/superset/datasets/api.py index <HASH>..<HASH> 100644 --- a/superset/datasets/api.py +++ b/superset/datasets/api.py @@ -147,6 +147,7 @@ class DatasetRestApi(BaseSupersetModelRestApi): "owners.username", "owners.first_name", "owners.last_name", + "columns.advanced_data_type", "columns.changed_on", "columns.column_name", "columns.created_on", @@ -162,7 +163,18 @@ class DatasetRestApi(BaseSupersetModelRestApi): "columns.type", "columns.uuid", "columns.verbose_name", - "metrics", + "metrics", # TODO(john-bodley): Deprecate in 3.0. + "metrics.changed_on", + "metrics.created_on", + "metrics.d3format", + "metrics.description", + "metrics.expression", + "metrics.extra", + "metrics.id", + "metrics.metric_name", + "metrics.metric_type", + "metrics.verbose_name", + "metrics.warning_text", "datasource_type", "url", "extra",
refactor: Blossom metric fields for dataset API column selection (#<I>)
py
diff --git a/ykman/cli/fido.py b/ykman/cli/fido.py index <HASH>..<HASH> 100644 --- a/ykman/cli/fido.py +++ b/ykman/cli/fido.py @@ -74,7 +74,7 @@ def fido(ctx): \b Change the FIDO2 PIN from 123456 to 654321: - $ ykman fido set-pin --pin 123456 --new-pin 654321 + $ ykman fido access change-pin --pin 123456 --new-pin 654321 """ conn = ctx.obj["conn"] @@ -225,14 +225,14 @@ def access(): """ [email protected]("set-pin") [email protected]("change-pin") @click.pass_context @click.option("-P", "--pin", help="Current PIN code.") @click.option("-n", "--new-pin", help="A new PIN.") @click.option( "-u", "--u2f", is_flag=True, help="Set FIDO U2F PIN instead of FIDO2 PIN." ) -def set_pin(ctx, pin, new_pin, u2f): +def change_pin(ctx, pin, new_pin, u2f): """ Set or change the PIN code. @@ -413,7 +413,7 @@ def creds(): \b List stored credentials (providing PIN via argument): - $ ykman fido credentials --pin 123456 list + $ ykman fido credentials list --pin 123456 \b Delete a stored credential by user name (PIN will be prompted for):
Fix rename set-pin -> change-pin for consistency.
py
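Renaming a click command only changes the string passed to the decorator; the Python function name can differ from the CLI name. If backwards compatibility mattered, a hidden alias could be registered alongside the new name, roughly like this (the group and options are a stripped-down stand-in, not ykman's real CLI):

import click

@click.group()
def access():
    """Manage PIN and other access settings."""

@access.command("change-pin")
@click.option("-P", "--pin", help="Current PIN code.")
@click.option("-n", "--new-pin", help="A new PIN.")
def change_pin(pin, new_pin):
    """Set or change the PIN code."""
    click.echo("changing PIN")

# Optional: keep the old spelling working but hide it from --help.
access.add_command(
    click.Command("set-pin", callback=change_pin.callback,
                  params=change_pin.params, hidden=True))

if __name__ == "__main__":
    access()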
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,6 @@ except ImportError: else: setupdict['packages'] = find_packages('src') setupdict['package_dir'] = { '': 'src' } - setupdict['install_requires'] = ['Twisted'] - + setupdict['install_requires'] = ['Twisted', 'six'] + setup(**setupdict)
Added six to setup.py
py
diff --git a/test/helpers.py b/test/helpers.py index <HASH>..<HASH> 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -10,6 +10,7 @@ from unittest import mock import vobject +from khard import address_book from khard import carddav_object @@ -56,6 +57,28 @@ def mock_stream(name="stdout"): return context_manager +class TmpAbook: + """Context manager to create a temporary address book folder""" + + def __init__(self, vcards): + self.vcards = vcards + + def __enter__(self): + self.tempdir = tempfile.TemporaryDirectory() + for card in self.vcards: + shutil.copy(self._card_path(card), self.tempdir.name) + return address_book.VdirAddressBook("tmp", self.tempdir.name) + + def __exit__(self, _a, _b, _c): + self.tempdir.cleanup() + + @staticmethod + def _card_path(card): + if os.path.exists(card): + return card + return os.path.join("test/fixture/vcards", card) + + class TmpConfig(contextlib.ContextDecorator): """Context manager to create a temporary khard configuration.
Add a context manager for temp test abooks
py
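The same shape works for any fixture directory: allocate a TemporaryDirectory in __enter__, hand back an object built on top of it, and clean up in __exit__. A generic version without the khard specifics (the fixture path is an assumption):

import shutil
import tempfile
from pathlib import Path

class TmpFixtureDir:
    """Context manager copying fixture files into a throwaway directory."""

    def __init__(self, files, fixture_root="test/fixtures"):
        self.files = files
        self.fixture_root = Path(fixture_root)

    def __enter__(self):
        self.tempdir = tempfile.TemporaryDirectory()
        for name in self.files:
            src = Path(name)
            if not src.exists():          # allow bare fixture names
                src = self.fixture_root / name
            shutil.copy(src, self.tempdir.name)
        return Path(self.tempdir.name)

    def __exit__(self, exc_type, exc, tb):
        self.tempdir.cleanup()            # also runs on exceptions

# with TmpFixtureDir(["contact1.vcf"]) as tmp:
#     ...  # tmp is a pathlib.Path to the populated directory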
diff --git a/molecule/commands.py b/molecule/commands.py index <HASH>..<HASH> 100644 --- a/molecule/commands.py +++ b/molecule/commands.py @@ -602,8 +602,8 @@ class Login(AbstractCommand): colorama.Fore.RED)) hostname = match[0] - login_cmd = self.molecule._provisioner.login_cmd(hostname) - login_args = self.molecule._provisioner.login_args(hostname) + login_cmd = self.molecule._provisioner.login_cmd(hostname) + login_args = self.molecule._provisioner.login_args(hostname) except CalledProcessError: # gets appended to python-vagrant's error message
Fix molecule login Molecule login was broken as some variables were not defined properly.
py
diff --git a/setuptools/extension.py b/setuptools/extension.py index <HASH>..<HASH> 100644 --- a/setuptools/extension.py +++ b/setuptools/extension.py @@ -73,7 +73,9 @@ class Extension(_Extension): :keyword list[str] runtime_library_dirs: list of directories to search for C/C++ libraries at run time - (for shared extensions, this is when the extension is loaded) + (for shared extensions, this is when the extension is loaded). + Setting this will cause an exception during build on Windows + platforms. :keyword list[str] extra_objects: list of extra files to link with (eg. object files not implied @@ -113,6 +115,9 @@ class Extension(_Extension): :keyword bool optional: specifies that a build failure in the extension should not abort the build process, but simply not install the failing extension. + + :raises DistutilsPlatformError: if 'runtime_library_dirs' is specified + on Windows. (since v63) """ def __init__(self, name, sources, *args, **kw):
DOC: Mention that Extension(..., runtime_library_dirs) raises on Windows May want a closer match to the exception message to make it easier to search, but this should help. Inspired by #<I>.
py
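Callers who need the flag elsewhere can simply gate it on the platform, since the MSVC toolchain has no rpath equivalent. One defensive way to build the kwargs, assuming a hypothetical Unix-only shared library path:

import sys
from setuptools import Extension

kwargs = {}
if not sys.platform.startswith("win"):
    # rpath-style lookup only exists on ELF/Mach-O platforms; passing
    # this on Windows raises DistutilsPlatformError at build time.
    kwargs["runtime_library_dirs"] = ["/usr/local/lib"]

ext = Extension("mymod", sources=["mymod.c"], **kwargs)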
diff --git a/tests/linalg_test.py b/tests/linalg_test.py index <HASH>..<HASH> 100644 --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -584,6 +584,8 @@ class NumpyLinalgTest(jtu.JaxTestCase): for shape in [(1, 1), (4, 4), (2, 3, 5), (5, 5, 5), (20, 20), (5, 10)] for pnorm in [np.inf, -np.inf, 1, -1, 2, -2, 'fro'] for dtype in float_types + complex_types)) + @jtu.skip_on_devices("tpu") # SVD is not implemented on the TPU backend + @jtu.skip_on_devices("gpu") # TODO(#2203): numerical errors def testCond(self, shape, pnorm, dtype): _skip_if_unsupported_type(dtype)
Disable linalg_test.testCond. Issue: <I> This test was added in #<I> but is failing in internal tests.
py
diff --git a/salt/master.py b/salt/master.py index <HASH>..<HASH> 100644 --- a/salt/master.py +++ b/salt/master.py @@ -721,10 +721,18 @@ class AESFuncs(object): Receive an event from the minion and fire it on the master event interface ''' - if 'id' not in load or 'tag' not in load or 'data' not in load: + if 'id' not in load: return False - tag = load['tag'] - return self.event.fire_event(load, tag) + if 'events' not in load: + if 'tag' not in load or 'data' not in load: + return False + if 'events' in load: + for event in load['events']: + self.event.fire_event(event, event['tag']) + else: + tag = load['tag'] + self.event.fire_event(load, tag) + return True def _return(self, load): '''
Prep _minion_event method to handle new event process
py
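After the bug fix above (iterating load['events'] rather than an undefined name), the handler accepts either a single event payload or a batched list. The control flow in isolation, with fire_event stubbed out:

def handle_minion_event(load, fire_event):
    """Fire one event or a batch, validating the payload shape first."""
    if 'id' not in load:
        return False
    if 'events' in load:
        for event in load['events']:       # batched form
            fire_event(event, event['tag'])
    else:
        if 'tag' not in load or 'data' not in load:
            return False                   # malformed single event
        fire_event(load, load['tag'])
    return True

fired = []
fire = lambda data, tag: fired.append(tag)
handle_minion_event({'id': 'm1', 'events': [{'tag': 'a'}, {'tag': 'b'}]}, fire)
handle_minion_event({'id': 'm1', 'tag': 'c', 'data': {}}, fire)
print(fired)  # ['a', 'b', 'c']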
diff --git a/signals.py b/signals.py index <HASH>..<HASH> 100644 --- a/signals.py +++ b/signals.py @@ -116,3 +116,12 @@ record_after_update = _signals.signal( """ This signal is sent after record is updated. """ + +pre_template_render = _signals.signal('pre-template-render') +""" +This signal is sent before *some* templates are rendered, and allows +customization of the template context. + +Sender is the blueprint view name (e.g. 'record.metadata'). Extra data +passed in depends on blueprint view. +"""
base: add pre-template-render signal * Adds signal that can be sent from views prior to a template being rendered. This allows simple customizations of the template context.
py
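The _signals object here is a blinker Namespace, so subscribers hook in with connect and views publish with send. A self-contained sketch of how a view might emit this signal and a receiver might extend the context (the view name and context keys are illustrative, not Invenio's):

from blinker import Namespace

_signals = Namespace()
pre_template_render = _signals.signal('pre-template-render')

@pre_template_render.connect
def inject_extra_context(sender, context=None, **extra):
    """Receiver: enrich the template context before rendering."""
    if context is not None and sender == 'record.metadata':
        context['badge'] = 'curated'

# In the view, just before render_template():
context = {'title': 'My record'}
pre_template_render.send('record.metadata', context=context)
print(context)  # {'title': 'My record', 'badge': 'curated'}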
diff --git a/markgen_tests.py b/markgen_tests.py index <HASH>..<HASH> 100644 --- a/markgen_tests.py +++ b/markgen_tests.py @@ -96,8 +96,13 @@ class MarkgenTests(unittest.TestCase): emphasis(u'yo momma')]) base_path = os.path.abspath(os.path.dirname(__file__)) + file_content = open(os.path.join(base_path, - 'markgen_test_doc.md'), 'r').read().decode('utf8')[:-1] + 'markgen_test_doc.md'), 'r').read() + if hasattr(file_content, 'decode'): + file_content = file_content.decode('utf8') + + file_content = file_content[:-1] assert content == file_content
Fix tests utf8 usage for <I>
py
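The hasattr(..., 'decode') probe is a version-agnostic way to normalise file contents: under Python 2, open() yields bytes that need decoding, while under Python 3 text mode already yields str (which has no decode method). The idiom on its own, with a hypothetical helper name:

def to_text(data, encoding='utf8'):
    """Return text from either bytes (py2 open) or str (py3 text mode)."""
    if hasattr(data, 'decode'):        # py2 str / py3 bytes
        data = data.decode(encoding)
    return data

print(to_text(b'caf\xc3\xa9'))  # 'café'
print(to_text(u'café'))         # already text, passed through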
diff --git a/kitty/model/low_level/encoder.py b/kitty/model/low_level/encoder.py index <HASH>..<HASH> 100644 --- a/kitty/model/low_level/encoder.py +++ b/kitty/model/low_level/encoder.py @@ -49,10 +49,11 @@ def strToBytes(value): :param value: value to encode ''' kassert.is_of_types(value, (bytes, bytearray, six.string_types)) - if isinstance(value, six.string_types): - return bytes(bytearray([ord(x) for x in value])) - elif isinstance(value, bytearray): - return bytes(value) + if six.PY3: + if isinstance(value, six.string_types): + return bytes(bytearray([ord(x) for x in value])) + elif isinstance(value, bytearray): + return bytes(value) return value
[Encoder] improve performance for python2
py
diff --git a/zinnia/feeds.py b/zinnia/feeds.py index <HASH>..<HASH> 100644 --- a/zinnia/feeds.py +++ b/zinnia/feeds.py @@ -320,7 +320,7 @@ class SearchEntries(EntryFeed): """ Title of the feed. """ - return _("Results of the search for '%s'") % obj + return _("Search results for '%(pattern)s'") % {'pattern': obj} def description(self, obj): """
Use the same translation string for search results
py
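Named placeholders let translators reorder words in the target language and let several call sites share one msgid; positional %s pins the word order. The difference, shown with plain string interpolation (no Django required):

# Positional: the translator cannot move the pattern in the sentence,
# and "Results of the search for '%s'" and "Search results for '%s'"
# would be two separate msgids to maintain.
positional = "Search results for '%s'" % 'django'

# Named: one reusable msgid, reorderable in any target language,
# e.g. German could render "'%(pattern)s': Suchergebnisse".
named = "Search results for '%(pattern)s'" % {'pattern': 'django'}

assert positional == named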
diff --git a/django_select2/widgets.py b/django_select2/widgets.py index <HASH>..<HASH> 100644 --- a/django_select2/widgets.py +++ b/django_select2/widgets.py @@ -142,6 +142,8 @@ class Select2Mixin(object): options = dict(self.options) if options.get('allowClear', None) is not None: options['allowClear'] = not self.is_required + if options.get('placeholder'): + options['placeholder'] = force_text(options['placeholder']) return options def render_js_code(self, id_, *args):
Support proper translation of placeholders using ugettext_lazy.
py
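ugettext_lazy returns a proxy object that JSON serialisers choke on, so anything headed for a serialised options blob has to be forced to real text first; that is what the added force_text call does. A sketch of the failure mode with a stand-in lazy proxy (Django not required to run it):

import json

class LazyProxy:
    """Stand-in for django's ugettext_lazy proxy: str-able, not a str."""
    def __init__(self, msg):
        self._msg = msg
    def __str__(self):
        return self._msg

placeholder = LazyProxy('Choose an option')

try:
    json.dumps({'placeholder': placeholder})
except TypeError as exc:
    print('unforced proxy fails:', exc)

# force_text()/str() resolves the proxy at the last moment, once the
# active language is known, and the result serialises fine.
print(json.dumps({'placeholder': str(placeholder)}))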
diff --git a/angr/analyses/scout.py b/angr/analyses/scout.py index <HASH>..<HASH> 100644 --- a/angr/analyses/scout.py +++ b/angr/analyses/scout.py @@ -331,8 +331,8 @@ class Scout(Analysis): traced_address = set() self._functions = set() self._call_map = networkx.DiGraph() - initial_state = self._project.initial_state(mode="fastpath", add_options={simuvex.o.NO_SOLVING_FOR_SUCCESSORS}) - initial_options = initial_state.options + initial_state = self._project.initial_state(mode="fastpath") + initial_options = initial_state.options - { simuvex.o.TRACK_CONSTRAINTS } # initial_options.remove(simuvex.o.COW_STATES) initial_state.options = initial_options # Sadly, not all calls to functions are explicitly made by call
don't track constraints in scout
py
diff --git a/lib/svtplay_dl/fetcher/dash.py b/lib/svtplay_dl/fetcher/dash.py index <HASH>..<HASH> 100644 --- a/lib/svtplay_dl/fetcher/dash.py +++ b/lib/svtplay_dl/fetcher/dash.py @@ -94,5 +94,6 @@ class DASH(VideoRetriever): if self.options.output != "-": file_d.close() + progressbar(bytes_so_far, total_size, "ETA: complete") progress_stream.write('\n') self.finished = True
dash: complete the progress bar after file is downloaded The progress bar wasn't updated after the download completed, so the final progress bar would look something like this: [<I>/<I>][===============================.] ETA: 0:<I>:<I> This can be interpreted as meaning the file didn't download completely. Reported-by: rooth
py
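Progress bars drawn with carriage returns only show their last call, so a final redraw at 100% is needed once the loop ends; otherwise the bar freezes at whatever the last chunk happened to report. A minimal bar with that closing update (this is a generic stand-in, not svtplay_dl's actual progressbar implementation):

import sys

def progressbar(done, total, suffix=''):
    """Redraw a single-line progress bar in place."""
    width = 30
    filled = int(width * done / total)
    bar = '=' * filled + '.' * (width - filled)
    sys.stderr.write('\r[%d/%d][%s] %s' % (done, total, bar, suffix))
    sys.stderr.flush()

total = 7
for chunk in range(total - 1):          # pretend the last read was short
    progressbar(chunk + 1, total, 'ETA: 0:00:03')
progressbar(total, total, 'ETA: complete')  # final redraw closes the bar
sys.stderr.write('\n')                  # move off the bar line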
diff --git a/command/build_ext.py b/command/build_ext.py index <HASH>..<HASH> 100644 --- a/command/build_ext.py +++ b/command/build_ext.py @@ -292,7 +292,7 @@ class build_ext (Command): ext.undef_macros = [] for macro in macros: if not (type(macro) is TupleType and - 1 <= len(macros) <= 2): + 1 <= len(macro) <= 2): raise DistutilsSetupError, \ ("'macros' element of build info dict " "must be 1- or 2-tuple")
Typo fix from David Ascher.
py
diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py index <HASH>..<HASH> 100644 --- a/ceph_deploy/gatherkeys.py +++ b/ceph_deploy/gatherkeys.py @@ -143,7 +143,7 @@ def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir): keyring_path_local = os.path.join(dest_dir, keyring_name_local) with open(keyring_path_local, 'wb') as f: for line in out: - f.write(line + b'\n') + f.write(line.decode('utf-8') + '\n') return True @@ -183,7 +183,7 @@ def gatherkeys_with_mon(args, host, dest_dir): rlogger.debug(line) return False try: - mon_status = json.loads(b''.join(out).decode('utf-8')) + mon_status = json.loads(''.join(out)) except ValueError: rlogger.error('"ceph mon_status %s" output was not json', host) for line in out:
gatherkeys: no need to decode - already done by remoto
py