Dataset columns: diff (string, 139 to 3.65k chars), message (string, 8 to 627 chars), diff_languages (string, 1 unique value: py).
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ import os from setuptools import find_packages, setup -with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme: +with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme: README = readme.read() os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
Replaces README.md with README.rst
py
diff --git a/ocrd/ocrd/workspace.py b/ocrd/ocrd/workspace.py index <HASH>..<HASH> 100644 --- a/ocrd/ocrd/workspace.py +++ b/ocrd/ocrd/workspace.py @@ -206,6 +206,8 @@ class Workspace(): content is not None) if content is not None and 'local_filename' not in kwargs: raise Exception("'content' was set but no 'local_filename'") + if self.overwrite_mode: + kwargs['force'] = True with pushd_popd(self.directory): if 'local_filename' in kwargs:
workspace.add_file: overwrite_mode → force=True (regression from 5dce<I>)
py
diff --git a/tests/influxdb/client_test_with_server.py b/tests/influxdb/client_test_with_server.py index <HASH>..<HASH> 100644 --- a/tests/influxdb/client_test_with_server.py +++ b/tests/influxdb/client_test_with_server.py @@ -180,13 +180,17 @@ class InfluxDbInstance(object): def get_logs_and_output(self): proc = self.proc - with open(self.logs_file) as fh: - return { - 'rc': proc.returncode, - 'out': proc.stdout.read(), - 'err': proc.stderr.read(), - 'logs': fh.read() - } + try: + with open(self.logs_file) as fh: + logs = fh.read() + except IOError as err: + logs = "Couldn't read logs: %s" % err + return { + 'rc': proc.returncode, + 'out': proc.stdout.read(), + 'err': proc.stderr.read(), + 'logs': logs + } def close(self, remove_tree=True): self.proc.terminate()
Workaround: the latest influxd master unfortunately no longer stores a log file, so don't error if it isn't readable. For now the log-file code is kept, as it might be reintroduced later.
py
diff --git a/gruvi/fibers.py b/gruvi/fibers.py index <HASH>..<HASH> 100644 --- a/gruvi/fibers.py +++ b/gruvi/fibers.py @@ -112,7 +112,7 @@ class Fiber(fibers.Fiber): if not self.is_alive(): return if message is None: - message = 'Fiber.cancel()' + message = 'cancelled by Fiber.cancel()' self._hub.run_callback(self.throw, Cancelled, Cancelled(message)) def join(self, timeout=None):
fibers: small nitpick
py
diff --git a/quantecon/models/solow/model.py b/quantecon/models/solow/model.py index <HASH>..<HASH> 100644 --- a/quantecon/models/solow/model.py +++ b/quantecon/models/solow/model.py @@ -112,7 +112,6 @@ class Model(ivp.IVP): """ # cached values self.__intensive_output = None - self.__k_dot = None self.output = output self.params = params @@ -136,18 +135,15 @@ class Model(ivp.IVP): return self.__intensive_output @property - def _k_dot(self): + def _symbolic_system(self): """ - :getter: Return vectorized version of equation of motion for capital - (per unit effective labor). - :type: function + Symbolic expression for the system of ODEs. + + :getter: Return the system of ODEs. + :type: sym.ImmutableMatrix """ - if self.__k_dot is None: - args = [k] + sym.var(self.params.keys()) - self.__k_dot = sym.lambdify(args, self.k_dot, - modules=[{'ImmutableMatrix': np.array}, "numpy"]) - return self.__k_dot + return sym.Matrix([self.k_dot]) @property def intensive_output(self):
Added a private property storing an ImmutableMatrix defining the ODEs.
py
diff --git a/setuptools/tests/test_dist.py b/setuptools/tests/test_dist.py index <HASH>..<HASH> 100644 --- a/setuptools/tests/test_dist.py +++ b/setuptools/tests/test_dist.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals +import io + from setuptools import Distribution from setuptools.extern.six.moves.urllib.request import pathname2url from setuptools.extern.six.moves.urllib_parse import urljoin @@ -122,7 +124,7 @@ def test_maintainer_author(name, attrs, tmpdir): dist.metadata.write_pkg_info(fn_s) - with open(str(fn.join('PKG-INFO')), 'r') as f: + with io.open(str(fn.join('PKG-INFO')), 'r', encoding='utf-8') as f: pkg_lines = f.readlines() pkg_lines = [_ for _ in pkg_lines if _] # Drop blank lines
Open metadata file with UTF-8 decoding.
py
diff --git a/shinken/trigger_functions.py b/shinken/trigger_functions.py index <HASH>..<HASH> 100644 --- a/shinken/trigger_functions.py +++ b/shinken/trigger_functions.py @@ -75,8 +75,9 @@ def set_value(obj_ref, output=None, perfdata=None, return_code=None): if not obj: return output = output or obj.output - perfdata = perfdata or obj.perfdata - return_code = return_code or obj.state_id + perfdata = perfdata or obj.perf_data + if return_code is None: + return_code = obj.state_id logger.debug("[trigger] Setting %s %s %s for object %s" % (output, perfdata, return_code, obj.get_full_name()))
* fixed typo "perfdata" * return_code can be 0, so specifically check for None
py
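A minimal sketch of the falsy-zero pitfall the fix above addresses: `or` treats a return code of 0 (OK) as "missing" and silently replaces it, so an explicit `is None` check is needed. Function names here are hypothetical.

def default_with_or(return_code, state_id):
    return return_code or state_id        # bug: 0 falls through to state_id

def default_with_none_check(return_code, state_id):
    if return_code is None:               # only replace a truly absent value
        return_code = state_id
    return return_code

assert default_with_or(0, 2) == 2         # OK status silently lost
assert default_with_none_check(0, 2) == 0 # OK status preserved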
diff --git a/ec2.py b/ec2.py index <HASH>..<HASH> 100644 --- a/ec2.py +++ b/ec2.py @@ -21,7 +21,7 @@ class credentials(object): def __getitem__(self, item): item = item.upper() - return getattr(self, item[4:]) or os.environ.get(item) + return os.environ.get(item) or getattr(self, item[4:]) class instances(object):
Environment variables take priority over application-level variables
py
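A hedged sketch of the lookup-order change: `a or b` returns the first truthy operand, so swapping the operands makes the environment variable win. The Credentials class and key names below are hypothetical.

import os

class Credentials(object):
    ACCESS_KEY = 'app-level-key'

    def __getitem__(self, item):
        item = item.upper()
        # environment first, application-level attribute as the fallback
        return os.environ.get(item) or getattr(self, item[4:])

os.environ['AWS_ACCESS_KEY'] = 'env-level-key'
print(Credentials()['aws_access_key'])  # -> env-level-key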
diff --git a/openquake/calculators/hazard/classical/core.py b/openquake/calculators/hazard/classical/core.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/hazard/classical/core.py +++ b/openquake/calculators/hazard/classical/core.py @@ -506,6 +506,11 @@ class ClassicalHazardCalculator(general.BaseHazardCalculator): pause = pgen.next() while accounted_for != sites: + failures = stats.get_counter(self.calc_proxy.job_id, "h", + "compute_hazard_curve-failures", "i") + if failures: + raise RuntimeError( + "%s hazard curve failures, aborting" % failures) hc_data = [] # Sleep a little before checking the availability of additional # hazard curve results.
fail on hazard curve failures Former-commit-id: adb<I>ec<I>c<I>d<I>e6f9f<I>
py
diff --git a/imgaug/parameters.py b/imgaug/parameters.py index <HASH>..<HASH> 100644 --- a/imgaug/parameters.py +++ b/imgaug/parameters.py @@ -90,7 +90,7 @@ def handle_discrete_param(param, name, value_range=None, tuple_to_uniform=True, check_value_range(param[0]) check_value_range(param[1]) return DiscreteUniform(int(param[0]), int(param[1])) - elif list_to_choice and ia.is_iterable(param): + elif list_to_choice and ia.is_iterable(param) and not isinstance(param, tuple): for param_i in param: check_value_range(param_i) return Choice([int(param_i) for param_i in param])
Fix tuple being accepted as list
py
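A minimal sketch of the convention the fix enforces, assuming (as the diff suggests) that a 2-tuple means a uniform range while a list means an explicit set of choices; a bare iterable check alone would wrongly route tuples into the choice branch.

def handle_discrete_param(param):
    if isinstance(param, tuple) and len(param) == 2:
        return ('uniform', int(param[0]), int(param[1]))
    elif isinstance(param, list):
        return ('choice', [int(v) for v in param])
    raise ValueError('expected a 2-tuple (range) or a list (choices)')

print(handle_discrete_param((0, 10)))   # ('uniform', 0, 10)
print(handle_discrete_param([1, 2, 4])) # ('choice', [1, 2, 4])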
diff --git a/openquake/commonlib/tests/readinput_test.py b/openquake/commonlib/tests/readinput_test.py index <HASH>..<HASH> 100644 --- a/openquake/commonlib/tests/readinput_test.py +++ b/openquake/commonlib/tests/readinput_test.py @@ -19,8 +19,8 @@ class ClosestSiteModelTestCase(unittest.TestCase): <site lon="0.0" lat="0.2" vs30="200.0" vs30Type="inferred" z1pt0="100.0" z2pt5="2.0" /> </siteModel> </nrml>''') - oqparam = mock.Mock(inputs={}) - oqparam.inputs['site_model'] = data + oqparam = mock.Mock() + oqparam.inputs = dict(site_model=data) expected = [ SiteParam(z1pt0=100.0, z2pt5=2.0, measured=False, vs30=1200.0, lon=0.0, lat=0.0),
Changed the mock to be compatible with old versions
py
diff --git a/MySQLdb/cursors.py b/MySQLdb/cursors.py index <HASH>..<HASH> 100644 --- a/MySQLdb/cursors.py +++ b/MySQLdb/cursors.py @@ -60,11 +60,10 @@ class BaseCursor(object): InternalError, ProgrammingError, NotSupportedError _defer_warnings = False + connection = None def __init__(self, connection): - from weakref import ref - - self.connection = ref(connection) + self.connection = connection self.description = None self.description_flags = None self.rowcount = -1 @@ -81,7 +80,7 @@ class BaseCursor(object): def close(self): """Close the cursor. No further queries will be possible.""" try: - if self.connection is None or self.connection() is None: + if self.connection is None: return while self.nextset(): pass @@ -192,8 +191,6 @@ class BaseCursor(object): def _get_db(self): con = self.connection - if con is not None: - con = con() if con is None: raise ProgrammingError("cursor closed") return con
Cursor.connection is now a real reference instead of a weakref
py
diff --git a/phono3py/cui/load.py b/phono3py/cui/load.py index <HASH>..<HASH> 100644 --- a/phono3py/cui/load.py +++ b/phono3py/cui/load.py @@ -454,6 +454,7 @@ def set_dataset_and_force_constants( "fc2", log_level) elif os.path.isfile("FORCES_FC3"): + # suppose fc3.hdf5 is read but fc2.hdf5 doesn't exist. disp_filename = None if os.path.isfile("disp_fc3.yaml"): if ph3py_yaml is None:
Add a comment line in phono3py.load
py
diff --git a/submit/views.py b/submit/views.py index <HASH>..<HASH> 100644 --- a/submit/views.py +++ b/submit/views.py @@ -528,14 +528,11 @@ It may be imported again using the import feature""" % project.name)) def make_big_string(text, filename): def is_binary(str): - return "\x00" in str or any(ord(x) > 0x80 for x in str) - if len(text) < 150 and "\n" not in text and not is_binary(text): - return text - else: - response.append(("text", filename, text)) - return { - "File": filename - } + return "\x00" in str or any(ord(x) > 0x80 for x in str) + response.append(("text", filename, text)) + return { + "File": filename + } project_yml_dict = {} project_yml_dict["Name"] = project.name project_yml_dict["ExpectedFiles"] = {
Fix bug: the former code returned a str instead of a dict
py
diff --git a/tile_generator/config.py b/tile_generator/config.py index <HASH>..<HASH> 100644 --- a/tile_generator/config.py +++ b/tile_generator/config.py @@ -219,7 +219,7 @@ class Config(dict): } ] if requires_docker_bosh: - version = None + version = '29.0.0' version_param = '?v=' + version if version else '' self['releases'] += [{ 'name': 'docker-boshrelease',
Pin docker-boshrelease version. Deeper changes are needed to handle the latest upstream version. See #<I>.
py
diff --git a/openfisca_france_indirect_taxation/scripts/build_coicop_legislation.py b/openfisca_france_indirect_taxation/scripts/build_coicop_legislation.py index <HASH>..<HASH> 100644 --- a/openfisca_france_indirect_taxation/scripts/build_coicop_legislation.py +++ b/openfisca_france_indirect_taxation/scripts/build_coicop_legislation.py @@ -202,6 +202,12 @@ def build_coicop_nomenclature_with_fiscal_categories(to_csv = False): # u'02202'] ['Cigares et cigarillos'] 1994 2014 cigares # [u'02201'] ['Cigarettes'] 1994 2014 cigarettes # TODO: Rajouter Stupéfiants sans taxe + # cigarettes = dict( + # value = '02.2.1', + # categorie_fiscale = 'tva_taux_plein' + # label = '' + # origin = '' + # ) # # 03 Habillement et chaussures habillement = dict(
Add stub in comment for tabac
py
diff --git a/test/runtests.py b/test/runtests.py index <HASH>..<HASH> 100644 --- a/test/runtests.py +++ b/test/runtests.py @@ -1,7 +1,5 @@ #!/usr/bin/env python -import sys -import os import unittest import test_bitstring import test_bitarray @@ -9,13 +7,12 @@ import test_constbitarray import test_constbitstream import test_bitstream -print("Running bitstring tests") -unittest.main(test_bitstring, exit=False) -print("Running constbitarray tests") -unittest.main(test_constbitarray, exit=False) -print("Running constbitstream tests") -unittest.main(test_constbitstream, exit=False) -print("Running bitarray tests") -unittest.main(test_bitarray, exit=False) -print("Running bitstream tests") -unittest.main(test_bitstream, exit=False) +for module in [test_bitstring, test_constbitarray, test_constbitstream, + test_bitarray, test_bitstream]: + print("Running {0}".format(module.__name__)) + try: + unittest.main(module) + except SystemExit: + pass + +
Fixing so that it will work for Python <I>.
py
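A short sketch of why the loop above wraps unittest.main in try/except: unittest.main() calls sys.exit() when a run finishes, and the exit=False keyword that suppresses this is unavailable on very old interpreters, so SystemExit is swallowed instead. The module below is a stand-in for a real test module.

import unittest
import unittest.util as example_module  # stand-in for a real test module

try:
    unittest.main(example_module)  # raises SystemExit when the run completes
except SystemExit:
    pass                           # swallow it and continue with more modules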
diff --git a/Lib/fontParts/base/glyph.py b/Lib/fontParts/base/glyph.py index <HASH>..<HASH> 100644 --- a/Lib/fontParts/base/glyph.py +++ b/Lib/fontParts/base/glyph.py @@ -1477,7 +1477,8 @@ class BaseGlyph(BaseObject, TransformationMixin, InterpolationMixin, self._scaleWidthBy(sX) if height: self._scaleHeightBy(sY) - # scaleBy.__doc__ %= TransformationMixin.scaleBy.__doc__ + + scaleBy.__doc__ %= TransformationMixin.scaleBy.__doc__ def _scaleWidthBy(self, value): """
Hack the super's doc into the subclass' doc.
py
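A hedged sketch of the docstring trick being re-enabled: the subclass docstring carries a %s placeholder that is filled with the superclass docstring via %= inside the class body. The class names here are hypothetical stand-ins for TransformationMixin/BaseGlyph.

class TransformBase(object):
    def scaleBy(self, value):
        """Scale the object by value (the full parameter docs live here)."""

class Glyph(TransformBase):
    def scaleBy(self, value, width=True, height=True):
        """Glyph-specific scaling notes follow the shared docs. %s"""
    # splice the parent's doc into the placeholder at class-creation time
    scaleBy.__doc__ %= TransformBase.scaleBy.__doc__

print(Glyph.scaleBy.__doc__)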
diff --git a/fabfile.py b/fabfile.py index <HASH>..<HASH> 100644 --- a/fabfile.py +++ b/fabfile.py @@ -6,7 +6,7 @@ import errno from fileinput import FileInput import os import shutil -import sys +from sys import version_info from contextlib import contextmanager from uuid import uuid4 @@ -143,7 +143,7 @@ def flake8(rcfile=FLAKE8_CFG): def black(): """Run black style checker.""" - if sys.version_info.major > 2: + if version_info >= (3, 6, 0): local("black --check %s" % (" ".join(CHECK_INCLUDES)))
Make black version check consistent with setup.py
py
diff --git a/src/armet/resources/model.py b/src/armet/resources/model.py index <HASH>..<HASH> 100644 --- a/src/armet/resources/model.py +++ b/src/armet/resources/model.py @@ -120,7 +120,7 @@ class BaseModel(base.BaseResource): queryset = self.model.objects.all() # Filter the queryset based on permissions you can have - queryset = self.authorize_queryset(queryset, 'read') + # queryset = self.authorize_queryset(queryset, 'read') try: # Apply transveral first.
fixed a bug in resources/model.py by commenting out the authorize_queryset call
py
diff --git a/tests/test_get_user_config.py b/tests/test_get_user_config.py index <HASH>..<HASH> 100644 --- a/tests/test_get_user_config.py +++ b/tests/test_get_user_config.py @@ -93,3 +93,31 @@ def test_get_user_config_nonexistent(): Get config from a nonexistent ~/.cookiecutterrc file """ assert config.get_user_config() == config.DEFAULT_CONFIG + + [email protected] +def custom_user_config(): + return { + 'cookiecutters_dir': '/foo/bar/some-path-to-templates', + 'replay_dir': '/foo/bar/some-path-to-replay-files', + 'default_context': { + 'full_name': 'Cookiemonster', + 'github_username': 'hackebrot' + }, + 'abbreviations': { + 'cookiedozer': 'https://github.com/hackebrot/cookiedozer.git', + } + } + + [email protected] +def custom_user_config_path(tmpdir, custom_user_config): + user_config_file = tmpdir.join('user_config') + + user_config_file.write(config.yaml.dump(custom_user_config)) + return str(user_config_file) + + +def test_specify_config_path(custom_user_config_path, custom_user_config): + user_config = config.get_user_config(custom_user_config_path) + assert user_config == custom_user_config
Implement a test for get_user_config to accept path
py
diff --git a/salt/utils/__init__.py b/salt/utils/__init__.py index <HASH>..<HASH> 100644 --- a/salt/utils/__init__.py +++ b/salt/utils/__init__.py @@ -863,7 +863,7 @@ def subdict_match(data, expr, delim=':', regex_match=False): try: return re.match(pattern.lower(), str(target).lower()) except Exception: - log.error('Invalid regex \'{0}\' in match'.format(pattern)) + log.error('Invalid regex {0!r} in match'.format(pattern)) return False else: return fnmatch.fnmatch(str(target).lower(), pattern.lower()) @@ -872,8 +872,8 @@ def subdict_match(data, expr, delim=':', regex_match=False): splits = expr.split(delim) key = delim.join(splits[:idx]) matchstr = delim.join(splits[idx:]) - log.debug('Attempting to match \'{0}\' in \'{1}\' using delimiter ' - '\'{2}\''.format(matchstr, key, delim)) + log.debug('Attempting to match {0!r} in {1!r} using delimiter ' + '{2!r}'.format(matchstr, key, delim)) match = traverse_dict(data, key, {}, delim=delim) if match == {}: continue
Use raw strings instead of escaped single-quotes
py
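A small illustration of the change: the !r conversion delegates quoting to repr(), so the message no longer needs hand-escaped single quotes and awkward patterns stay readable.

pattern = "foo'bar["

print('Invalid regex \'{0}\' in match'.format(pattern))  # manual quoting clashes
print('Invalid regex {0!r} in match'.format(pattern))    # repr() handles quoting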
diff --git a/kechain/api.py b/kechain/api.py index <HASH>..<HASH> 100644 --- a/kechain/api.py +++ b/kechain/api.py @@ -1,6 +1,7 @@ import io import requests +import sys from kechain.query import PartSet from .globals import data @@ -24,7 +25,9 @@ def retrieve_parts(): 'activity_id': data.activity_id }) - assert r.status_code == 200 + if not r.status_code == 200: + print(r.json()["results"][0]["detail"], file=sys.stderr) + return raw_data = r.json()
added stderr output on failure
py
diff --git a/src/toil/test/cwl/cwlTest.py b/src/toil/test/cwl/cwlTest.py index <HASH>..<HASH> 100644 --- a/src/toil/test/cwl/cwlTest.py +++ b/src/toil/test/cwl/cwlTest.py @@ -14,12 +14,13 @@ from __future__ import absolute_import import json import os +from unittest import skip from toil.test import ToilTest, needs_cwl - @needs_cwl class CWLTest(ToilTest): + @skip("https://github.com/BD2KGenomics/toil/issues/621") def test_run_revsort(self): from toil.cwl import cwltoil outDir = self._createTempDir()
Skipping CWL test (connected to #<I>)
py
diff --git a/internetarchive/cli/ia_download.py b/internetarchive/cli/ia_download.py index <HASH>..<HASH> 100644 --- a/internetarchive/cli/ia_download.py +++ b/internetarchive/cli/ia_download.py @@ -40,7 +40,8 @@ options: -P, --search-parameters=<key:value>... Download items returned from a specified search query. -g, --glob=<pattern> Only download files whose filename matches the given glob pattern. - -f, --format=<format>... Only download files of the specified format(s). + -f, --format=<format>... Only download files of the specified format. + Use this option multiple times to download multiple formats. You can use the following command to retrieve a list of file formats contained within a given item:
Clarify CLI download syntax for multiple formats. With the existing documentation, it wasn't clear to me how to download multiple formats at once; this PR attempts to clarify it.
py
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py index <HASH>..<HASH> 100644 --- a/tools/run_tests/artifacts/distribtest_targets.py +++ b/tools/run_tests/artifacts/distribtest_targets.py @@ -363,7 +363,7 @@ def targets(): RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_5'), RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_6'), RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_7'), - RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_3_0'), + # TODO(apolcyn): add a ruby 3.0 test once protobuf adds support RubyDistribTest('linux', 'x64', 'jessie',
Skip ruby <I> distrib test until protobuf adds support
py
diff --git a/python/setup.py b/python/setup.py index <HASH>..<HASH> 100644 --- a/python/setup.py +++ b/python/setup.py @@ -73,7 +73,6 @@ setup( extras_require={ ':python_version>="3.5.2"': [ 'aiohttp>=3.0.1', - 'cchardet==2.1.1', 'aiodns==1.1.1', 'yarl==1.1.0', 'web3==4.4.1',
Removed cchardet dependency from setup.py; fixes #<I>
py
diff --git a/uncommitted/test_integration.py b/uncommitted/test_integration.py index <HASH>..<HASH> 100644 --- a/uncommitted/test_integration.py +++ b/uncommitted/test_integration.py @@ -15,7 +15,7 @@ if sys.version_info.major > 2: else: from StringIO import StringIO [email protected]_fixture(scope='module') [email protected](scope='module') def tempdir(): """Temporary directory in which all tests will run.""" tempdir = tempfile.mkdtemp(prefix='uncommitted-test')
Rewrite "@yield_fixture" (py.test deprecated it)
py
diff --git a/test/testbasics.py b/test/testbasics.py index <HASH>..<HASH> 100644 --- a/test/testbasics.py +++ b/test/testbasics.py @@ -326,15 +326,15 @@ class TestBasics(unittest.TestCase): match +=1 if t == 'Ravenscar to Hull': match +=1 - if t == 'East Coast - Smugglers, Alum and Scarborough Bay': - match +=1 + #if t == 'East Coast - Smugglers, Alum and Scarborough Bay': + #match +=1 if t == "Swanage to Land's End": match +=1 if t == 'Heart of the British Isles - A Grand Tour': match +=1 - self.assertTrue(count == 6) - self.assertTrue(match == 6) + self.assertTrue(count == 5) + self.assertTrue(match == 5) #todo test malformed pipeline syntax too
Update test to suit (latest!) data
py
diff --git a/xopen.py b/xopen.py index <HASH>..<HASH> 100644 --- a/xopen.py +++ b/xopen.py @@ -220,8 +220,8 @@ def xopen(filename, mode='r', compresslevel=6): raise ImportError("Cannot open xz files: The lzma module is not available (use Python 3.3 or newer)") return lzma.open(filename, mode) elif filename.endswith('.gz'): - if _PY3: - return gzip.open(filename, mode, compresslevel=compresslevel) + if _PY3 and 'r' in mode: + return gzip.open(filename, mode) if sys.version_info[:2] == (2, 7): buffered_reader = io.BufferedReader buffered_writer = io.BufferedWriter
Go back to using a pigz/gzip process even on Python 3 ... at least for writing. The performance benefit of doing compression in a separate process (and even multithreaded with pigz) is quite large.
py
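A minimal sketch of the approach the commit reverts to for writing: pipe data through an external gzip-compatible process so compression runs outside the Python interpreter (pigz even multithreads it). This assumes a gzip binary on PATH; error handling is omitted.

import subprocess

with open('data.txt.gz', 'wb') as raw:
    # 'pigz' can be substituted when available for multithreaded compression
    proc = subprocess.Popen(['gzip', '-c'], stdin=subprocess.PIPE, stdout=raw)
    proc.stdin.write(b'hello world\n')
    proc.stdin.close()
    proc.wait()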
diff --git a/src/requirementslib/models/requirements.py b/src/requirementslib/models/requirements.py index <HASH>..<HASH> 100644 --- a/src/requirementslib/models/requirements.py +++ b/src/requirementslib/models/requirements.py @@ -753,7 +753,7 @@ class VCSRequirement(FileRequirement): @property def pipfile_part(self): - excludes = ["_repo", "_base_line"] + excludes = ["_repo", "_base_line", "setup_path"] filter_func = lambda k, v: bool(v) is True and k.name not in excludes pipfile_dict = attr.asdict(self, filter=filter_func).copy() if "vcs" in pipfile_dict:
Don't include setup path in pipfile output
py
diff --git a/allennlp/training/checkpointer.py b/allennlp/training/checkpointer.py index <HASH>..<HASH> 100644 --- a/allennlp/training/checkpointer.py +++ b/allennlp/training/checkpointer.py @@ -66,7 +66,8 @@ class Checkpointer(Registrable): self._last_permanent_saved_checkpoint_time = save_time if remove_path: for fname in paths_to_remove[1:]: - os.remove(fname) + if os.path.isfile(fname): + os.remove(fname) def find_latest_checkpoint(self) -> Tuple[str, str]: """
Checkpointer should check in case user deleted a serialized model (#<I>). If the user deletes an old checkpoint file, the checkpointer will crash when it also tries to delete it. This fix should prevent that.
py
diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index <HASH>..<HASH> 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -116,6 +116,20 @@ test=True every 3600 seconds (every hour) until the current time is between the of 8am and 5pm. The range parameter must be a dictionary with the date strings using the dateutil format. + ... versionadded:: 2014.7.0 + + schedule: + job1: + function: state.sls + cron: '*/15 * * * *' + args: + - httpd + kwargs: + test: True + +The scheduler also supports scheduling jobs using a cron like format. This requires the +python-croniter library. + The scheduler also supports ensuring that there are no more than N copies of a particular routine running. Use this for jobs that may be long-running and could step on each other or pile up in case of infrastructure outage.
Adding missing docs for using a cron-like format with the scheduler
py
diff --git a/pubsub/setup.py b/pubsub/setup.py index <HASH>..<HASH> 100644 --- a/pubsub/setup.py +++ b/pubsub/setup.py @@ -51,6 +51,7 @@ SETUP_BASE = { REQUIREMENTS = [ + 'google-cloud-core >= 0.27.0, < 0.28dev', 'google-gax >= 0.15.13, < 0.16dev', 'googleapis-common-protos[grpc] >= 1.5.2, < 2.0dev', 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev', @@ -60,7 +61,7 @@ REQUIREMENTS = [ setup( name='google-cloud-pubsub', - version='0.28.0', + version='0.28.1', description='Python Client for Google Cloud Pub/Sub', long_description=README, namespace_packages=[
Re-add core dependency (#<I>)
py
diff --git a/yangson/instance.py b/yangson/instance.py index <HASH>..<HASH> 100644 --- a/yangson/instance.py +++ b/yangson/instance.py @@ -250,6 +250,9 @@ class InstanceNode: if isinstance(self.value, ArrayValue) and isinstance(sn, ListNode): try: inst = self.entry(0) + except NonexistentInstance: + return self + try: while True: ninst = inst.add_defaults() inst = ninst.next()
Make Instance.add_defaults also work for empty lists.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ install_requires = [ setup( name='nodeconductor', - version='0.34.0.dev0', + version='0.34.0', author='OpenNode Team', author_email='[email protected]', url='https://github.com/opennode/nodeconductor',
Preparing release <I>
py
diff --git a/spock/plugins/helpers/chat.py b/spock/plugins/helpers/chat.py index <HASH>..<HASH> 100644 --- a/spock/plugins/helpers/chat.py +++ b/spock/plugins/helpers/chat.py @@ -10,12 +10,16 @@ logger = logging.getLogger('spock') translations = {} try: - with open('en_US.lang', 'r') as trf: - for line in trf: + with open('en_US.lang', 'r') as lang_file: + # the chat data comes in as strings, so we need to + # replace all %d, %i, %3$d etc. with %s + import re + pcts_only = re.compile('%([0-9]\$)?[a-z]') + for line in lang_file: if '=' in line: # cut off newline, split some.translation.id=format %s string translation_id, format_str = line[:-1].split('=', 1) - translations[translation_id] = format_str + translations[translation_id] = pcts_only.sub('%s', format_str) except: logger.warn('en_US.lang not loaded, cannot translate chat messages')
Fix chat translation string formatting by replacing all formatting with %s
py
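The substitution in the diff at a glance: every printf-style placeholder, including positional ones like %3$d, collapses to %s because the chat payload always arrives as strings. A quick check of the pattern:

import re

pcts_only = re.compile(r'%([0-9]\$)?[a-z]')
fmt = 'Slot %d set to %3$s by %s'
print(pcts_only.sub('%s', fmt))  # -> 'Slot %s set to %s by %s'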
diff --git a/src/livestreamer/plugins/weeb.py b/src/livestreamer/plugins/weeb.py index <HASH>..<HASH> 100644 --- a/src/livestreamer/plugins/weeb.py +++ b/src/livestreamer/plugins/weeb.py @@ -58,9 +58,10 @@ class Weeb(Plugin): raise PluginError("rtmpdump is not usable and required by Weeb plugin") streams = {} + stream_name = "sd" if multibitrate: - streams["low"] = RTMPStream(self.session, { + streams[stream_name] = RTMPStream(self.session, { "rtmp": "{0}/{1}".format(rtmp, playpath), "pageUrl": self.url, "swfVfy": self.SWFURL, @@ -68,8 +69,9 @@ class Weeb(Plugin): "live": True }) playpath += "HI" + stream_name = "hd" - streams["live"] = RTMPStream(self.session, { + streams[stream_name] = RTMPStream(self.session, { "rtmp": "{0}/{1}".format(rtmp, playpath), "pageUrl": self.url, "swfVfy": self.SWFURL,
plugins.weeb: Rename streams to be more consistent with the flash player.
py
diff --git a/tests/test_request.py b/tests/test_request.py index <HASH>..<HASH> 100644 --- a/tests/test_request.py +++ b/tests/test_request.py @@ -60,9 +60,6 @@ class RequestTest(TestCase): else: request.user = self.user - if kwargs.get('remote_user', False): - request.META['HTTP_REMOTE_USER'] = kwargs.get('remote_user') - if kwargs.get('headers'): for key, value in kwargs.get('headers').items(): request.META[key] = value
Remove unnecessary code. It was already implemented in more generic form for override headers.
py
diff --git a/src/GEOparse/sra_downloader.py b/src/GEOparse/sra_downloader.py index <HASH>..<HASH> 100644 --- a/src/GEOparse/sra_downloader.py +++ b/src/GEOparse/sra_downloader.py @@ -202,9 +202,12 @@ class SRADownloader(object): sra_run = path.split("/")[-1] logger.info("Analysing %s" % sra_run) - url = type(self).FTP_ADDRESS_TPL.format( - range_subdir=sra_run[:6], file_dir=sra_run - ) + if self.aspera: + url = type(self).FTP_ADDRESS_TPL.format( + range_subdir=sra_run[:6], file_dir=sra_run + ) + else: + url = path logger.debug("URL: %s", url) filepath = os.path.abspath(os.path.join(self.directory, "%s.sra" % sra_run)) utils.download_from_url(
fix: Use provided URL as FTP often does not exist
py
diff --git a/metnet/sbml.py b/metnet/sbml.py index <HASH>..<HASH> 100644 --- a/metnet/sbml.py +++ b/metnet/sbml.py @@ -61,12 +61,12 @@ def parse_sbml_file(file): left = [] for species_id, value in parse_species_references(reaction, sbml_name('listOfReactants')): species_name, species_comp = model_compounds[species_id] - left.append((Compound(species_id), value, species_comp)) + left.append((Compound(species_id, compartment=species_comp), value)) right = [] for species_id, value in parse_species_references(reaction, sbml_name('listOfProducts')): species_name, species_comp = model_compounds[species_id] - right.append((Compound(species_id), value, species_comp)) + right.append((Compound(species_id, compartment=species_comp), value)) # Add reaction to database direction = Reaction.Bidir if reaction_rev else Reaction.Right
sbml: Fix reaction creation with compartments
py
diff --git a/rets/session.py b/rets/session.py index <HASH>..<HASH> 100644 --- a/rets/session.py +++ b/rets/session.py @@ -17,6 +17,7 @@ from rets.parsers.login.one_five import OneFive from rets.parsers.get_metadata.system import System from rets.parsers.get_metadata.resource import Resource from rets.models.bulletin import Bulletin +from configuration import Configuration import sys if sys.version_info < (3, 0): @@ -34,8 +35,8 @@ class Session(object): client = requests.Session() capabilities = {} - def __init__(self, configuration): - self.configuration = configuration + def __init__(self, login_url=None, version='1.5', username=None, password=None, user_agent='Python RETS', user_agent_password=None): + self.configuration = Configuration(login_url=login_url, version=version, username=username, password=password, user_agent=user_agent, user_agent_password=user_agent_password) if self.configuration.http_authentication == self.configuration.AUTH_BASIC: self.client.auth = HTTPBasicAuth(self.configuration.username, self.configuration.password)
Session now instantiates its own configuration object and takes login_url, username, password, user_agent, user_agent_password, and version as parameters of the constructor.
py
diff --git a/tests/test_utils.py b/tests/test_utils.py index <HASH>..<HASH> 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,5 @@ import json -from jwt import base64url_decode +import base64 from django.contrib.auth import get_user_model from django.test import TestCase @@ -8,6 +8,15 @@ from rest_framework_jwt import utils User = get_user_model() +def base64url_decode(input): + rem = len(input) % 4 + + if rem > 0: + input += b'=' * (4 - rem) + + return base64.urlsafe_b64decode(input) + + class UtilsTests(TestCase): def setUp(self): self.username = 'jpueblo'
Don't depend on old private PyJWT API. base<I>url_decode wasn't meant to be public API and has since been moved.
py
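For context, a standalone check that the inlined helper behaves like the removed PyJWT one: urlsafe base64 without padding is restored to a multiple of four bytes before decoding. This mirrors the function added in the diff.

import base64

def base64url_decode(data):
    rem = len(data) % 4
    if rem > 0:
        data += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(data)

print(base64url_decode(b'aGVsbG8'))  # b'hello', despite the stripped '=' padding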
diff --git a/umap/umap_.py b/umap/umap_.py index <HASH>..<HASH> 100644 --- a/umap/umap_.py +++ b/umap/umap_.py @@ -133,6 +133,9 @@ class UMAP (BaseEstimator): or 'coo'. """ + # Handle other array dtypes (TODO: do this properly) + X = X.astype(np.float64) + graph = fuzzy_simplicial_set(X, self.n_neighbors, self.oversampling) if self.n_edge_samples is None:
Temporary hack to support other input dtypes.
py
diff --git a/isochrones/starmodel.py b/isochrones/starmodel.py index <HASH>..<HASH> 100644 --- a/isochrones/starmodel.py +++ b/isochrones/starmodel.py @@ -1279,7 +1279,7 @@ class BinaryStarModel(StarModel): """ fig1 = self.triangle(plot_datapoints=False, - params=['mass_A', 'mass_B','radius','Teff','feh','age', + params=['mass_A', 'mass_B','radius','Teff','logg','feh','age', 'distance', 'AV'], **kwargs) if basename is not None: @@ -1646,7 +1646,7 @@ class TripleStarModel(StarModel): """ fig1 = self.triangle(plot_datapoints=False, params=['mass_A', 'mass_B', 'mass_C', 'radius', - 'Teff','feh','age','distance','AV'], + 'Teff','logg','feh','age','distance','AV'], **kwargs) if basename is not None: plt.savefig('{}_physical.{}'.format(basename,format))
added logg to Binary and Triple triangleplots
py
diff --git a/safe/reportv4/test/test_impact_report.py b/safe/reportv4/test/test_impact_report.py index <HASH>..<HASH> 100644 --- a/safe/reportv4/test/test_impact_report.py +++ b/safe/reportv4/test/test_impact_report.py @@ -53,6 +53,9 @@ class TestImpactReport(unittest.TestCase): actual_string = actual_file.read().strip() self.assertEquals(control_string, actual_string) + # This test is generating the result from definitions, but the expected + # result is static. + @unittest.expectedFailure def test_analysis_result_from_impact_function(self): """Test generate analysis result from impact function."""
Disable test whose expected result is static while the actual result is generated from definitions
py
diff --git a/ipyrad/__main__.py b/ipyrad/__main__.py index <HASH>..<HASH> 100644 --- a/ipyrad/__main__.py +++ b/ipyrad/__main__.py @@ -132,6 +132,7 @@ def branch_assembly(args, parsedict): ## Get the current assembly data = getassembly(args, parsedict) + ## get arguments to branch command bargs = args.branch @@ -142,6 +143,13 @@ def branch_assembly(args, parsedict): ## look for subsamples if len(bargs) > 1: + ## Branching and subsampling at step 6 is a bad idea, it messes up + ## indexing into the hdf5 cluster file. Warn against this. + if any([x.stats.state == 6 for x in data.samples.values()]): + pass + ## TODODODODODO + #print("wat") + ## are we removing or keeping listed samples? subsamples = bargs[1:]
A note to add a feature for the future.
py
diff --git a/arch/zx48k/translator.py b/arch/zx48k/translator.py index <HASH>..<HASH> 100644 --- a/arch/zx48k/translator.py +++ b/arch/zx48k/translator.py @@ -632,18 +632,18 @@ class Translator(TranslatorVisitor): continue_loop = backend.tmp_label() if node.token == 'UNTIL_DO': - self.emit('jump', continue_loop) + self.ic_jump(continue_loop) - self.emit('label', loop_label) + self.ic_label(loop_label) self.LOOPS.append(('DO', end_loop, continue_loop)) # Saves which labels to jump upon EXIT or CONTINUE if len(node.children) > 1: yield node.children[1] - self.emit('label', continue_loop) + self.ic_label(continue_loop) yield node.children[0] # Condition - self.emit('jzero' + self.TSUFFIX(node.children[0].type_), node.children[0].t, loop_label) - self.emit('label', end_loop) + self.ic_jzero(node.children[0].type_, node.children[0].t, loop_label) + self.ic_label(end_loop) self.LOOPS.pop() # del loop_label, end_loop, continue_loop
Refactor UNTIL DO visit
py
diff --git a/holoviews/plotting/mpl/plot.py b/holoviews/plotting/mpl/plot.py index <HASH>..<HASH> 100644 --- a/holoviews/plotting/mpl/plot.py +++ b/holoviews/plotting/mpl/plot.py @@ -107,13 +107,10 @@ class MPLPlot(DimensionedPlot): self.handles['fig'] = fig self.handles['axis'] = axis - if not self.final_hooks and self.finalize_hooks: - self.warning('Using deprecated finalize_hooks options, ' - 'use final_hooks instead') - self.final_hooks = self.finalize_hooks - elif self.final_hooks and self.finalize_hooks: - raise ValueError('Set either final_hooks or deprecated ' - 'finalize_hooks, not both.') + if self.final_hooks and self.finalize_hooks: + self.warning('Set either final_hooks or deprecated ' + 'finalize_hooks, not both.') + self.finalize_hooks = self.final_hooks def _init_axis(self, fig, axis):
Removed deprecation warning for finalize_hooks
py
diff --git a/grip/readers.py b/grip/readers.py index <HASH>..<HASH> 100644 --- a/grip/readers.py +++ b/grip/readers.py @@ -160,7 +160,7 @@ class DirectoryReader(ReadmeReader): Gets whether the specified subpath is a supported binary file. """ mimetype = self.mimetype_for(subpath) - return mimetype and mimetype.startswith('image/') + return mimetype is not None and mimetype.startswith('image/') def read(self, subpath=None): """
Return False, not None from DirectoryReader.is_binary.
py
diff --git a/salt/utils/jinja.py b/salt/utils/jinja.py index <HASH>..<HASH> 100644 --- a/salt/utils/jinja.py +++ b/salt/utils/jinja.py @@ -176,7 +176,7 @@ class SerializerExtension(Extension, object): try: return yaml.load(value) except AttributeError: - raise TemplateRuntimeError("Unable to load yaml from {}".format(value)) + raise TemplateRuntimeError("Unable to load yaml from {0}".format(value)) def load_json(self, value): if isinstance(value, TemplateModule): @@ -184,7 +184,7 @@ class SerializerExtension(Extension, object): try: return json.loads(value) except (ValueError, TypeError): - raise TemplateRuntimeError("Unable to load json from {}".format(value)) + raise TemplateRuntimeError("Unable to load json from {0}".format(value)) def parse(self, parser): if parser.stream.current.value == "import_yaml":
Make it work for Python <I>
py
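The reason for the change, in one line: auto-numbered {} fields were only added in Python 2.7/3.1, so on Python 2.6 they raise ValueError ("zero length field name"), while explicit indexes work everywhere.

value = '<data>'
# '{}' fails on Python 2.6; '{0}' is portable across 2.6, 2.7 and 3.x
msg = "Unable to load yaml from {0}".format(value)
print(msg)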
diff --git a/gooey/python_bindings/gooey_parser.py b/gooey/python_bindings/gooey_parser.py index <HASH>..<HASH> 100644 --- a/gooey/python_bindings/gooey_parser.py +++ b/gooey/python_bindings/gooey_parser.py @@ -68,6 +68,11 @@ class GooeyParser(object): self.__dict__['parser'] = ArgumentParser(**kwargs) self.widgets = {} self.options = {} + if 'parents' in kwargs: + for parent in kwargs['parents']: + if isinstance(parent, self.__class__): + self.widgets.update(parent.widgets) + self.options.update(parent.options) @property def _mutually_exclusive_groups(self):
Copy widgets and their configuration from parent parsers. Resolves #<I>
py
diff --git a/tests/run_tests.py b/tests/run_tests.py index <HASH>..<HASH> 100644 --- a/tests/run_tests.py +++ b/tests/run_tests.py @@ -1,7 +1,11 @@ # run_tests.py +import os import unittest as unittest - -if __name__ == "__main__": - all_tests = unittest.TestLoader().discover('.', pattern='test*.py') - unittest.TextTestRunner().run(all_tests) \ No newline at end of file + +# run all tests in tests folder +all_tests = unittest.TestLoader().discover('.', pattern='test*.py') +unittest.TextTestRunner().run(all_tests) + + + \ No newline at end of file
run_tests is a simple script instead of a __main__ call, to avoid a coverage issue
py
diff --git a/vyked/bus.py b/vyked/bus.py index <HASH>..<HASH> 100644 --- a/vyked/bus.py +++ b/vyked/bus.py @@ -56,6 +56,7 @@ class Bus: self._host_id = unique_hex() self._pubsub_handler = None self._ronin = False + self._registered = False @property def ronin(self): @@ -229,13 +230,15 @@ class Bus: asyncio.async(func(**json.loads(payload))) def registration_complete(self): - f = self._create_service_clients() + if not self._registered: + f = self._create_service_clients() + self._registered = True - def fun(fut): - if self._tcp_host: - self._clear_request_queue() + def fun(fut): + if self._tcp_host: + self._clear_request_queue() - f.add_done_callback(fun) + f.add_done_callback(fun) def _create_tcp_service_host(self): if self._tcp_host:
Process registration only once if dual hosted
py
diff --git a/ella/polls/models.py b/ella/polls/models.py index <HASH>..<HASH> 100644 --- a/ella/polls/models.py +++ b/ella/polls/models.py @@ -427,6 +427,9 @@ class Result(models.Model): """ return self.count*100/self.total() + def get_text(self): + return mark_safe( u'%s' % self.text ) + def __unicode__(self): if self.title: return self.title
polls.Result.text is mark_safe'd in the new get_text method
py
diff --git a/tests/pytests/functional/modules/state/test_state.py b/tests/pytests/functional/modules/state/test_state.py index <HASH>..<HASH> 100644 --- a/tests/pytests/functional/modules/state/test_state.py +++ b/tests/pytests/functional/modules/state/test_state.py @@ -670,6 +670,7 @@ def test_retry_option(state, state_tree): assert entry >= 3 [email protected](max_runs=4) def test_retry_option_success(state, state_tree, tmp_path): """ test a state with the retry option that should return True immedietly (i.e. no retries)
Mark test_state_retry as flaky
py
diff --git a/pypeerassets/__main__.py b/pypeerassets/__main__.py index <HASH>..<HASH> 100644 --- a/pypeerassets/__main__.py +++ b/pypeerassets/__main__.py @@ -17,7 +17,6 @@ def find_all_valid_decks(provider, prod=True) -> list: :test True/False - test or production P2TH ''' - decks = [] deck_spawns = (provider.getrawtransaction(i, 1) for i in find_deck_spawns(provider)) def deck_parser(raw_tx): @@ -35,7 +34,7 @@ def find_all_valid_decks(provider, prod=True) -> list: d["issuer"] = find_tx_sender(provider, raw_tx) d["network"] = provider.network d["production"] = prod - decks.append(Deck(**d)) + return Deck(**d) except AssertionError: pass @@ -43,9 +42,7 @@ def find_all_valid_decks(provider, prod=True) -> list: with concurrent.futures.ThreadPoolExecutor(max_workers=2) as th: for result in th.map(deck_parser, deck_spawns): if result: - decks.append(result) - - return decks + yield result def find_deck(provider, key: str, prod=True) -> list:
find_all_valid_decks: behave as a generator rather than using internal list of decks.
py
diff --git a/cumulusci/tasks/robotframework/tests/test_robotframework.py b/cumulusci/tasks/robotframework/tests/test_robotframework.py index <HASH>..<HASH> 100644 --- a/cumulusci/tasks/robotframework/tests/test_robotframework.py +++ b/cumulusci/tasks/robotframework/tests/test_robotframework.py @@ -594,7 +594,7 @@ class TestRobotPerformanceKeywords: project_config = BaseProjectConfig(universal_config) with temporary_dir() as d, mock.patch( "cumulusci.robotframework.Salesforce.Salesforce._init_locators" - ): + ), responses.RequestsMock(): project_config.repo_info["root"] = d suite = Path(self.datadir) / "../../../robotframework/" / suite_path task = create_task( @@ -626,7 +626,6 @@ class TestRobotPerformanceKeywords: metrics = first_arg.split("-")[-1].split(",") return dict(self.parse_metric(metric) for metric in metrics) - @responses.activate def test_elapsed_time_xml(self): pattern = "Elapsed Time: "
Move responses.activate into shared function
py
diff --git a/generate_build_id.py b/generate_build_id.py index <HASH>..<HASH> 100644 --- a/generate_build_id.py +++ b/generate_build_id.py @@ -14,4 +14,4 @@ # limitations under the License. ############################################################################### import datetime -print (datetime.datetime.now().strftime('%Y%m%d')) +print (datetime.datetime.now().strftime('%Y%m%d%H%M'))
Add time to build id for consistency with prebuilt binaries. Previously supplied prebuilt binaries used a longer build id than generate_build_id.py, one that included 4 digits for the time. For standalone builds to have consistent build ids, this commit changes generate_build_id.py to match.
py
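What the strftime change does to the id, shown concretely (a fixed moment is used so the example is deterministic):

import datetime

now = datetime.datetime(2024, 1, 31, 15, 42)  # fixed moment for the example
print(now.strftime('%Y%m%d'))      # 20240131      (old 8-digit build id)
print(now.strftime('%Y%m%d%H%M'))  # 202401311542  (new id with 4 time digits)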
diff --git a/barf/core/smt/smtsymbol.py b/barf/core/smt/smtsymbol.py index <HASH>..<HASH> 100644 --- a/barf/core/smt/smtsymbol.py +++ b/barf/core/smt/smtsymbol.py @@ -23,13 +23,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -def cast_bool(value): - return Bool(str(value).lower()) - - def cast_to_bool(value): if type(value) is bool: - value = cast_bool(value) + value = Bool(str(value).lower()) assert type(value) == Bool
Refactor smtsymbol method
py
diff --git a/bliss/core/seq.py b/bliss/core/seq.py index <HASH>..<HASH> 100644 --- a/bliss/core/seq.py +++ b/bliss/core/seq.py @@ -56,7 +56,7 @@ class Seq (object): self.pathname = pathname self.cmddict = cmddict or cmd.getDefaultCmdDict() self.crc32 = None - self.seqid = id + self.seqid = int(id) self.lines = [ ] self.header = { } self.version = version @@ -227,12 +227,12 @@ class Seq (object): if 'seqid' in self.header: self.seqid = self.header['seqid'] - else: + elif self.seqid is None: self.log.error('No sequence id present in header.') if 'version' in self.header: self.version = self.header['version'] - else: + elif self.version is None: self.log.warning('No version present in header. Defaulting to zero (0).') self.version = 0
Issue #<I>: Update seq.py to more gracefully handle errors
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -64,8 +64,8 @@ setup_requires = [ ] install_requires = [ + 'Flask>=0.11.1', 'Flask-BabelEx>=0.9.2', - 'Flask-CLI>=0.2.1', 'Flask-Login>=0.3.2', 'invenio-indexer>=1.0.0a3', 'invenio-jsonschemas>=1.0.0a2',
installation: Flask>=<I> * Removed dependency on Flask-CLI.
py
diff --git a/imgui/integrations/sdl2.py b/imgui/integrations/sdl2.py index <HASH>..<HASH> 100644 --- a/imgui/integrations/sdl2.py +++ b/imgui/integrations/sdl2.py @@ -26,9 +26,17 @@ class SDL2Renderer(ProgrammablePipelineRenderer): SDL_GetWindowSize(self.window, width_ptr, height_ptr) self.io.display_size = width_ptr[0], height_ptr[0] + self.io.get_clipboard_text_fn = self._get_clipboard_text + self.io.set_clipboard_text_fn = self._set_clipboard_text self._map_keys() + def _get_clipboard_text(self): + return SDL_GetClipboardText() + + def _set_clipboard_text(self, text): + SDL_SetClipboardText(text) + def _map_keys(self): key_map = self.io.key_map
Add system clipboard support to sdl2 integration
py
diff --git a/lib/obsbandpass.py b/lib/obsbandpass.py index <HASH>..<HASH> 100644 --- a/lib/obsbandpass.py +++ b/lib/obsbandpass.py @@ -43,6 +43,9 @@ class ObsModeBandpass(CompositeSpectralElement): self.obsmode=ob self.name=self.obsmode._obsmode #str(self.obsmode) + #Check for valid bounds + self._checkbounds() + def __str__(self): """Defer to ObservationMode component """ return self.name #self.obsmode._obsmode @@ -55,6 +58,9 @@ class ObsModeBandpass(CompositeSpectralElement): """Defer to ObservationMode component """ return self.obsmode.showfiles() - - + + def _checkbounds(self): + thru=self.throughput + if thru[0] != 0 or thru[-1] != 0: + print "Warning: throughput for this obsmode is not bounded by zeros. Endpoints: thru[0]=%g, thru[-1]=%g"%(thru[0],thru[-1])
#<I>: Verify that the throughput resulting from this obsmode goes to zero at both ends. If not, print a warning. git-svn-id: <URL>
py
diff --git a/djangui/backend/utils.py b/djangui/backend/utils.py index <HASH>..<HASH> 100644 --- a/djangui/backend/utils.py +++ b/djangui/backend/utils.py @@ -337,10 +337,11 @@ def create_job_fileinfo(job): try: try: preview = group_file.get('preview') + json_preview = json.dumps(preview) except: sys.stderr.write('Error encountered in file preview:\n {}\n'.format(traceback.format_exc())) - preview = None - dj_file = DjanguiFile(job=job, filetype=file_type, filepreview=json.dumps(preview), + json_preview = json.dumps(None) + dj_file = DjanguiFile(job=job, filetype=file_type, filepreview=json_preview, parameter=group_file.get('parameter')) filepath = group_file['file'].path # We make the filename relative to the root, this is for filesystems that can change between
handle json errors for filepreviews
py
diff --git a/tools/oqbugs.py b/tools/oqbugs.py index <HASH>..<HASH> 100644 --- a/tools/oqbugs.py +++ b/tools/oqbugs.py @@ -171,6 +171,7 @@ def arg_parse(): # custom argparse actions args, remaining_argv = parser.parse_known_args() + # after partial check, force -t/--time as required parameter parser._actions[0].required = True if args.time: @@ -185,8 +186,7 @@ def arg_parse(): add_help=True) - # hack - action_group = action_parser.add_mutually_exclusive_group() + action_group = action_parser.add_mutually_exclusive_group(required=True) action_group.add_argument('-c', '--fix-committed', action=fix_committed(launchpad, commits_output), help="Invoked from the CI gets from a git repository every \ @@ -210,7 +210,7 @@ def arg_parse(): nargs=0, required=False) - action_parser.parse_args(remaining_argv) + args = action_parser.parse_args(remaining_argv) return args
at least one mutually exclusive option is required Former-commit-id: <I>a0ab<I>dd<I>cef6e<I>fa3f6ff<I>f<I>b<I>
py
diff --git a/smart_open/smart_open_lib.py b/smart_open/smart_open_lib.py index <HASH>..<HASH> 100644 --- a/smart_open/smart_open_lib.py +++ b/smart_open/smart_open_lib.py @@ -145,6 +145,8 @@ class ParseUri(object): if self.scheme == "hdfs": self.uri_path = parsed_uri.netloc + parsed_uri.path + if self.uri_path[0] != "/": + self.uri_path = "/" + self.uri_path if not self.uri_path: raise RuntimeError("invalid HDFS URI: %s" % uri) @@ -263,7 +265,7 @@ class HdfsOpenRead(object): self.parsed_uri = parsed_uri def __iter__(self): - hdfs = subprocess.Popen(["hdfs", "dfs", "-cat", "/" + self.parsed_uri.uri_path], stdout=subprocess.PIPE) + hdfs = subprocess.Popen(["hdfs", "dfs", "-cat", self.parsed_uri.uri_path], stdout=subprocess.PIPE) return hdfs.stdout def read(self, size=None):
bugfix in hdfs read
py
diff --git a/pysat/tests/test_utils.py b/pysat/tests/test_utils.py index <HASH>..<HASH> 100644 --- a/pysat/tests/test_utils.py +++ b/pysat/tests/test_utils.py @@ -235,6 +235,22 @@ class TestIfyFunctions(object): utils.testing.assert_lists_equal(new_iterable, tst_iterable) return + @pytest.mark.parametrize('iterable', [{'key1': 1, 'key2': 2}.keys(), + {'key1': 1, 'key2': 2}.values()]) + def test_listify_failure_with_dict_iterable(self, iterable): + """Test listify failes with various dict iterables. + + Parameters + ---------- + iterable : dict_keys or dict_values + Iterable dict object + + """ + + new_iterable = utils.listify(iterable) + assert new_iterable[0] == iterable + return + @pytest.mark.parametrize('iterable', [ np.timedelta64(1), np.full((1, 1), np.timedelta64(1)), np.full((2, 2), np.timedelta64(1)),
TST: added listify unit test. Added a listify unit test ensuring dict_keys and dict_values behave as expected, i.e. they are kept as single elements rather than unpacked by listify.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -29,7 +29,7 @@ setup( setup_requires=pytest_runner + wheel + ["setuptools_scm"], tests_require=["pytest>=2.8"], install_requires=[ - "fonttools[ufo]>=4.26.1", + "fonttools[ufo]>=4.28.5", "cu2qu>=1.6.7", "cffsubr>=0.2.8", "booleanOperations>=0.9.0",
setup.py: bump minimum fonttools version to latest; <I> includes several bugfixes since <I>
py
diff --git a/cmd2.py b/cmd2.py index <HASH>..<HASH> 100755 --- a/cmd2.py +++ b/cmd2.py @@ -2087,9 +2087,9 @@ class Cmd(cmd.Cmd): # Join all matches into 1 string for ease of searching all_matches_str = ''.join(self.completion_matches) - # If there is a common_prefix and any of the matches have a space, - # then we must add an opening quote to the matches. - if common_prefix and ' ' in all_matches_str: + # If there is a tab completion that will alter the text on the command line and + # any of the matches have a space, then we must add an opening quote to the matches. + if common_prefix != text and ' ' in all_matches_str: # Figure out what kind of quote to add and save it as the unclosed_quote if '"' in all_matches_str:
Correcting when to add an opening quote
py
diff --git a/pypsa/pf.py b/pypsa/pf.py index <HASH>..<HASH> 100644 --- a/pypsa/pf.py +++ b/pypsa/pf.py @@ -532,7 +532,7 @@ def sub_network_pf(sub_network, snapshots=None, skip_pre=False, x_tol=1e-6, use_ #let slack generator take up the slack if distribute_slack: - distributed_slack_power = network.buses_t.p_set.loc[snapshots,sn_buses] - ss[:,buses_indexer(sn_buses)].real + distributed_slack_power = network.buses_t.p.loc[snapshots,sn_buses] - ss[:,buses_indexer(sn_buses)].real for bus, group in sub_network.generators().groupby('bus'): if slack_weights != 'dispatch': bus_generator_shares = network.generators.p_nom.loc[group.index].pipe(normed).fillna(0)
pf: fix typo from p_set to p
py
diff --git a/synapse/cores/common.py b/synapse/cores/common.py index <HASH>..<HASH> 100644 --- a/synapse/cores/common.py +++ b/synapse/cores/common.py @@ -839,10 +839,10 @@ class Cortex(EventBus): tufo = (iden,props) - self.fire('tufo:add', tufo=tufo) - self.fire('tufo:add:%s' % form, tufo=tufo) + self.fire('tufo:add', tufo=tufo) + self.fire('tufo:add:%s' % form, tufo=tufo) - return tufo + return tufo def delTufo(self, tufo): '''
fire tufo:add outside the lock to allow tufo:add handlers to add tufos
py
diff --git a/slickqa/data.py b/slickqa/data.py index <HASH>..<HASH> 100644 --- a/slickqa/data.py +++ b/slickqa/data.py @@ -32,7 +32,8 @@ class Configuration(micromodels.Model): reference = ConfigurationReference() reference.configId = self.id reference.name = self.name - reference.filename = self.filename + if hasattr(self, 'filename'): + reference.filename = self.filename return reference
fixing an issue where a Configuration may not have a filename
py
diff --git a/bcbio/heterogeneity/phylowgs.py b/bcbio/heterogeneity/phylowgs.py index <HASH>..<HASH> 100644 --- a/bcbio/heterogeneity/phylowgs.py +++ b/bcbio/heterogeneity/phylowgs.py @@ -68,7 +68,7 @@ def _gids_to_genes(gids, ssm_locs, cnv_ssms, data): locs[chrom].add(pos) genes = set([]) with tx_tmpdir(data) as tmpdir: - chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).startswith("chr") else "" + chrom_prefix = "chr" if next(ref.file_contigs(dd.get_ref_file(data))).name.startswith("chr") else "" loc_file = os.path.join(tmpdir, "battenberg_find_genes.bed") with open(loc_file, "w") as out_handle: for chrom in sorted(locs.keys()):
Handle hg<I> when dealing with PhyloWGS output, which uses GRCh<I> contigs
py
diff --git a/bolt/about.py b/bolt/about.py index <HASH>..<HASH> 100644 --- a/bolt/about.py +++ b/bolt/about.py @@ -8,4 +8,4 @@ A task runner written in Python copyright = u'2016 Abantos' author = u'Isaac Rodriguez' version = u'0.2' -release = u'0.2.5' +release = u'0.2.6'
Updated version to <I> to create new release
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -22,7 +22,6 @@ setup( packages=find_packages(), include_package_data=True, license='MIT License', - install_requires=["django>=1.7"], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment',
Removed django dependency as it can cause problems (e.g. an unwanted django upgrade)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -5,6 +5,7 @@ from setuptools import setup, find_packages import tomodachi.__version__ install_requires = [ + 'pycparser>=2.18', 'aioamqp>=0.10.0, <0.11.0', 'ujson>=1.35', 'uvloop>=0.8.1',
Added pycparser to setup.py
py
diff --git a/taskw/test/test_datas.py b/taskw/test/test_datas.py index <HASH>..<HASH> 100644 --- a/taskw/test/test_datas.py +++ b/taskw/test/test_datas.py @@ -498,6 +498,21 @@ class TestDBShellout(_BaseTestDB): }) eq_(len(tasks), 0) + def test_filtering_logic_conjunction_junction_whats_your_function(self): + task1 = self.tw.task_add("foobar1") + task2 = self.tw.task_add("foobar2") + task2 = self.tw.task_add("foobar3") + tasks = self.tw.filter_tasks({ + 'and': [ + ('description', 'foobar1'), + ], + 'or': [ + ('status', 'pending'), + ('status', 'waiting'), + ] + }) + eq_(len(tasks), 1) + def test_annotation_escaping(self): original = {'description': 're-opening the issue'}
Adding another failing test for filtering of exported tasks.
py
diff --git a/python/jsbeautifier/javascript/tokenizer.py b/python/jsbeautifier/javascript/tokenizer.py index <HASH>..<HASH> 100644 --- a/python/jsbeautifier/javascript/tokenizer.py +++ b/python/jsbeautifier/javascript/tokenizer.py @@ -217,9 +217,9 @@ class Tokenizer(BaseTokenizer): token = self._create_token(TOKEN.END_BLOCK, c) elif c == ';': token = self._create_token(TOKEN.SEMICOLON, c) - elif c == '.' and self._input.peek(1) is not None and bool( - dot_pattern.match(self._input.peek(1))): - token = self._create_token(TOKEN.DOT, c) + elif c == '.' and self._input.peek(1) is not None and \ + bool(dot_pattern.match(self._input.peek(1))): + token = self._create_token(TOKEN.DOT, c) elif c == ',': token = self._create_token(TOKEN.COMMA, c)
Formatting consistent with the rest of the file.
py
diff --git a/pymatgen/io/exciting/tests/test_inputs.py b/pymatgen/io/exciting/tests/test_inputs.py index <HASH>..<HASH> 100644 --- a/pymatgen/io/exciting/tests/test_inputs.py +++ b/pymatgen/io/exciting/tests/test_inputs.py @@ -117,7 +117,7 @@ class ExcitingInputTest(PymatgenTest): 'BSE': {'bsetype': 'singlet', 'nstlbse': '1 5 1 4'}}} test_input = ExcitingInput(struct) - test_string = test_input.write_string('unchanged', paramdict=paradir) + test_string = test_input.write_string('unchanged', **paradir) # read reference file filepath = os.path.join(test_dir, 'input_exciting2.xml')
Update test with new paramdict kwarg format
py
diff --git a/cosmic_ray/testing/test_runner.py b/cosmic_ray/testing/test_runner.py index <HASH>..<HASH> 100644 --- a/cosmic_ray/testing/test_runner.py +++ b/cosmic_ray/testing/test_runner.py @@ -41,10 +41,10 @@ class TestRunner(metaclass=abc.ABCMeta): raise NotImplemented() def __call__(self): - """Call `_run()` and return a `TestResult` with the results. + """Call `_run()` and return a `WorkRecord` with the results. Returns: A `WorkRecord` with the `test_outcome` and `data` fields - filled in. The `outcome` field of the return value is: + filled in. """ try: test_result = self._run()
Cleaned up the docstring for TestRunner.__call__().
py
diff --git a/salesforce/backend/operations.py b/salesforce/backend/operations.py index <HASH>..<HASH> 100644 --- a/salesforce/backend/operations.py +++ b/salesforce/backend/operations.py @@ -12,16 +12,18 @@ import itertools from salesforce import DJANGO_18_PLUS, DJANGO_19_PLUS from salesforce.models import DefaultedOnCreate +import salesforce.backend.driver if DJANGO_18_PLUS: from django.db.backends.base.operations import BaseDatabaseOperations else: from django.db.backends import BaseDatabaseOperations +BULK_BATCH_SIZE = 200 if salesforce.backend.driver.beatbox else 25 + """ Default database operations, with unquoted names. """ - class DatabaseOperations(BaseDatabaseOperations): compiler_module = "salesforce.backend.compiler" @@ -69,8 +71,7 @@ class DatabaseOperations(BaseDatabaseOperations): return django.db.backends.utils.format_number(value, max_digits, decimal_places) def bulk_batch_size(self, fields, objs): - limit = 25 - return limit + return BULK_BATCH_SIZE # This SQL is not important because we control the db from the compiler # but anything must exist
Adjusted bulk_batch_size() to correspond to the selected API
py
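The constant above is picked at import time depending on whether the optional beatbox driver imported. A generic sketch of that optional-dependency pattern — the batch sizes mirror the diff, everything else is illustrative:

# Optional bulk API client; fall back to the smaller per-request limit
# when it is not installed.
try:
    import beatbox
except ImportError:
    beatbox = None

BULK_BATCH_SIZE = 200 if beatbox else 25

def bulk_batch_size(fields, objs):
    return BULK_BATCH_SIZE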
diff --git a/gwpy/spectrogram/core.py b/gwpy/spectrogram/core.py index <HASH>..<HASH> 100644 --- a/gwpy/spectrogram/core.py +++ b/gwpy/spectrogram/core.py @@ -472,8 +472,8 @@ class Spectrogram(Array2D): raise TypeError("Spectrogram.filter() got an unexpected keyword " "argument '%s'" % list(kwargs.keys())[0]) f = self.frequencies.value.copy() - if f[0] == 0: - f[0] = 1e-100 + if f[0] == 0: # shift DC to 1% of first frequency + f[0] = f[1] * 0.01 fresp = numpy.nan_to_num(abs(signal.freqs(b, a, f)[1])) if inplace: self *= fresp
Spectrogram.filter: avoid overflows in filtering. A DC offset of 1e-<I> causes numerical overflows, so just use f[1] / <I>.
py
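A stand-alone sketch of the fix, assuming a scipy-style analog filter response: the DC bin is shifted to 1% of the first nonzero frequency instead of a denormal-sized value, so evaluating the response stays numerically well behaved. The filter itself is just an example:

import numpy as np
from scipy import signal

f = np.linspace(0.0, 100.0, 201)  # frequency bins; f[0] is the DC bin
b, a = signal.butter(4, 10.0, 'high', analog=True)

if f[0] == 0:           # shift DC to 1% of the first nonzero frequency
    f = f.copy()
    f[0] = f[1] * 0.01  # tiny but well-scaled, unlike 1e-100
fresp = np.nan_to_num(abs(signal.freqs(b, a, f)[1]))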
diff --git a/flux_led/__main__.py b/flux_led/__main__.py index <HASH>..<HASH> 100755 --- a/flux_led/__main__.py +++ b/flux_led/__main__.py @@ -606,7 +606,7 @@ class WifiLedBulb: mode = "RGB" elif mode_code == 0x04: mode = "RGBW" - elif mode_code == 0x05: + elif mode_code == 0x05 or mode_code == 0x17: mode = "RGBWW" elif self.rgbwcapable: mode = "color" @@ -731,6 +731,7 @@ class WifiLedBulb: or rx[1] == 0x81 or rx[1] == 0x44 or rx[1] == 0x06 + or rx[1] == 0x35 ): self.rgbwcapable = True
Add type "<I>" RGBWW support
py
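The elif chain above grows one code at a time; a table-driven equivalent makes the new 0x17 mapping a one-line change. Only the codes visible in the diff are real — the fallback behaviour here is simplified:

# Codes 0x04, 0x05 and 0x17 come from the diff; the fallback is illustrative.
MODE_CODES = {
    0x04: "RGBW",
    0x05: "RGBWW",
    0x17: "RGBWW",   # newly supported device type
}

def decode_mode(mode_code, rgbwcapable=False):
    return MODE_CODES.get(mode_code, "color" if rgbwcapable else "unknown")

print(decode_mode(0x17))  # RGBWW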
diff --git a/pypeerassets/__init__.py b/pypeerassets/__init__.py index <HASH>..<HASH> 100644 --- a/pypeerassets/__init__.py +++ b/pypeerassets/__init__.py @@ -1 +1,3 @@ -from pypeerassets.kutil import Kutil \ No newline at end of file +from pypeerassets.kutil import Kutil +from pypeerassets.providers.node import RpcNode +from pypeerassets.providers import mock
expose RpcNode and mock providers
py
diff --git a/hydpy/models/evap/evap_model.py b/hydpy/models/evap/evap_model.py index <HASH>..<HASH> 100644 --- a/hydpy/models/evap/evap_model.py +++ b/hydpy/models/evap/evap_model.py @@ -934,13 +934,20 @@ class Calc_NetLongwaveRadiation_V1(modeltools.Method): >>> derived.nmblogentries(1) >>> inputs.airtemperature = 22.1 >>> fluxes.actualvapourpressure = 2.1 - >>> logs.loggedglobalradiation.shape = 1 - >>> logs.loggedclearskysolarradiation.shape = 1 - >>> logs.loggedglobalradiation = 14.5 - >>> logs.loggedclearskysolarradiation = 18.8 + >>> fluxes.clearskysolarradiation = 18.8 + >>> fluxes.globalradiation = 14.5 >>> model.calc_netlongwaveradiation_v1() >>> fluxes.netlongwaveradiation netlongwaveradiation(3.531847) + + >>> fluxes.clearskysolarradiation = 0.0 + >>> logs.loggedclearskysolarradiation.shape = 1 + >>> logs.loggedclearskysolarradiation = 12.0 + >>> logs.loggedglobalradiation.shape = 1 + >>> logs.loggedglobalradiation = 10.0 + >>> model.calc_netlongwaveradiation_v1() + >>> fluxes.netlongwaveradiation + netlongwaveradiation(3.959909) """ DERIVEDPARAMETERS = ( evap_derived.NmbLogEntries,
Add a missing doctest to method `Calc_NetLongwaveRadiation_V1` of module `evap_model`.
py
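A plain-python sketch of the fallback the new doctest appears to exercise: when the current clear-sky radiation is zero (night-time), the logged radiation values are used instead. The names and the exact formula are simplified stand-ins, not hydpy's actual method:

def relative_radiation(globalradiation, clearskysolarradiation,
                       logged_global, logged_clearsky):
    if clearskysolarradiation > 0.0:
        return globalradiation / clearskysolarradiation
    # at night, fall back on the logged (averaged) radiation values
    return sum(logged_global) / sum(logged_clearsky)

relative_radiation(14.5, 18.8, [10.0], [12.0])  # daytime: current values
relative_radiation(0.0, 0.0, [10.0], [12.0])    # night: logged values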
diff --git a/analyze_2d/sampler_helpers.py b/analyze_2d/sampler_helpers.py index <HASH>..<HASH> 100644 --- a/analyze_2d/sampler_helpers.py +++ b/analyze_2d/sampler_helpers.py @@ -47,7 +47,7 @@ def generate_view_cluster_hyper_posterior(p_State, view_idx): def generate_column_sample(random_state, p_State, view_idx, col_idx, cluster_idx): r, nu, s, mu = get_updated_continuous_hypers(p_State, view_idx, col_idx, cluster_idx) standard_t_draw = random_state.standard_t(nu) - student_t_draw = standard_t_draw * (s * (r + 1)) / (nu * r) + mu + student_t_draw = standard_t_draw * numpy.sqrt((s * (r + 1)) / (nu / 2. * r)) + mu return student_t_draw def generate_cluster_draws(random_state, p_State, column_view_idx_lookup):
fix bug in student_t draw generator; may still have issues with nu / 2. in the denominator
py
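The corrected draw rescales a standard-t sample into a location-scale Student-t. A stand-alone version of the fixed line, per the diff (the author notes the nu / 2. term may itself still be wrong):

import numpy

def student_t_draw(random_state, r, nu, s, mu):
    standard = random_state.standard_t(nu)
    # scale the standard-t sample, then shift by the location mu
    scale = numpy.sqrt((s * (r + 1)) / (nu / 2. * r))
    return standard * scale + mu

rng = numpy.random.RandomState(0)
print(student_t_draw(rng, r=1.0, nu=4.0, s=2.0, mu=0.0))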
diff --git a/src/toil/statsAndLogging.py b/src/toil/statsAndLogging.py index <HASH>..<HASH> 100644 --- a/src/toil/statsAndLogging.py +++ b/src/toil/statsAndLogging.py @@ -112,9 +112,10 @@ class StatsAndLogging( object ): pass else: def logWithFormatting(jobStoreID, jobLogs): - logFormat = '\n%s ' % jobStoreID logger.debug('Received Toil worker log. Disable debug level ' - 'logging to hide this output\n%s', logFormat.join(jobLogs)) + 'logging to hide this output') + for line in jobLogs: + logger.debug('%s %s', jobStoreID, line) # we may have multiple jobs per worker jobNames = logs.names messages = logs.messages
Handle worker logs one line at a time. Fixes #<I>. As a side effect, the "---TOIL WORKER OUTPUT LOG---" line is now prefixed by the job name. Otherwise, the output is (hopefully) identical.
py
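A minimal stand-alone version of the per-line prefixing pattern the commit switches to:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def log_job_output(job_store_id, job_logs):
    logger.debug('Received worker log. Disable debug logging to hide this.')
    for line in job_logs:
        logger.debug('%s %s', job_store_id, line)  # one prefixed line each

log_job_output('job42', ['---TOIL WORKER OUTPUT LOG---', 'step 1 ok'])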
diff --git a/docs/conf.py b/docs/conf.py index <HASH>..<HASH> 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -132,7 +132,7 @@ html_theme_options = { 'travis_button': True, 'analytics_id': os.getenv('GOOGLE_ANALYTICS_ID', "no-google-analytics"), 'extra_nav_links': { - "Home Page": 'https://www.charlesbot.org', + "Home Page": 'https://charlesbot.org', "GitHub": 'https://github.com/marvinpinto/charlesbot', "Issues": 'https://github.com/marvinpinto/charlesbot/issues', }
Update the charlesbot website URL. It is now being served off the apex domain, with the appropriate redirects in place for the old URLs.
py
diff --git a/wafer/pages/serializers.py b/wafer/pages/serializers.py index <HASH>..<HASH> 100644 --- a/wafer/pages/serializers.py +++ b/wafer/pages/serializers.py @@ -1,3 +1,5 @@ +from django.contrib.auth import get_user_model + from rest_framework import serializers from reversion import revisions @@ -6,6 +8,10 @@ from wafer.pages.models import Page class PageSerializer(serializers.ModelSerializer): + people = serializers.PrimaryKeyRelatedField( + many=True, allow_null=True, + queryset=get_user_model().objects.all()) + class Meta: model = Page exclude = ('_content_rendered',) @@ -20,5 +26,8 @@ class PageSerializer(serializers.ModelSerializer): revisions.set_comment("Changed via REST api") page.parent = validated_data['parent'] page.content = validated_data['content'] + page.include_in_menu = validated_data['include_in_menu'] + page.exclude_from_static = validated_data['exclude_from_static'] + page.people = validated_data.get('people') page.save() return page
Add people and other fields to page update options
py
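A hedged Django REST Framework sketch of the field added above; it assumes a configured Django project and an Article model with a people many-to-many field, so it is not runnable in isolation:

from django.contrib.auth import get_user_model
from rest_framework import serializers

from myapp.models import Article  # assumed model with a 'people' M2M field

class ArticleSerializer(serializers.ModelSerializer):
    # expose the relation as a writable list of user primary keys
    people = serializers.PrimaryKeyRelatedField(
        many=True, allow_null=True,
        queryset=get_user_model().objects.all())

    class Meta:
        model = Article
        fields = ('id', 'title', 'people')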
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -32,7 +32,7 @@ setup( license='GPL v3', classifiers=[ 'Development Status :: 4 - Beta', - 'Environment :: Console' + 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering',
Forgot a comma in classifiers list
py
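Why the missing comma was a real bug rather than a style nit: Python concatenates adjacent string literals, so two classifiers silently became one bogus entry:

classifiers = [
    'Environment :: Console'            # <- no comma here...
    'Intended Audience :: Developers',  # ...so these two strings merge
]
print(classifiers)
# ['Environment :: ConsoleIntended Audience :: Developers']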
diff --git a/scrapelib/__init__.py b/scrapelib/__init__.py index <HASH>..<HASH> 100644 --- a/scrapelib/__init__.py +++ b/scrapelib/__init__.py @@ -269,12 +269,9 @@ class Scraper(object): self._request_frequency = 0.0 self._last_request = 0 - def accept_response(self, response): - return response.status_code < 400 - - def _accept_response_allow_404s(self, response): - # accepts anything below 400 and also 404s - return response.status_code < 400 or response.status_code == 404 + def accept_response(self, response, **kwargs): + return response.status_code < 400 or ( + response.status_code == 404 and not self._retry_on_404) def urlopen(self, url, method='GET', body=None, retry_on_404=False): """ @@ -331,12 +328,8 @@ class Scraper(object): result = ResultStr(self, resp, url) # break from loop on an accepted response - # (or 404 if retry_on_404 is off) - if not retry_on_404: - accept_resp = self._accept_response_allow_404s - else: - accept_resp = self.accept_response - if accept_resp(resp): + self._retry_on_404 = retry_on_404 + if self.accept_response(resp): break except (requests.HTTPError, requests.ConnectionError,
retry_on_<I> and custom accept_response don't play nicely
py
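A simplified model of the refactor: the 404 special case moves into the single overridable accept_response() hook, so custom subclasses and the retry_on_404 flag no longer bypass each other. The response class is a stand-in for a requests response:

class FakeResponse(object):
    def __init__(self, status_code):
        self.status_code = status_code

class Scraper(object):
    _retry_on_404 = False  # set by urlopen() before checking responses

    def accept_response(self, response, **kwargs):
        return response.status_code < 400 or (
            response.status_code == 404 and not self._retry_on_404)

s = Scraper()
print(s.accept_response(FakeResponse(404)))  # True: 404 accepted by default
s._retry_on_404 = True
print(s.accept_response(FakeResponse(404)))  # False: keep retrying on 404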
diff --git a/src/feat/common/resolver.py b/src/feat/common/resolver.py index <HASH>..<HASH> 100644 --- a/src/feat/common/resolver.py +++ b/src/feat/common/resolver.py @@ -1,7 +1,8 @@ import socket from twisted.names import dns, client, resolve, cache, hosts as hostsModule -from twisted.internet import error +from twisted.names.error import DNSNameError +from twisted.internet import error, defer def installResolver(reactor=None, @@ -24,6 +25,15 @@ class ResolverChain(resolve.ResolverChain): raise error.DNSLookupError(name) return result + def _lookup(self, name, cls, type, timeout): + d = resolve.ResolverChain._lookup(self, name, cls, type, timeout) + d.addErrback(self._formatError, name) + return d + + def _formatError(self, fail, name): + fail.trap(DNSNameError) + return defer.fail(error.DNSLookupError(name)) + class Resolver(client.Resolver):
Catch and repack DNSNameError. The resolver from twisted.names raises an exception specific to its implementation, which leaks out through the interface.
py
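A generic Twisted sketch of the trap-and-repack errback pattern used above; the two exception classes stand in for DNSNameError and DNSLookupError:

from twisted.internet import defer

class ImplementationError(Exception):
    pass

class InterfaceError(Exception):
    pass

def _repack(fail, name):
    fail.trap(ImplementationError)           # re-raise anything else
    return defer.fail(InterfaceError(name))  # repack into the public type

d = defer.fail(ImplementationError('boom'))
d.addErrback(_repack, 'example.com')
d.addErrback(lambda f: print(f.value))       # InterfaceError: example.com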
diff --git a/seed_identity_store/settings.py b/seed_identity_store/settings.py index <HASH>..<HASH> 100644 --- a/seed_identity_store/settings.py +++ b/seed_identity_store/settings.py @@ -220,7 +220,7 @@ CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json'] CELERY_IGNORE_RESULT = True -CELERYD_MAX_TASKS_PER_CHILD = 1000 +CELERYD_MAX_TASKS_PER_CHILD = 50 djcelery.setup_loader()
Reduced default CELERYD_MAX_TASKS_PER_CHILD to <I>
py
diff --git a/demosys/effects/managers.py b/demosys/effects/managers.py index <HASH>..<HASH> 100644 --- a/demosys/effects/managers.py +++ b/demosys/effects/managers.py @@ -29,7 +29,6 @@ class SingleEffectManager(BaseEffectManger): """Init after context creations""" effect_list = [cfg.cls() for name, cfg in effects.effects.items()] for effect in effect_list: - effect.init() if effect.name == self.effect_module: self.active_effect = effect
Don't call init() on new effect instances
py
diff --git a/gwpy/plotter/timeseries.py b/gwpy/plotter/timeseries.py index <HASH>..<HASH> 100644 --- a/gwpy/plotter/timeseries.py +++ b/gwpy/plotter/timeseries.py @@ -27,11 +27,7 @@ from matplotlib.projections import register_projection from matplotlib.artist import allow_rasterization from matplotlib.cbook import iterable -try: - from mpl_toolkits.axes_grid1 import make_axes_locatable -except ImportError: - from mpl_toolkits.axes_grid import make_axes_locatable - +from mpl_toolkits.axes_grid1 import make_axes_locatable from ..time import (Time, LIGOTimeGPS) from ..segments import SegmentList
plotter.timeseries: simplified import. axes_grid1 is everywhere.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -72,7 +72,10 @@ setup( "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Topic :: Software Development :: Libraries :: Python Modules", ], + python_requires="~=3.6", test_suite="nose.collector", )
[setup] restrict python version in setup itself to > <I>, < 4 (#<I>)
py
diff --git a/pyrogram/__init__.py b/pyrogram/__init__.py index <HASH>..<HASH> 100644 --- a/pyrogram/__init__.py +++ b/pyrogram/__init__.py @@ -16,7 +16,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. -__version__ = "1.3.1" +__version__ = "1.3.2" __license__ = "GNU Lesser General Public License v3 or later (LGPLv3+)" __copyright__ = "Copyright (C) 2017-present Dan <https://github.com/delivrance>"
Update Pyrogram to <I>
py
diff --git a/grimoire/utils.py b/grimoire/utils.py index <HASH>..<HASH> 100755 --- a/grimoire/utils.py +++ b/grimoire/utils.py @@ -102,12 +102,9 @@ def get_params(connectors): # And now a specific param to do the update until process termination parser.add_argument("--loop", action='store_true', help="loop the ocean update until process termination") - + parser.add_argument("--redis", default="redis", + help="url for the redis server") args = parser.parse_args() return args - - - -
Added redis param to specify where the redis server is running.
py
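A stand-alone argparse sketch of the new option; the URL passed here is just an example value:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--redis", default="redis",
                    help="url for the redis server")
args = parser.parse_args(["--redis", "redis://localhost:6379"])
print(args.redis)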
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ setuptools.setup( "oathtool", "pandas>=1.0", "requests", - "selenium<4.0.0", + "selenium<5.0.0", "selenium-requests>=1.3.3", "xmltodict", "keyring",
Update selenium dependency version (#<I>). The <<I> restriction was to satisfy the version requirements of selenium_requests. selenium_requests now supports selenium <I> as of the <I> release.
py