Columns:
- diff: string (lengths 139 to 3.65k)
- message: string (lengths 8 to 627)
- diff_languages: string (1 distinct value)
diff --git a/openquake/server/tests/tests.py b/openquake/server/tests/tests.py
index <HASH>..<HASH> 100644
--- a/openquake/server/tests/tests.py
+++ b/openquake/server/tests/tests.py
@@ -110,6 +110,11 @@ class EngineServerTestCase(unittest.TestCase):
     def tearDownClass(cls):
         cls.wait()

+    def setUp(self):
+        if sys.version_info[0] == 2:
+            # python 2 will die
+            raise unittest.SkipTest('Python 2')
+
     # tests

     def test_404(self):
Skip Python 2 tests again [skip hazardlib]
py
diff --git a/pymatgen/core/structure.py b/pymatgen/core/structure.py
index <HASH>..<HASH> 100644
--- a/pymatgen/core/structure.py
+++ b/pymatgen/core/structure.py
@@ -1290,6 +1290,9 @@ class IStructure(SiteCollection, MSONable):
                 filtered_labels.append(labels[ind])
             return filtered_labels

+        # radius too small
+        if r < 1e-10:
+            return [[]]
        latt = self.lattice
        if sites is None:
            sites = self.sites
@@ -1345,8 +1348,9 @@ class IStructure(SiteCollection, MSONable):
         # find all neighboring cubes for each atom in the lattice cell
         site_neighbors = find_neighbors(site_cube_index, nx, ny, nz)
         neighbors = []
+        # if no neighbors were found, return list of empty list
         if np.all([len(i) == 0 for i in site_neighbors]):
-            return []
+            return [[]]
         for sp, i, j in zip(self.species_and_occu, site_coords, site_neighbors):
             l1 = np.array(three_to_one(j, ny, nz), dtype=int).ravel()
             # use the cube index map to find the all the neighboring
return [[]] instead of [] when no neighbors
py
diff --git a/LiSE/core.py b/LiSE/core.py
index <HASH>..<HASH> 100644
--- a/LiSE/core.py
+++ b/LiSE/core.py
@@ -950,6 +950,7 @@ class Engine(object):
             "sense TEXT NOT NULL, "
             "branch TEXT NOT NULL DEFAULT 'master', "
             "tick INTEGER NOT NULL DEFAULT 0, "
+            "function TEXT NOT NULL, "
             "active BOOLEAN NOT NULL DEFAULT 1, "
             "PRIMARY KEY(character, sense, branch, tick),"
             "FOREIGN KEY(character) REFERENCES graphs(graph))"
schema change to support changing sense function on particular character
py
diff --git a/salt/states/user.py b/salt/states/user.py
index <HASH>..<HASH> 100644
--- a/salt/states/user.py
+++ b/salt/states/user.py
@@ -34,7 +34,7 @@ import salt.utils

 log = logging.getLogger(__name__)

-def _group_changes(cur, wanted, remove=True):
+def _group_changes(cur, wanted, remove=False):
     '''
     Determine if the groups need to be changed
     '''
Change default behavior of _group_changes. As discussed in #<I>
py
diff --git a/ryu/lib/mac.py b/ryu/lib/mac.py
index <HASH>..<HASH> 100644
--- a/ryu/lib/mac.py
+++ b/ryu/lib/mac.py
@@ -16,8 +16,6 @@

 from ryu.lib import addrconv

-import itertools
-
 # string representation
 HADDR_PATTERN = r'([0-9a-f]{2}:){5}[0-9a-f]{2}'

@@ -55,4 +53,4 @@ def haddr_to_bin(string):

 def haddr_bitand(addr, mask):
     return ''.join(chr(ord(a) & ord(m)) for (a, m)
-                   in itertools.izip(addr, mask))
+                   in zip(addr, mask))
'itertools.izip()' has been removed in Python 3
py
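The commit above swaps itertools.izip for the builtin zip. A standalone sketch (not ryu's actual code) of why this is safe here: zip exists in both Python 2 and 3, and the list it builds on Python 2, instead of an iterator, is negligible for 6-byte MAC addresses.

# zip() works on both Python 2 and 3; itertools.izip exists only on Python 2.
def haddr_bitand(addr, mask):
    # AND each byte of a text-mode binary MAC address with a mask
    return ''.join(chr(ord(a) & ord(m)) for (a, m) in zip(addr, mask))

print(repr(haddr_bitand('\xff\xff\xff\x00\x00\x00', '\xff\xff\x00\x00\x00\x00')))
# '\xff\xff\x00\x00\x00\x00'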
diff --git a/spatialist/tests/test_spatial.py b/spatialist/tests/test_spatial.py
index <HASH>..<HASH> 100644
--- a/spatialist/tests/test_spatial.py
+++ b/spatialist/tests/test_spatial.py
@@ -121,7 +121,7 @@ def test_Raster(tmpdir, testdata):
         ras.load()
         mat = ras.matrix()
         assert isinstance(mat, np.ndarray)
-        ras.assign(mat, index=0)
+        ras.assign(mat, band=0)
         # ras.reduce()
         ras.rescale(lambda x: 10 * x)

@@ -149,7 +149,7 @@ def test_Raster_extract(testdata):
         mat = ras.matrix()
         mat[0:10, 0:10] = ras.nodata
         mat[207:217, 258:268] = ras.nodata
-        ras.assign(mat, index=0)
+        ras.assign(mat, band=0)
         assert ras.extract(px=ras.geo['xmin'], py=ras.geo['ymax'], radius=5) == ras.nodata
         assert ras.extract(px=ras.geo['xmax'], py=ras.geo['ymin'], radius=5) == ras.nodata
[test_spatial] adjusted tests to new param name of Raster.assign
py
diff --git a/examples/consumer.py b/examples/consumer.py
index <HASH>..<HASH> 100644
--- a/examples/consumer.py
+++ b/examples/consumer.py
@@ -293,7 +293,7 @@ class OpenIDRequestHandler(BaseHTTPRequestHandler):
                 odd = ' class="odd"'

             for k, v in sreg_list:
-                field_name = sreg.sreg_data_fields.get(k, k)
+                field_name = sreg.data_fields.get(k, k)
                 value = cgi.escape(v)
                 self.wfile.write(
                     '<tr%s><td>%s</td><td>%s</td></tr>' % (odd, field_name, value))
[project @ Fix sreg display (to match sreg api change)]
py
diff --git a/utils.py b/utils.py
index <HASH>..<HASH> 100644
--- a/utils.py
+++ b/utils.py
@@ -5,13 +5,20 @@ from __future__ import generators

 import operator, math, random, copy, sys, os.path, bisect, re

+assert (2,5) <= sys.version_info < (3,), """\
+This code is meant for Python 2.5 through 2.7.
+You might find that the parts you care about still work in older
+Pythons or happen to work in newer ones, but you're on your own --
+edit utils.py if you want to try it."""
+
 #______________________________________________________________________________
 # Compatibility with Python 2.2, 2.3, and 2.4

-# The AIMA code is designed to run in Python 2.2 and up (at some point,
-# support for 2.2 may go away; 2.2 was released in 2001, and so is over
-# 6 years old). The first part of this file brings you up to 2.5
-# compatibility if you are running in Python 2.2 through 2.4:
+# The AIMA code was originally designed to run in Python 2.2 and up.
+# The first part of this file implements for Python 2.2 through 2.4
+# the parts of 2.5 that the original code relied on. Now we're
+# starting to go beyond what can be filled in this way, but here's
+# the compatibility code still since it doesn't hurt:

 try: bool, True, False ## Introduced in 2.3
 except NameError:
Added Python <I>-7 version check.
py
diff --git a/wallace/command_line.py b/wallace/command_line.py
index <HASH>..<HASH> 100755
--- a/wallace/command_line.py
+++ b/wallace/command_line.py
@@ -423,4 +423,13 @@ def verify():
         print "✗ experiment.py is MISSING"
         is_passing = False

+    # Make sure there's a README
+    is_txt_readme = os.path.exists("README.md")
+    is_md_readme = os.path.exists("README.txt")
+    if (not is_md_readme) and (not is_txt_readme):
+        is_passing = False
+        print "✗ README.txt or README.md is MISSING."
+    else:
+        print "✓ README is OK"
+
     return is_passing
Verify presence of a README in Wallace apps
py
diff --git a/lib/access_control_engine.py b/lib/access_control_engine.py
index <HASH>..<HASH> 100644
--- a/lib/access_control_engine.py
+++ b/lib/access_control_engine.py
@@ -94,8 +94,8 @@ def acc_authorize_action(id_user, name_action, verbose=0, **arguments):
         if not res2:
             raise Exception
         if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1 and res2[0][1] not in [1, "1"]:
-            if res[0][1]:
-                return (9, "%s %s" % (cfg_webaccess_warning_msgs[9] % res[0][1], (called_from and "%s %s" % (cfg_webaccess_msgs[0] % name_action[3:], cfg_webaccess_msgs[1]) or "")))
+            if res2[0][0]:
+                return (9, "%s %s" % (cfg_webaccess_warning_msgs[9] % res2[0][0], (called_from and "%s %s" % (cfg_webaccess_msgs[0] % name_action[3:], cfg_webaccess_msgs[1]) or "")))
             else:
                 raise Exception
     query2 = """SELECT ur.id_accROLE FROM user_accROLE ur WHERE ur.id_user=%s ORDER BY ur.id_accROLE """ % id_user
Fixed error reporting in case of not-yet-activated accounts, caused by commit <I>. BTW, the whole code should be prettified and tested.
py
diff --git a/mutagen/wave.py b/mutagen/wave.py
index <HASH>..<HASH> 100644
--- a/mutagen/wave.py
+++ b/mutagen/wave.py
@@ -168,8 +168,8 @@ class WAVE(FileType):
     def score(filename, fileobj, header):
         filename = filename.lower()

-        return (header.startswith(b"RIFF") * 2 + endswith(filename, b".wav") +
-                endswith(filename, b".wave"))
+        return (header.startswith(b"RIFF") + (header[8:12] == b'WAVE') +
+                endswith(filename, b".wav") + endswith(filename, b".wave"))

     def add_tags(self):
         """Add an empty ID3 tag to the file."""
wave: consider RIFF type WAVE for file type score
py
diff --git a/xclim/indices/_conversion.py b/xclim/indices/_conversion.py
index <HASH>..<HASH> 100644
--- a/xclim/indices/_conversion.py
+++ b/xclim/indices/_conversion.py
@@ -1191,9 +1191,9 @@ def universal_thermal_climate_index(
     tr: xr.DataArray = None,
 ) -> xr.DataArray:
     """
-    Mean Universal Thermal Climate Index (UTCI)
+    Universal Thermal Climate Index (UTCI)

-    The mean of daily UTCI
+    The daily UTCI

     Parameters
     ----------
Update xclim/indices/_conversion.py
py
diff --git a/django_object_view_tracking/models.py b/django_object_view_tracking/models.py
index <HASH>..<HASH> 100644
--- a/django_object_view_tracking/models.py
+++ b/django_object_view_tracking/models.py
@@ -38,9 +38,8 @@ class ObjectTracker(object):
         # The last date that we say "everything before this has been seen"
         last_date = self.session[self.key_name].get('_date')
         ct = ContentType.objects.get_for_model(model_class).id
-        if ct not in session:
-            if not last_date or not date_value:
-                return False
+        if ct not in session or not last_date or not date_value:
+            return False
         else:
             last_date = session[ct].get(pk, last_date)
         return last_date > date_value
reworked viewed logic again
py
diff --git a/test_twarc2.py b/test_twarc2.py
index <HASH>..<HASH> 100644
--- a/test_twarc2.py
+++ b/test_twarc2.py
@@ -8,16 +8,13 @@ import threading
 dotenv.load_dotenv()
 logging.basicConfig(filename="test.log", level=logging.INFO)

-T = None
 BEARER_TOKEN = os.environ.get("BEARER_TOKEN")

+T = twarc.Twarc2(bearer_token=BEARER_TOKEN)
+
+
 def test_bearer_token():
     assert BEARER_TOKEN

-def test_constructor():
-    global T
-    T = twarc.Twarc2(bearer_token=BEARER_TOKEN)
-
 def test_sample():
     count = 0
Move client constructor to the top level

This allows testing of individual functions using the pytest name matching functionality, and better decouples the tests. For example this will run any test containing the string 'recent_search':

pytest -k recent_search
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@ setup(
     },
     license='MIT',
     package_dir={'axes': 'axes'},
-    python_requires='~=3.5',
+    python_requires='~=3.6',
     install_requires=[
         'django',
         'django-appconf>=1.0.3',
Use Python <I> version marker in setup.py
py
diff --git a/great_expectations/expectations/metrics/map_metric_provider.py b/great_expectations/expectations/metrics/map_metric_provider.py
index <HASH>..<HASH> 100644
--- a/great_expectations/expectations/metrics/map_metric_provider.py
+++ b/great_expectations/expectations/metrics/map_metric_provider.py
@@ -2052,7 +2052,7 @@ def _sqlalchemy_column_map_condition_values(
         query = query.limit(result_format["partial_unexpected_count"])
     elif (
         result_format["result_format"] == "COMPLETE"
-        and "bigquery" in execution_engine.engine.dialect.name
+        and execution_engine.engine.dialect.name.lower() == "bigquery"
     ):
         logger.warning(
             "BigQuery imposes a limit of 10000 parameters on individual queries; "
Use engine.dialect.name instead of dialect.name (#<I>)
py
diff --git a/multiqc/plots/bargraph.py b/multiqc/plots/bargraph.py
index <HASH>..<HASH> 100644
--- a/multiqc/plots/bargraph.py
+++ b/multiqc/plots/bargraph.py
@@ -79,9 +79,11 @@ def plot (data, cats=None, pconfig={}):
         sample_dcount = dict()
         for c in cats[idx].keys():
             thisdata = list()
+            catcount = 0
             for s in hc_samples:
                 try:
                     thisdata.append(d[s][c])
+                    catcount += 1
                     try:
                         sample_dcount[s] += 1
                     except KeyError:
@@ -89,7 +91,7 @@
                 except KeyError:
                     # Pad with NaNs when we have missing categories in a sample
                     thisdata.append(float('nan'))
-            if len(thisdata) > 0 and max(x for x in thisdata if not math.isnan(x)) > 0:
+            if catcount > 0 and max(x for x in thisdata if not math.isnan(x)) > 0:
                 thisdict = { 'name': cats[idx][c]['name'], 'data': thisdata }
                 if 'color' in cats[idx][c]:
                     thisdict['color'] = cats[idx][c]['color']
Handle cases where all cats are missing
py
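A standalone sketch of the failure mode the commit above guards against: when a category is missing from every sample, thisdata is all NaN, the old len(thisdata) > 0 check passes, and max() over the empty filtered sequence raises ValueError. Counting successful lookups (catcount) short-circuits before max() is ever evaluated.

import math

thisdata = [float('nan'), float('nan')]  # category missing in every sample

try:
    # old guard: len() > 0 passes, then max() sees an empty sequence
    if len(thisdata) > 0 and max(x for x in thisdata if not math.isnan(x)) > 0:
        pass
except ValueError as err:
    print(err)  # max() arg is an empty sequence

catcount = 0  # no d[s][c] lookup succeeded
if catcount > 0 and max(x for x in thisdata if not math.isnan(x)) > 0:
    pass  # never evaluated, so no ValueError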
diff --git a/docs/apigen.py b/docs/apigen.py
index <HASH>..<HASH> 100644
--- a/docs/apigen.py
+++ b/docs/apigen.py
@@ -21,12 +21,17 @@ import shutil
 import sys
 from fnmatch import fnmatch
 from os import path
+from typing import Dict

 from sphinx import __display_version__
+from sphinx.application import Sphinx
 from sphinx.cmd.quickstart import EXTENSIONS
 from sphinx.util import logging
 from sphinx.util.osutil import FileAvoidWrite, walk

+if sys.version_info[0] >= 3:
+    unicode = str
+
 if False:
     # For type annotation
     from typing import Any, List, Tuple  # NOQA
apigen.py: Import types used in type hints that pyflakes>=<I> now checks
py
diff --git a/insights/client/utilities.py b/insights/client/utilities.py
index <HASH>..<HASH> 100644
--- a/insights/client/utilities.py
+++ b/insights/client/utilities.py
@@ -95,6 +95,8 @@ def write_to_disk(filename, delete=False, content=get_time()):
     """
     Write filename out to disk
     """
+    if not os.path.exists(os.path.dirname(filename)):
+        return
     if delete:
         if os.path.lexists(filename):
             os.remove(filename)
check directory before writing a file (#<I>)
py
diff --git a/bloop/stream/coordinator.py b/bloop/stream/coordinator.py
index <HASH>..<HASH> 100644
--- a/bloop/stream/coordinator.py
+++ b/bloop/stream/coordinator.py
@@ -1,5 +1,5 @@
 import arrow
-from typing import Dict, List, Optional, Any, Mapping
+from typing import Dict, List, Optional, Any, Mapping  # noqa

 from .buffer import RecordBuffer
 from .shard import Shard
typing.List only used in comment annotation, flake8 thinks it's unused
py
diff --git a/python/dllib/src/test/dev/run-tests.py b/python/dllib/src/test/dev/run-tests.py
index <HASH>..<HASH> 100755
--- a/python/dllib/src/test/dev/run-tests.py
+++ b/python/dllib/src/test/dev/run-tests.py
@@ -136,7 +136,7 @@ def run_individual_python_test(test_name, python_exec):

 def get_default_python_executables():
     # TODO: add more version. only support python 2.7 for now
-    python_execs = ["python2.7"]
+    python_execs = ["python2.7", "python3.5"]
     return python_execs
Enable python<I> testing (#<I>)
py
diff --git a/aliyun/log/util.py b/aliyun/log/util.py
index <HASH>..<HASH> 100755
--- a/aliyun/log/util.py
+++ b/aliyun/log/util.py
@@ -203,21 +203,21 @@ class UTC(tzinfo):

 utc = UTC()

-def parse_timestamp(tm, fmts=("%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M:%S %Z")):
+def parse_timestamp(tm):
     if isinstance(tm, (int, float)) or \
             (isinstance(tm, (six.text_type, six.binary_type)) and tm.isdigit()):
         return int(tm)

-    dt = None
-
-    for fmt in fmts:
-        try:
-            dt = datetime.strptime(tm, fmt)
-        except ValueError as ex:
-            pass
-
-    if dt is None:
-        dt = parser.parse(tm)
+    # dt = None
+    #
+    # for fmt in fmts:
+    #     try:
+    #         dt = datetime.strptime(tm, fmt)
+    #     except ValueError as ex:
+    #         pass
+    #
+    # if dt is None:
+    dt = parser.parse(tm)

     if six.PY2:
         if dt.tzinfo is None:
use parser by default to keep py2/py3 consistent.
py
diff --git a/src/edeposit/amqp/harvester/structures.py b/src/edeposit/amqp/harvester/structures.py
index <HASH>..<HASH> 100755
--- a/src/edeposit/amqp/harvester/structures.py
+++ b/src/edeposit/amqp/harvester/structures.py
@@ -108,6 +108,9 @@ class Publication(object):

             return isbn

+        if self.optionals and self.optionals.EAN:
+            return self.optionals.EAN
+
         return self.title + ",".join(map(lambda x: x.name, self.authors))

     def __setattr__(self, key, val):
#5: Added EAN to _get_hash()
py
diff --git a/espefuse.py b/espefuse.py
index <HASH>..<HASH> 100755
--- a/espefuse.py
+++ b/espefuse.py
@@ -22,6 +22,7 @@ import argparse
 import sys
 import os
 import struct
+import time

 # Table of efuse values - (category, block, word in block, mask, write disable bit, read disable bit, register name, type, description)
 # Match values in efuse_reg.h & Efuse technical reference chapter
@@ -73,6 +74,7 @@ EFUSE_CMD_READ = 0x1
 # address of first word of write registers for each efuse
 EFUSE_REG_WRITE = [0x3FF5A01C, 0x3FF5A098, 0x3FF5A0B8, 0x3FF5A0D8]

+EFUSE_BURN_TIMEOUT = 0.250  # seconds

 def confirm(action, args):
     print("%s%sThis is an irreversible operation." % (action, "" if action.endswith("\n") else ". "))
@@ -103,7 +105,8 @@ def efuse_perform_write(esp):
     esp.write_reg(EFUSE_REG_CMD, EFUSE_CMD_WRITE)

     def wait_idle():
-        for _ in range(10):
+        deadline = time.time() + EFUSE_BURN_TIMEOUT
+        while time.time() < deadline:
             if esp.read_reg(EFUSE_REG_CMD) == 0:
                 return
         raise esptool.FatalError("Timed out waiting for Efuse controller command to complete")
espefuse: Calculate efuse timeout in time units rather than number of retries
py
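The pattern in the commit above, as a standalone sketch with a hypothetical poll callable: a wall-clock deadline keeps the timeout constant no matter how long each poll round-trip takes, which a fixed retry count cannot guarantee.

import time

TIMEOUT = 0.250  # seconds, mirrors EFUSE_BURN_TIMEOUT

def wait_idle(poll):
    # poll is a hypothetical callable that returns True once the device is idle
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        if poll():
            return
    raise RuntimeError('Timed out waiting for command to complete')

wait_idle(lambda: True)  # returns immediately; a busy device would be re-polled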
diff --git a/src/service.py b/src/service.py
index <HASH>..<HASH> 100644
--- a/src/service.py
+++ b/src/service.py
@@ -375,7 +375,7 @@ class Service(SchedulingItem):
     #Raise a log entry with a Notification alert like
     #SERVICE NOTIFICATION: superadmin;server;Load;OK;notify-by-rss;no output
     def raise_notification_log_entry(self, contact, command):
-        if self.__class_.log_notifications:
+        if self.__class__.log_notifications:
             Log().log("SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s" % (contact.get_name(), self.host.get_name(), self.get_name(), self.state, \
                 command.get_name(), self.output))
* typo found. added a missing _ to __class__
py
diff --git a/examples/plotting/file/box_annotation.py b/examples/plotting/file/box_annotation.py
index <HASH>..<HASH> 100644
--- a/examples/plotting/file/box_annotation.py
+++ b/examples/plotting/file/box_annotation.py
@@ -5,7 +5,7 @@ from bokeh.sampledata.glucose import data
 TOOLS = "pan,wheel_zoom,box_zoom,reset,save"

 # reduce data size
-data = data.ix['2010-10-06':'2010-10-13']
+data = data.loc['2010-10-06':'2010-10-13']

 p = figure(x_axis_type="datetime", tools=TOOLS, title="Glocose Range")
 p.xgrid.grid_line_color=None
REF: Use .loc instead of .ix in box_annotation example (#<I>)
py
diff --git a/pylint/checkers/utils.py b/pylint/checkers/utils.py
index <HASH>..<HASH> 100644
--- a/pylint/checkers/utils.py
+++ b/pylint/checkers/utils.py
@@ -15,6 +15,7 @@ import six
 from six.moves import map, builtins # pylint: disable=redefined-builtin

 import astroid
+from astroid import bases as _bases
 from astroid import scoped_nodes

 BUILTINS_NAME = builtins.__name__
@@ -683,6 +684,16 @@ def _supports_protocol(value, protocol_callback):
         return True
     if protocol_callback(value):
         return True
+
+
+    # TODO: this is not needed in astroid 2.0, where we can
+    # check the type using a virtual base class instead.
+    if (isinstance(value, _bases.Proxy)
+            and isinstance(value._proxied, astroid.BaseInstance)
+            and has_known_bases(value._proxied)):
+        value = value._proxied
+        return protocol_callback(value)
+
     return False
Check proxies when verifying if an object supports a protocol

This patch adds support for understanding proxies of instances when checking if an object, the proxy in question, supports a protocol or not. The problem was with the isinstance call, since the proxy we have does not forward the __instancecheck__ to its underlying wrapped object. Changing astroid for supporting this use case requires a plethora of changes, which will lead to churn for a small benefit.

Close #<I>
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@ tests_require = [
     'pytest-cache>=1.0',
     'pytest-cov>=1.8.0',
     'pytest-pep8>=1.0.6',
-    'pytest>=2.8.0',
+    'pytest>=3.6.0',  # due to pytest-cov requirement
 ]

 extras_require = {
setup: fix pytest version to be compatible with pytest-cov
py
diff --git a/openquake/signalling.py b/openquake/signalling.py
index <HASH>..<HASH> 100644
--- a/openquake/signalling.py
+++ b/openquake/signalling.py
@@ -42,7 +42,8 @@ def generate_routing_key(job_id, type_):
     :return: the routing key
     :rtype: string
     """
-    assert type_ in ('*', 'failed', 'succeeded', 'ERROR', 'FATAL')
+    assert type_ in ('*', 'failed', 'succeeded',
+                     'FATAL', 'ERROR', 'WARN', 'INFO', 'DEBUG')
     assert isinstance(job_id, int) or job_id == '*'
Completed the list of logging levels

Former-commit-id: <I>b9ed9c<I>f<I>c<I>a1fc7fad<I>a8ff<I>
py
diff --git a/benchexec/tools/jdart.py b/benchexec/tools/jdart.py
index <HASH>..<HASH> 100644
--- a/benchexec/tools/jdart.py
+++ b/benchexec/tools/jdart.py
@@ -42,6 +42,8 @@ class Tool(benchexec.tools.template.BaseTool):
         "jpf-jdart.jar",
         "RunJPF.jar",
         "version.txt",
+        "jdart.sh",
+        "run-jdart.sh",
     ]

     def executable(self):
Update the jdart tool integration module to include all shell scripts
py
diff --git a/Lib/ufo2ft/kernFeatureWriter.py b/Lib/ufo2ft/kernFeatureWriter.py
index <HASH>..<HASH> 100644
--- a/Lib/ufo2ft/kernFeatureWriter.py
+++ b/Lib/ufo2ft/kernFeatureWriter.py
@@ -135,6 +135,7 @@ class KernFeatureWriter(object):
             if leftIsClass:
                 self.leftUfoClasses[left] = self.groups[left]
             if rightIsClass:
+                self.rightUfoClasses[right] = self.groups[right]
                 self.classPairKerning[glyphPair] = val
             else:
                 self.leftClassKerning[glyphPair] = val
kernFeatureWriter: fix issue when collecting right kerning classes from UFO groups

This step was skipped when both 'leftIsClass' and 'rightIsClass' are true.
py
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index <HASH>..<HASH> 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -354,6 +354,10 @@ def ensure_clean(filename=None, return_filelike=False):

         try:
             fd, filename = tempfile.mkstemp(suffix=filename)
+        except UnicodeEncodeError:
+            raise nose.SkipTest('no unicode file names on this system')
+
+        try:
             yield filename
         finally:
             try:
TST: ensure_clean skips test when fs doesn't support unicode (sparc)
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ def get_version():
     Read version from __init__.py
     """
     version_regex = re.compile(
-        '__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*)(?P=q)'
+        '__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
    )
    here = path.abspath(path.dirname(__file__))
    init_location = path.join(here, "CHAID/__init__.py")
Change version regex to allow for alpha versions (#<I>)
py
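The extended regex from the commit above, exercised on a couple of version strings. A standalone sketch; only the pattern itself is taken from the diff.

import re

version_regex = re.compile(
    '__version__\\s*=\\s*(?P<q>[\'"])(?P<version>\\d+(\\.\\d+)*(-(alpha|beta|rc)(\\.\\d+)?)?)(?P=q)'
)

for line in ('__version__ = "5.3.0"', "__version__ = '5.4.0-alpha.2'"):
    print(version_regex.search(line).group('version'))
# 5.3.0
# 5.4.0-alpha.2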
diff --git a/sortinghat/cmd/export.py b/sortinghat/cmd/export.py
index <HASH>..<HASH> 100644
--- a/sortinghat/cmd/export.py
+++ b/sortinghat/cmd/export.py
@@ -84,17 +84,17 @@ class Export(Command):
         """
         params = self.parser.parse_args(args)

-        if params.identities:
-            code = self.export_identities(params.outfile, params.source)
-        elif params.orgs:
-            code = self.export_organizations(params.outfile)
-        else:
-            # The running proccess never should reach this section
-            raise RuntimeError("Unexpected export option")
+        with params.outfile as outfile:
+            if params.identities:
+                code = self.export_identities(outfile, params.source)
+            elif params.orgs:
+                code = self.export_organizations(outfile)
+            else:
+                # The running proccess never should reach this section
+                raise RuntimeError("Unexpected export option")

         return code

-
     def export_identities(self, outfile, source=None):
         """Export identities information to a file.
[cmd:export] Fix not closed output file error
py
diff --git a/tests/util.py b/tests/util.py
index <HASH>..<HASH> 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -132,6 +132,6 @@ def run_scenario(application=None, feature=None, scenario=None, **opts):
     proc = subprocess.Popen(
         args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     text, _ = proc.communicate()
-    text = text.decode().rstrip()
+    text = text.decode('utf-8').rstrip()

     return proc.returncode, text
Fix parsing non-ASCII data from tests
py
diff --git a/polyaxon/polyaxon/config_settings/core.py b/polyaxon/polyaxon/config_settings/core.py
index <HASH>..<HASH> 100644
--- a/polyaxon/polyaxon/config_settings/core.py
+++ b/polyaxon/polyaxon/config_settings/core.py
@@ -1,3 +1,5 @@
+from corsheaders.defaults import default_headers
+
 from polyaxon.utils import ROOT_DIR, config

 DEBUG = config.get_boolean('POLYAXON_DEBUG')
@@ -10,6 +12,15 @@ CORS_ORIGIN_ALLOW_ALL = True
 CORS_ALLOW_CREDENTIALS = True

 SSL_ENABLE = config.get_boolean('POLYAXON_SSL_ENABLE', is_optional=True, default=False)

+CORS_ORIGIN_WHITELIST = config.get_string('POLYAXON_CORS_ORIGIN_WHITELIST',
+                                          is_optional=True,
+                                          default=False)
+
+CORS_ALLOW_HEADERS = default_headers + (
+    'x-polyaxon-cli-version',
+    'x-polyaxon-client-version',
+)
+
 if SSL_ENABLE:
     SESSION_COOKIE_SECURE = True
     CSRF_COOKIE_SECURE = True
Add cors allow headers and origin whitelist
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -31,7 +31,6 @@ tests_require = [
     "flake8",
     "yanc",
     "remotecv",
-    "hiredis",
     "pyssim",
     "celery",
     "cairosvg",
Remove unused hiredis dependency

No usage of that library found anywhere in the code
py
diff --git a/forms_builder/forms/tests.py b/forms_builder/forms/tests.py
index <HASH>..<HASH> 100644
--- a/forms_builder/forms/tests.py
+++ b/forms_builder/forms/tests.py
@@ -126,6 +126,14 @@ class Tests(TestCase):
         except IntegrityError:
             self.fail("Slugs were not auto-unique")

+    def test_field_validate_slug_length(self):
+        max_slug_length = 2000
+        form = Form.objects.create(title="Test")
+        field = Field(form=form,
+                      label='x' * (max_slug_length + 1), field_type=NAMES[0][0])
+        field.save()
+        self.assertLessEqual(len(field.slug), max_slug_length)
+
     def test_field_default_ordering(self):
         form = Form.objects.create(title="Test")
         form.fields.create(label="second field",
Add test ensuring validated slug length is under the field length
py
diff --git a/suds/bindings/document.py b/suds/bindings/document.py
index <HASH>..<HASH> 100644
--- a/suds/bindings/document.py
+++ b/suds/bindings/document.py
@@ -109,6 +109,6 @@ class Document(Binding):
             result = self.schema.find(qref)
             if result is None:
                 raise TypeNotFound(ref)
-            result = result[0].resolve()
+            result = result[0]
             break
         return result
\ No newline at end of file
returned_type() should return the (unresolved) Element
py
diff --git a/tests/nosetests/test_fti/test_fti.py b/tests/nosetests/test_fti/test_fti.py
index <HASH>..<HASH> 100644
--- a/tests/nosetests/test_fti/test_fti.py
+++ b/tests/nosetests/test_fti/test_fti.py
@@ -34,9 +34,8 @@ def test_binary(plot=False):
         b.plot()
         plt.legend()
         plt.show()
-    assert(np.allclose(fluxes, fluxes_legacy, rtol=0, atol=1e-2))
+    assert(np.allclose(fluxes, fluxes_legacy, rtol=0, atol=1e-3))

-    b['exptime'] = 1766.0
     b.run_compute(fti_method='oversample', fti_oversample=10)
     fluxes_legacy = np.loadtxt(os.path.join(dir, 'kic12004834.fti.data'), unpack=True, usecols=(1,))
     fluxes = b.get_value('fluxes', context='model')
@@ -49,7 +48,7 @@
         b.plot()
         plt.legend()
         plt.show()
-    assert(np.allclose(fluxes, fluxes_legacy, rtol=0, atol=1e-2))
+    assert(np.allclose(fluxes, fluxes_legacy, rtol=0, atol=1e-3))
tightened tolerance on test_fti now that it's passing
py
diff --git a/sievelib/commands.py b/sievelib/commands.py
index <HASH>..<HASH> 100644
--- a/sievelib/commands.py
+++ b/sievelib/commands.py
@@ -376,13 +376,13 @@ class Command(object):
                     break

             if atype in curarg["type"]:
-                ext = curarg.get("extension")
-                condition = (
-                    check_extension and ext and
-                    ext not in RequireCommand.loaded_extensions)
-                if condition:
-                    raise ExtensionNotLoaded(ext)
                 if self.__is_valid_value_for_arg(curarg, avalue):
+                    ext = curarg.get("extension")
+                    condition = (
+                        check_extension and ext and
+                        ext not in RequireCommand.loaded_extensions)
+                    if condition:
+                        raise ExtensionNotLoaded(ext)
                     if "extra_arg" in curarg:
                         self.curarg = curarg
                     break
Check valid value before checking if the extension is loaded

Avoid giving an error that extension copy is not loaded when fileinto :create is specified. The fileinto command takes two optional tag arguments that require distinct extensions to be loaded: :copy requires the copy extension and :create requires the mailbox extension. If the value is not checked before checking the extension, fileinto :create raises ExtensionNotLoaded since the current argument's type matches the :copy parameter's type ("tag").
py
diff --git a/fints/segments/message.py b/fints/segments/message.py
index <HASH>..<HASH> 100644
--- a/fints/segments/message.py
+++ b/fints/segments/message.py
@@ -73,7 +73,7 @@ class HNVSK(FinTS3SegmentOLD):
             ':'.join(['PIN', str(profile_version)]),
             998,
             self.SECURITY_SUPPLIER_ROLE,
-            ':'.join(['1', '', str(systemid)]),
+            ':'.join(['1', '', fints_escape(str(systemid))]),
             ':'.join(['1', time.strftime('%Y%m%d'), time.strftime('%H%M%S')]),
             ':'.join(['2', '2', '13', '@8@00000000', '5', '1']),  # Crypto algorithm
             ':'.join([str(self.country_code), blz, username, 'S', '0', '0']),
Add missing escaping (systemid sometimes has a +)
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,6 @@ setup(
         'scipy',
         'pandas',
         'scikit-learn',
-        'pyspark',
         'pyarrow'
     ],
    extras_require={
Remove install_requires that are not used by bat

These packages are often used with bat and some of them were used in examples, but the requirements list within setup.py should be limited only to packages specifically required by the bat package itself.

Fixes #<I>
py
diff --git a/angr/analyses/complete_calling_conventions.py b/angr/analyses/complete_calling_conventions.py
index <HASH>..<HASH> 100644
--- a/angr/analyses/complete_calling_conventions.py
+++ b/angr/analyses/complete_calling_conventions.py
@@ -202,7 +202,11 @@ class CompleteCallingConventionsAnalysis(Analysis):
                 if idx % 3 == 0:
                     time.sleep(0.1)

-            cc, proto, varman = self._analyze_core(func_addr)
+            try:
+                cc, proto, varman = self._analyze_core(func_addr)
+            except Exception:  # pylint:disable=broad-except
+                _l.error("Exception occurred during _analyze_core().", exc_info=True)
+                cc, proto, varman = None, None, None
             self._results.put((func_addr, cc, proto, varman))

     def _analyze_core(self, func_addr: int) -> Tuple[Optional['SimCC'],Optional['SimTypeFunction'],
CCA: Catch exceptions in workers.
py
diff --git a/tests/test_wsaa_crypto.py b/tests/test_wsaa_crypto.py
index <HASH>..<HASH> 100644
--- a/tests/test_wsaa_crypto.py
+++ b/tests/test_wsaa_crypto.py
@@ -1,5 +1,7 @@
 import base64, subprocess

+from past.builtins import basestring
+
 from pyafipws.wsaa import WSAA


@@ -9,7 +11,7 @@ def test_wsfev1_create_tra():
     # TODO: return string
     tra = tra.decode("utf8")
     # sanity checks:
-    assert isinstance(tra, str)
+    assert isinstance(tra, basestring)
     assert tra.startswith(
         '<?xml version="1.0" encoding="UTF-8"?>'
         '<loginTicketRequest version="1.0">'
WSAA: fix TRA test expecting unicode in python2
py
diff --git a/octodns/provider/cloudflare.py b/octodns/provider/cloudflare.py
index <HASH>..<HASH> 100644
--- a/octodns/provider/cloudflare.py
+++ b/octodns/provider/cloudflare.py
@@ -585,7 +585,7 @@ class CloudflareProvider(BaseProvider):
         changed_records = {c.record for c in changes}

         for desired_record in desired.records:
-            if desired_record not in existing.records:  # Will be created
+            if desired_record not in existing_records:  # Will be created
                 continue
             elif desired_record in changed_records:  # Already being updated
                 continue
Use dict to speed up record search

This dict is created earlier, presumably to speed up this exact lookup, but it was using the list form instead, requiring a linear scan for each item.
py
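A standalone sketch of the speed-up in the commit above: membership tests against a list are O(n) linear scans, while a precomputed set (or dict) answers in O(1), turning the overall loop from O(n*m) into O(n + m).

existing = ['record%d' % i for i in range(100000)]

existing_records = set(existing)  # built once, like the dict in the commit
desired = ['record99999', 'new-record']

for record in desired:
    if record not in existing_records:  # hash lookup, no linear scan
        print(record, 'will be created')
# new-record will be created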
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py
index <HASH>..<HASH> 100644
--- a/pandas/tests/frame/indexing/test_indexing.py
+++ b/pandas/tests/frame/indexing/test_indexing.py
@@ -2624,6 +2624,17 @@ class TestDataFrameIndexing:
         result = df.loc[IndexType("foo", "bar")]["A"]
         assert result == 1

+    @pytest.mark.parametrize("tpl", [tuple([1]), tuple([1, 2])])
+    def test_index_single_double_tuples(self, tpl):
+        # GH 20991
+        idx = pd.Index([tuple([1]), tuple([1, 2])], name="A", tupleize_cols=False)
+        df = DataFrame(index=idx)
+
+        result = df.loc[[tpl]]
+        idx = pd.Index([tpl], name="A", tupleize_cols=False)
+        expected = DataFrame(index=idx)
+        tm.assert_frame_equal(result, expected)
+
     def test_boolean_indexing(self):
         idx = list(range(3))
         cols = ["A", "B", "C"]
TST: add test for indexing with single/double tuples (#<I>)
py
diff --git a/ella/core/cache/invalidate.py b/ella/core/cache/invalidate.py
index <HASH>..<HASH> 100644
--- a/ella/core/cache/invalidate.py
+++ b/ella/core/cache/invalidate.py
@@ -75,7 +75,7 @@ if ACTIVE_MQ_HOST:
     # start listening for any model

     # register the proper propagation function for intercepting the proper signals
-    #dispatcher.connect(CACHE_DELETER.propagate_signal, signal=signals.pre_save)
+    dispatcher.connect(CACHE_DELETER.propagate_signal, signal=signals.pre_save)
     dispatcher.connect(CACHE_DELETER.propagate_signal, signal=signals.post_save)
     dispatcher.connect(CACHE_DELETER.propagate_signal, signal=signals.pre_delete)
     log.debug('Start listening for any model')
Catch both pre_save and post_save signals for the cache. Works for me now.

git-svn-id: <URL>
py
diff --git a/example/drone_delivery/drone_delivery.py b/example/drone_delivery/drone_delivery.py
index <HASH>..<HASH> 100644
--- a/example/drone_delivery/drone_delivery.py
+++ b/example/drone_delivery/drone_delivery.py
@@ -41,7 +41,6 @@ class Drone(object):
             self._log("Waiting for GPS Lock")

     def run(self):
-        self.vehicle.armed = True
         self.change_mode('GUIDED')

         if self.webserver_enabled is True:
if armed gps signal is lost
py
diff --git a/safe_qgis/test_impact_functions_doc.py b/safe_qgis/test_impact_functions_doc.py
index <HASH>..<HASH> 100644
--- a/safe_qgis/test_impact_functions_doc.py
+++ b/safe_qgis/test_impact_functions_doc.py
@@ -98,7 +98,7 @@ class ImpactFunctionsDocTest(unittest.TestCase):
             if myDatatype == 'sigab':
                 verifyColumn(myDialog.if_table, 5, myDatatype, 'included')

-    def testRestButton(self):
+    def XtestRestButton(self):
         """Test when reset button is pressed."""
         myDialog = ImpactFunctionsDoc(PARENT)
         expectedTable = myDialog.if_table.toNewlineFreeString()
@@ -115,7 +115,7 @@
                          'is pressed.')
         assert expectedTable == realTableReset, msgReset

-    def test_showHelp(self):
+    def Xtest_showHelp(self):
         """Test that help button works"""
         myDialog = ImpactFunctionsDoc(PARENT)
         myButton = myDialog.myButtonBox.button(QtGui.QDialogButtonBox.Help)
temporarily disabling 2 tests to run on jenkins
py
diff --git a/dcard/cli.py b/dcard/cli.py
index <HASH>..<HASH> 100644
--- a/dcard/cli.py
+++ b/dcard/cli.py
@@ -49,6 +49,6 @@ def download(args):

     print('成功下載 %d items!' % len(resources) if all(status) \
         else '出了點錯下載不完全喔')
-    print('Finish in {:.5f} sec(s).'.format(time.time() - start_time))
+    print('Finish in {0:.5f} sec(s).'.format(time.time() - start_time))

     return all(status)
hotfix in cli for py<I>
py
diff --git a/tacl/catalogue.py b/tacl/catalogue.py
index <HASH>..<HASH> 100644
--- a/tacl/catalogue.py
+++ b/tacl/catalogue.py
@@ -49,4 +49,6 @@ class Catalogue (dict):
         """
         with open(path, 'wb') as fh:
             writer = csv.writer(fh, delimiter=' ')
-            writer.writerows(self.items())
+            rows = self.items()
+            rows.sort(key=lambda x: x[0])
+            writer.writerows(rows)
Added sorting of catalogue entries when creating a new catalogue.
py
diff --git a/master/buildbot/schedulers/filter.py b/master/buildbot/schedulers/filter.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/schedulers/filter.py
+++ b/master/buildbot/schedulers/filter.py
@@ -56,3 +56,17 @@ class ChangeFilter(ComparableMixin):
             if filt_fn is not None and not filt_fn(chg_val):
                 return False
         return True
+
+    def __repr__(self):
+        checks = []
+        for (filt_list, filt_re, filt_fn, chg_attr) in self.checks:
+            if filt_list is not None and len(filt_list) == 1:
+                checks.append('%s == %s' % (chg_attr, filt_list[0]))
+            elif filt_list is not None:
+                checks.append('%s in %r' % (chg_attr, filt_list))
+            if filt_re is not None :
+                checks.append('%s ~/%s/' % (chg_attr, filt_re))
+            if filt_fn is not None :
+                checks.append('%s(%s)' % (filt_fn.__name__, chg_attr))
+
+        return "<%s on %s>" % (self.__class__.__name__, ' and '.join(checks))
change filter: have a repr()
py
diff --git a/drf_generators/management/commands/generate.py b/drf_generators/management/commands/generate.py
index <HASH>..<HASH> 100644
--- a/drf_generators/management/commands/generate.py
+++ b/drf_generators/management/commands/generate.py
@@ -58,7 +58,7 @@ class Command(AppCommand):
             generator = APIViewGenerator(app_config, force)
         elif format == 'function':
             generator = FunctionViewGenerator(app_config, force)
-        elif format == 'modelviewset' or format == None:
+        elif format == 'modelviewset' or format is None:
             generator = ModelViewSetGenerator(app_config, force)
         else:
             message = '\'%s\' is not a valid format.' % options['format']
E<I> is None, not == None
py
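A standalone sketch of why the PEP 8 rule behind the commit above prefers is None: == can be overridden by the operand's class, while the identity test cannot lie.

class AlwaysEqual:
    def __eq__(self, other):  # pathological but legal
        return True

obj = AlwaysEqual()
print(obj == None)  # True  -- misleading
print(obj is None)  # False -- the answer we actually wanted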
diff --git a/pylivetrader/backend/alpaca.py b/pylivetrader/backend/alpaca.py
index <HASH>..<HASH> 100644
--- a/pylivetrader/backend/alpaca.py
+++ b/pylivetrader/backend/alpaca.py
@@ -432,8 +432,8 @@ class Backend(BaseBackend):
             symbols = [assets.symbol]
         else:
             symbols = [asset.symbol for asset in assets]
-        if ((quantopian_compatible and field == 'last_traded') or
-                (not quantopian_compatible and field in ('price', 'last_traded'))):
+        if field == 'last_traded' or \
+                not quantopian_compatible and field == 'price':
             results = self._get_spot_trade(symbols, field)
         else:
             results = self._get_spot_bars(symbols, field)
Simplify the if statement

It was a complicated condition; replace it with a simpler and more readable alternative.
py
diff --git a/will/scripts/generate_will_project.py b/will/scripts/generate_will_project.py
index <HASH>..<HASH> 100644
--- a/will/scripts/generate_will_project.py
+++ b/will/scripts/generate_will_project.py
@@ -257,11 +257,11 @@ PLUGIN_BLACKLIST = [
     if not os.path.exists(readme_path):
         with open(readme_path, 'w+') as f:
             f.write("""
-This is our bot, a [https://github.com/skoczen/will](will) bot.
+This is our bot, a [will](https://github.com/skoczen/will) bot.
 """)

     print "\nDone."


 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
Bugfix in the generated README.md format

Change auto generated README.md file content from "This is our bot, a [<URL> bot." to "This is our bot, a [will](<URL>) bot."
py
diff --git a/tests/pytests/functional/modules/test_pkg.py b/tests/pytests/functional/modules/test_pkg.py
index <HASH>..<HASH> 100644
--- a/tests/pytests/functional/modules/test_pkg.py
+++ b/tests/pytests/functional/modules/test_pkg.py
@@ -534,4 +534,3 @@ def test_list_repos_duplicate_entries(grains, modules):
         fp_.write("clean_requirements_on_remove=True\n")
         fp_.write("best=True\n")
         fp_.write("skip_if_unavailable=False\n")
-        fp_.write("http_caching=True\n")
Leave a clean yum.conf in destructive tests
py
diff --git a/telethon/errors/rpcbaseerrors.py b/telethon/errors/rpcbaseerrors.py
index <HASH>..<HASH> 100644
--- a/telethon/errors/rpcbaseerrors.py
+++ b/telethon/errors/rpcbaseerrors.py
@@ -7,6 +7,7 @@ class RPCError(Exception):
         super().__init__('RPCError {}: {}{}'.format(
             code or self.code, message, self._fmt_request(request)))

+        self.request = request
         self.code = code
         self.message = message

@@ -15,7 +16,7 @@
         return ' (caused by {})'.format(request.__class__.__name__)

     def __reduce__(self):
-        return type(self), (self.code, self.message)
+        return type(self), (self.request, self.message, self.code)


 class InvalidDCError(RPCError):
Fix errors found by new tests (#<I>)
py
diff --git a/artist/__init__.py b/artist/__init__.py
index <HASH>..<HASH> 100644
--- a/artist/__init__.py
+++ b/artist/__init__.py
@@ -57,9 +57,9 @@ The following modules are included:
     based on the name of the function creating the plot.

 """
-from plot import Plot, PolarPlot
-from multi_plot import MultiPlot
-from recursive_smooth import smooth
+from .plot import Plot, PolarPlot
+from .multi_plot import MultiPlot
+from .recursive_smooth import smooth

 # Backwards compatibility
-from plot import Plot as GraphArtist
+from .plot import Plot as GraphArtist
fixing relative imports

The only acceptable syntax for relative imports is from .[module] import name. All import forms not starting with . are interpreted as absolute imports. (PEP <I>)
py
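A runnable sketch of the import behavior the commit above relies on, building a throwaway package on the fly (a hypothetical layout, not the artist package itself): inside a package, from .plot import ... resolves to the package's own module, while the dot-less form is an absolute import on Python 3 and fails unless a top-level plot module happens to exist.

import os, sys, tempfile

root = tempfile.mkdtemp()
pkg = os.path.join(root, 'artist')
os.mkdir(pkg)
with open(os.path.join(pkg, 'plot.py'), 'w') as f:
    f.write('class Plot:\n    pass\n')
with open(os.path.join(pkg, '__init__.py'), 'w') as f:
    f.write('from .plot import Plot\n')  # relative import: works on Python 3

sys.path.insert(0, root)
import artist
print(artist.Plot)  # <class 'artist.plot.Plot'>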
diff --git a/taxtastic/refpkg.py b/taxtastic/refpkg.py
index <HASH>..<HASH> 100644
--- a/taxtastic/refpkg.py
+++ b/taxtastic/refpkg.py
@@ -155,6 +155,7 @@ class Refpkg(object):
             os.mkdir(path)
             with open(os.path.join(path, self._manifest_name), 'w') as h:
                 json.dump(manifest_template(), h, indent=4)
+                h.write('\n')
         else:
             raise ValueError(
                 "Reference package {0} does not exist.".format(path))
@@ -279,6 +280,7 @@
         """
         with open(os.path.join(self.path, self._manifest_name), 'w') as h:
             json.dump(self.contents, h, indent=4)
+            h.write('\n')

     def _sync_from_disk(self):
         """Read any changes made on disk to this Refpkg.
Trailing newline in JSON output

For reading from R
py
diff --git a/openquake/calculators/views.py b/openquake/calculators/views.py
index <HASH>..<HASH> 100644
--- a/openquake/calculators/views.py
+++ b/openquake/calculators/views.py
@@ -304,7 +304,7 @@ def view_job_info(token, dstore):
     task_sent.refresh()
     task_sent = dict(task_sent[()])
     for task, array in group_array(task_info[()], 'taskname').items():
-        sent = sorted(ast.literal_eval(task_sent[task]).items(),
+        sent = sorted(ast.literal_eval(task_sent[decode(task)]).items(),
                       key=operator.itemgetter(1), reverse=True)
         sent = ['%s=%s' % (k, humansize(v)) for k, v in sent[:3]]
         recv = array['received'].sum()
Fixed view_job_info [skip CI]
py
diff --git a/pliers/converters/misc.py b/pliers/converters/misc.py
index <HASH>..<HASH> 100644
--- a/pliers/converters/misc.py
+++ b/pliers/converters/misc.py
@@ -15,11 +15,11 @@ class ExtractorResultToSeriesConverter(Converter):
         df = result.to_df(timing=False, metadata=False, object_id=False)
         n_rows = df.shape[0]
         stims = []
-        for i in n_rows:
+        for i in range(n_rows):
             data = df.iloc[i, :]
-            onset = result.onset[i]
-            duration = result.duration[i]
-            order = result.order[i]
-            st = SeriesStim(data, onset=onset, duration=duration, order=order)
+            onset = result.onset[i] if result.onset is not None else None
+            dur = result.duration[i] if result.duration is not None else None
+            order = result.order[i] if result.order is not None else i
+            st = SeriesStim(data, onset=onset, duration=dur, order=order)
             stims.append(st)
         return stims
ensure context attributes are set to None if unavailable
py
diff --git a/argcomplete/completers.py b/argcomplete/completers.py
index <HASH>..<HASH> 100644
--- a/argcomplete/completers.py
+++ b/argcomplete/completers.py
@@ -85,7 +85,7 @@ class _FilteredFilesCompleter(object):
         A predicate accepts as its only argument a candidate path and
         either accepts it or rejects it.
         '''
-        assert predicate and callable(predicate), 'Expected a callable predicate'
+        assert predicate, 'Expected a callable predicate'
         self.predicate = predicate

     def __call__(self, prefix, **kwargs):
Removed callable check from assertion in _FilteredFilesCompleter
py
diff --git a/pybry/constants.py b/pybry/constants.py
index <HASH>..<HASH> 100644
--- a/pybry/constants.py
+++ b/pybry/constants.py
@@ -17,3 +17,6 @@ DTYPE_MAPPING = {'list': list,
 # LBRYCRD documentation doesn't exist at least that I could find
 # LBRYCRD_API_RAW_JSON_URL = ""
+
+
+LBRYD_FPATH = "pybry/lbryd_api_test.py"
Adds path to the LBRYD api file
py
diff --git a/session_security/tests/base.py b/session_security/tests/base.py
index <HASH>..<HASH> 100644
--- a/session_security/tests/base.py
+++ b/session_security/tests/base.py
@@ -1,7 +1,12 @@
 import time

 from django.contrib.auth.models import User
-from django.test import LiveServerTestCase
+
+try:
+    from django.contrib.staticfiles.testing import StaticLiveServerTestCase as \
+        LiveServerTestCase
+except ImportError:
+    from django.test import LiveServerTestCase
 from selenium.webdriver.common.keys import Keys
 from selenium.webdriver.common.action_chains import ActionChains
 from selenium.webdriver.firefox.webdriver import WebDriver
Support django <I> in tests
py
diff --git a/doctr/__main__.py b/doctr/__main__.py
index <HASH>..<HASH> 100644
--- a/doctr/__main__.py
+++ b/doctr/__main__.py
@@ -130,7 +130,7 @@ def configure(args, parser):
             "doctr configure --force to run anyway.")

     build_repo = input("What repo do you want to build the docs for (org/reponame, like 'drdoctr/doctr')? ")
-    deploy_repo = input("What repo do you want to deploy the docs to? [{build_repo}]".format(build_repo=build_repo))
+    deploy_repo = input("What repo do you want to deploy the docs to? [{build_repo}] ".format(build_repo=build_repo))

     if not deploy_repo:
         deploy_repo = build_repo
Add missing trailing space to input() prompt
py
diff --git a/transit/handler.py b/transit/handler.py
index <HASH>..<HASH> 100644
--- a/transit/handler.py
+++ b/transit/handler.py
@@ -161,14 +161,8 @@ class DateTimeHandler(object):

 class VerboseDateTimeHandler(DateTimeHandler):
     @staticmethod
-    def tag(_):
-        return "t"
-    @staticmethod
     def rep(d):
         return DateTimeHandler.string_rep(d)
-    @staticmethod
-    def string_rep(d):
-        return DateTimeHandler.string_rep(d)


 class SetHandler(object):
     @staticmethod
removed unnecessary method defs from verbose handler after inheritance change.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ use_setuptools()
 from setuptools import setup, find_packages

 setup(name = "svg-chart",
-    version = "1.1",
+    version = "1.2",
     description = "Python SVG Charting Library",
     author = "Jason R. Coombs",
     author_email = "[email protected]",
bumped to version <I>
py
diff --git a/ansible/module_utils/hashivault.py b/ansible/module_utils/hashivault.py
index <HASH>..<HASH> 100644
--- a/ansible/module_utils/hashivault.py
+++ b/ansible/module_utils/hashivault.py
@@ -170,19 +170,22 @@ def hashivault_read(params):
         try:
             data = response.get('data', {})
             data = data.get('data', {})
-            lease_duration = response.get('lease_duration', None)
-            if lease_duration is not None:
-                result['lease_duration'] = lease_duration
-            lease_id = response.get('lease_id', None)
-            if lease_id is not None:
-                result['lease_id'] = lease_id
-            renewable = response.get('renewable', None)
-            if renewable is not None:
-                result['renewable'] = renewable
         except Exception:
             data = str(response)
     else:
         data = response['data']
+    lease_duration = response.get('lease_duration', None)
+    if lease_duration is not None:
+        result['lease_duration'] = lease_duration
+    lease_id = response.get('lease_id', None)
+    if lease_id is not None:
+        result['lease_id'] = lease_id
+    renewable = response.get('renewable', None)
+    if renewable is not None:
+        result['renewable'] = renewable
+    wrap_info = response.get('wrap_info', None)
+    if wrap_info is not None:
+        result['wrap_info'] = wrap_info
     if key and key not in data:
         if default is not None:
             result['value'] = default
Support metadata for v1 reads
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
 Sentry-Python - Sentry SDK for Python
 =====================================

-**Sentry-Python is an experimental SDK for Sentry.** Check out `GitHub
+**Sentry-Python is an SDK for Sentry.** Check out `GitHub
 <https://github.com/getsentry/sentry-python>`_ to find out more.
 """
Remove experimental disclaimer from PyPI (#<I>)

* Remove experimental disclaimer from PyPI
* Update setup.py
py
diff --git a/skitai/server/Skitai.py b/skitai/server/Skitai.py
index <HASH>..<HASH> 100644
--- a/skitai/server/Skitai.py
+++ b/skitai/server/Skitai.py
@@ -255,6 +255,7 @@ class Loader:
         return None

     # worker process
     def close (self):
+        print ('11111111', self.wasc.httpserver.worker_ident)
         for attr, obj in list(self.wasc.objects.items ()):
             if attr == "logger":
                 continue
@@ -275,7 +276,7 @@
             except:
                 self.wasc.logger.trace ("server")

-        print ('@@@@@@', self.wasc.httpserver.worker_ident)
+        print ('222222222', self.wasc.httpserver.worker_ident)
         if self.wasc.httpserver.worker_ident == "master":
             self.wasc.logger ("server", "[info] cleanup done, closing logger... bye")
             try:
handle KeyboardInterrupt on posix
py
diff --git a/responses/__init__.py b/responses/__init__.py
index <HASH>..<HASH> 100644
--- a/responses/__init__.py
+++ b/responses/__init__.py
@@ -812,11 +812,6 @@ class RequestsMock(object):
                 response = resp_callback(response) if resp_callback else response
                 raise

-            stream = kwargs.get("stream")
-            if not stream:
-                response.content  # NOQA required to ensure that response body is read.
-                response.close()
-
             response = resp_callback(response) if resp_callback else response
             match.call_count += 1
             self._calls.add(request, response)
Remove unnecessary code in _on_request

We don't need to access `response.content` anymore to load the response body stream.
py
diff --git a/holoviews/plotting/raster.py b/holoviews/plotting/raster.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/raster.py
+++ b/holoviews/plotting/raster.py
@@ -278,10 +278,10 @@ class RasterGridPlot(GridPlot, OverlayPlot):


     def update_frame(self, key, ranges=None):
-        grid_values = self.layout.values()
+        grid_values = self._get_frame(key).values()
         ranges = self.compute_ranges(self.layout, key, ranges)
         for i, plot in enumerate(self.handles['projs']):
-            view = grid_values[i].get(key, None)
+            view = grid_values[i]
             if view:
                 plot.set_visible(True)
                 data = view.values()[0].data if isinstance(view, CompositeOverlay) else view.data
RasterGridPlot fixes for Layouts with different dimensions
py
diff --git a/custodian/vasp/handlers.py b/custodian/vasp/handlers.py
index <HASH>..<HASH> 100644
--- a/custodian/vasp/handlers.py
+++ b/custodian/vasp/handlers.py
@@ -457,9 +457,12 @@ class NonConvergingErrorHandler(ErrorHandler, MSONable):
     @classmethod
     def from_dict(cls, d):
-        return cls(output_filename=d["output_filename"],
-                   nionic_steps=d.get("nionic_steps", 10),
-                   change_algo=d.get("change_algo", False))
+        if "nionic_steps" in d:
+            return cls(output_filename=d["output_filename"],
+                       nionic_steps=d.get("nionic_steps", 10),
+                       change_algo=d.get("change_algo", False))
+        else:
+            return cls(output_filename=d["output_filename"])


 def backup(outfile="vasp.out"):
Added backward compatibility in NonConvergingErrorHandler
py
diff --git a/ipware/apps.py b/ipware/apps.py
index <HASH>..<HASH> 100644
--- a/ipware/apps.py
+++ b/ipware/apps.py
@@ -1,5 +1,5 @@
 from django.apps import AppConfig
-from django.utils.translation import ugettext_lazy as _
+from django.utils.translation import gettext_lazy as _


 class IPwareConfig(AppConfig):
Fix for Django <I> (#<I>)
py
diff --git a/bit/network/services.py b/bit/network/services.py
index <HASH>..<HASH> 100644
--- a/bit/network/services.py
+++ b/bit/network/services.py
@@ -973,11 +973,11 @@ class NetworkAPI:
         BlockchainAPI.get_transaction_by_id,
     ]
     GET_UNSPENT_MAIN = [
+        BlockstreamAPI.get_unspent,
         BlockchairAPI.get_unspent,
-        BitcoreAPI.get_unspent,  # No limit
         SmartbitAPI.get_unspent,  # Limit 1000
-        BlockstreamAPI.get_unspent,
         BlockchainAPI.get_unspent,
+        BitcoreAPI.get_unspent,  # No limit
     ]
     BROADCAST_TX_MAIN = [
         BlockchairAPI.broadcast_tx,
@@ -1004,10 +1004,10 @@
         SmartbitAPI.get_transaction_by_id_testnet,
     ]
     GET_UNSPENT_TEST = [
+        BlockstreamAPI.get_unspent_testnet,
         BlockchairAPI.get_unspent_testnet,
-        BitcoreAPI.get_unspent_testnet,  # No limit
         SmartbitAPI.get_unspent_testnet,  # Limit 1000
-        BlockstreamAPI.get_unspent_testnet,
+        BitcoreAPI.get_unspent_testnet,  # No limit
     ]
     BROADCAST_TX_TEST = [
         BlockchairAPI.broadcast_tx_testnet,
Downgrade BitCoreAPI in GET_UNSPENT_MAIN and GET_UNSPENT_TEST (#<I>)
py
diff --git a/phoebe/parameters/parameters.py b/phoebe/parameters/parameters.py
index <HASH>..<HASH> 100644
--- a/phoebe/parameters/parameters.py
+++ b/phoebe/parameters/parameters.py
@@ -3833,7 +3833,13 @@ class FloatParameter(Parameter):

         self.set_value(kwargs.get('value', ''), unit)

-        self._dict_fields_other = ['description', 'value', 'quantity', 'default_unit', 'limits', 'visible_if', 'copy_for', 'timederiv']  # TODO: add adjust? or is that a different subclass?
+        self._dict_fields_other = ['description', 'value', 'quantity', 'default_unit', 'limits', 'visible_if', 'copy_for']  # TODO: add adjust? or is that a different subclass?
+        if conf.devel:
+            # NOTE: this check will take place when CREATING the parameter,
+            # so toggling devel after won't affect whether timederiv is included
+            # in string representations.
+            self._dict_fields_other += ['timederiv']
+
         self._dict_fields = _meta_fields_all + self._dict_fields_other

     @property
Hide timederiv unless in developer mode. Closes #<I>
py
diff --git a/pdb.py b/pdb.py
index <HASH>..<HASH> 100644
--- a/pdb.py
+++ b/pdb.py
@@ -371,6 +371,13 @@ class Pdb(pdb.Pdb, ConfigurableClass):
             self.history.append(line)
         return pdb.Pdb.default(self, line)

+    def do_help(self, arg):
+        try:
+            return pdb.Pdb.do_help(self, arg)
+        except AttributeError:
+            print("*** No help for '{command}'".format(command=arg),
+                  file=self.stdout)
+
     def help_hidden_frames(self):
         print("""\
 Some frames might be marked as "hidden": by default, hidden frames are not
Catch AttributeError and print "No help" message because some functions in Python 3 have no doc string
py
diff --git a/autopython/cpython.py b/autopython/cpython.py
index <HASH>..<HASH> 100644
--- a/autopython/cpython.py
+++ b/autopython/cpython.py
@@ -130,6 +130,18 @@ class PresenterShell(object):
                                                    color_scheme=self._color_scheme,
                                                    locals=ns)
         else:
             self._interpreter = PresenterInterpreter(locals=ns)
+        have_readline = True
+        try:
+            import readline
+        except ImportError:
+            try:
+                import pyreadline as readline
+            except ImportError:
+                have_readline = False
+        if have_readline:
+            import rlcompleter
+            readline.set_completer(rlcompleter.Completer(ns).complete)
+            readline.parse_and_bind("tab: complete")

     def begin(self):
         self.reset_interpreter()
@@ -203,8 +215,7 @@
         while self._interacting:
             try:
                 try:
-                    print(end=ps2 if need_more else ps1, flush=True)
-                    line = input()
+                    line = input(ps2 if need_more else ps1)
                     if PY2:
                         line = line.decode(sys.stdin.encoding)
                     lines.append(line)
Add readline support (with autocomplete) for CPython interactive prompt
py
diff --git a/gnupg/_parsers.py b/gnupg/_parsers.py
index <HASH>..<HASH> 100644
--- a/gnupg/_parsers.py
+++ b/gnupg/_parsers.py
@@ -1541,7 +1541,7 @@ class ListPackets(object):
             if not self.key:
                 self.key = key
             self.encrypted_to.append(key)
-        elif key == ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'):
+        elif key in ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'):
             self.need_passphrase = True
         elif key == 'NEED_PASSPHRASE_SYM':
             self.need_passphrase_sym = True
Fix need_passphrase bug in ListPackets
py
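A standalone sketch of the one-character bug fixed above: comparing a string to a tuple with == is always False, so neither status keyword ever set the flag; in does the intended membership test.

key = 'NEED_PASSPHRASE'

print(key == ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'))  # False: str vs tuple
print(key in ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'))  # True: membership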
diff --git a/indra/assemblers/html/assembler.py b/indra/assemblers/html/assembler.py
index <HASH>..<HASH> 100644
--- a/indra/assemblers/html/assembler.py
+++ b/indra/assemblers/html/assembler.py
@@ -630,6 +630,7 @@ def _format_evidence_text(stmt, curation_dict=None, correct_tags=None):
             [cur for cur in curations if cur['error_type'] in correct_tags])
         num_incorrect = num_curations - num_correct
         text_refs = {k.upper(): v for k, v in ev.text_refs.items()}
+        source_url = src_url(ev)
         ev_list.append({'source_api': source_api,
                         'pmid': ev.pmid,
                         'text_refs': text_refs,
@@ -639,7 +640,7 @@
                         'num_curations': num_curations,
                         'num_correct': num_correct,
                         'num_incorrect': num_incorrect,
-                        'source_url': ev.annotations.get('source_url', '')
+                        'source_url': source_url
                         })
     return ev_list
Use helper to get source url
py
diff --git a/bcbio/variation/prioritize.py b/bcbio/variation/prioritize.py
index <HASH>..<HASH> 100644
--- a/bcbio/variation/prioritize.py
+++ b/bcbio/variation/prioritize.py
@@ -68,7 +68,7 @@ def _prep_priority_filter(gemini_db, data):
     """
     from gemini import GeminiQuery
     out_file = "%s-priority.tsv" % utils.splitext_plus(gemini_db)[0]
-    if not utils.file_exists(out_file):
+    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
         ref_chroms = set([x.name for x in ref.file_contigs(dd.get_ref_file(data), data["config"])])
         with file_transaction(data, out_file) as tx_out_file:
             gq = GeminiQuery(gemini_db)
tumor-only: check for bgzip filter CSV on re-runs

Avoid slowdown when re-running projects with tumor-only prioritization, since the bgzip file is produced after generating the initial CSV.
py
diff --git a/salt/modules/pip.py b/salt/modules/pip.py
index <HASH>..<HASH> 100644
--- a/salt/modules/pip.py
+++ b/salt/modules/pip.py
@@ -98,7 +98,8 @@ def _get_user(user, runas):
         salt.utils.warn_until(
             'Lithium',
             'The \'runas\' argument to pip.install is deprecated, and will be '
-            'removed in Salt {version}. Please use \'user\' instead.'
+            'removed in Salt {version}. Please use \'user\' instead.',
+            stacklevel=3
         )

     # "There can only be one"
BUG: Set warning on deprecated runas option to occur at the right stack level
py
diff --git a/insights/specs/default.py b/insights/specs/default.py
index <HASH>..<HASH> 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -348,6 +348,8 @@ class DefaultSpecs(Specs):
             # https://access.redhat.com/solutions/21680
             return list(ps_httpds)

+    httpd_pid = simple_command("/usr/bin/pgrep -o httpd")
+    httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits")
     httpd_M = foreach_execute(httpd_cmd, "%s -M")
     httpd_V = foreach_execute(httpd_cmd, "%s -V")
     ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*")
Add spec "httpd limits" back (#<I>)
py
diff --git a/sovrin_common/identity.py b/sovrin_common/identity.py
index <HASH>..<HASH> 100644
--- a/sovrin_common/identity.py
+++ b/sovrin_common/identity.py
@@ -53,6 +53,11 @@ class Identity(GeneratesRequest):
     def verkey(self):
         return self.identity.verkey

+    @verkey.setter
+    def verkey(self, new_val):
+        identifier = self.identifier
+        self.identity = DidIdentity(identifier, verkey=new_val)
+
     @staticmethod
     def correctRole(role):
         return None if role == NULL else role
Fixed setting a verkey in the Identity object (#<I>)
py
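The diff above pairs an existing read-only property with a new setter that rebuilds the wrapped object instead of mutating it. A generic sketch of that pattern, with hypothetical names standing in for DidIdentity:

class Wrapper:
    def __init__(self, identifier, verkey=None):
        self._inner = (identifier, verkey)  # stand-in for the wrapped DidIdentity

    @property
    def verkey(self):
        return self._inner[1]

    @verkey.setter
    def verkey(self, new_val):
        # Rebuild the inner object rather than mutating it, as in the diff.
        self._inner = (self._inner[0], new_val)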
diff --git a/salt/modules/napalm_netacl.py b/salt/modules/napalm_netacl.py index <HASH>..<HASH> 100644 --- a/salt/modules/napalm_netacl.py +++ b/salt/modules/napalm_netacl.py @@ -26,7 +26,7 @@ it requires NAPALM_ library to be installed: ``pip install napalm``. Please check Installation_ for complete details. .. _NAPALM: https://napalm.readthedocs.io -.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html +.. _Installation: https://napalm.readthedocs.io/en/latest/installation/index.html """ from __future__ import absolute_import, print_function, unicode_literals
Correct link to napalm installation. Resolves #<I>
py
diff --git a/pelix/shell/core.py b/pelix/shell/core.py index <HASH>..<HASH> 100644 --- a/pelix/shell/core.py +++ b/pelix/shell/core.py @@ -507,9 +507,9 @@ class ShellService(parser.Shell): try: max_depth = int(max_depth) if max_depth < 1: - max_depth = sys.maxint + max_depth = None except (ValueError, TypeError): - max_depth = sys.maxint + max_depth = None # pylint: disable=W0212 try: @@ -544,7 +544,8 @@ class ShellService(parser.Shell): trace_lines = [] depth = 0 frame = stack - while frame is not None and depth < max_depth: + while frame is not None \ + and (max_depth is None or depth < max_depth): # Store the line information trace_lines.append(self.__format_frame_info(frame)) @@ -572,9 +573,9 @@ class ShellService(parser.Shell): try: max_depth = int(max_depth) if max_depth < 1: - max_depth = sys.maxint + max_depth = None except (ValueError, TypeError): - max_depth = sys.maxint + max_depth = None # pylint: disable=W0212 try: @@ -600,7 +601,8 @@ class ShellService(parser.Shell): trace_lines = [] depth = 0 frame = stack - while frame is not None and depth < max_depth: + while frame is not None \ + and (max_depth is None or depth < max_depth): # Store the line information trace_lines.append(self.__format_frame_info(frame))
"thread(s)" shell commands works with Python 3 sys.maxint has been removed in Python 3
py
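Since sys.maxint no longer exists in Python 3, the fix uses None as an explicit "unlimited" sentinel instead of a huge integer. A minimal runnable sketch of the resulting loop condition:

def walk(frames, max_depth=None):
    # max_depth=None means unlimited; any positive int caps the traversal.
    depth = 0
    lines = []
    for frame in frames:
        if max_depth is not None and depth >= max_depth:
            break
        lines.append(str(frame))
        depth += 1
    return lines

print(walk(range(10)))                # all ten entries
print(walk(range(10), max_depth=3))   # first three only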
diff --git a/pymatgen/analysis/wulff.py b/pymatgen/analysis/wulff.py index <HASH>..<HASH> 100644 --- a/pymatgen/analysis/wulff.py +++ b/pymatgen/analysis/wulff.py @@ -588,7 +588,7 @@ class WulffShape: x_pts, y_pts, z_pts = all_xyz[0], all_xyz[1], all_xyz[2] index_list = [int(i) for i in np.linspace(0, len(x_pts) - 1, len(x_pts))] - tri_indices = np.array(itertools.combinations(index_list, 3)).T + tri_indices = np.array(list(itertools.combinations(index_list, 3))).T hkl = self.miller_list[plane.index] hkl = unicodeify_spacegroup("(" + "%s" * len(hkl) % hkl + ")") color = "rgba(%.5f, %.5f, %.5f, %.5f)" % tuple(np.array(plane_color) * 255)
Bug fix for plotly plotting of Wulff shapes
py
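The bug above is that numpy.array over a bare iterator does not consume it: it produces a 0-d object array wrapping the generator, so .T does nothing useful. Materializing with list() first yields the intended 2-D integer array. A standalone illustration:

import itertools
import numpy as np

combos = itertools.combinations(range(4), 3)
bad = np.array(combos)       # 0-d object array holding the iterator itself
print(bad.shape, bad.dtype)  # () object

good = np.array(list(itertools.combinations(range(4), 3))).T
print(good.shape)            # (3, 4): one row per index position, one column per triangle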
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index <HASH>..<HASH> 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -624,7 +624,7 @@ def unit_private_ip(): @cached -def storage_get(attribute="", storage_id=""): +def storage_get(attribute=None, storage_id=None): """Get storage attributes""" _args = ['storage-get', '--format=json'] if storage_id: @@ -638,7 +638,7 @@ def storage_get(attribute="", storage_id=""): @cached -def storage_list(storage_name=""): +def storage_list(storage_name=None): """List the storage IDs for the unit""" _args = ['storage-list', '--format=json'] if storage_name:
Use None for default values in storage helpers For consistency with other helpers.
py
diff --git a/tests/urls.py b/tests/urls.py index <HASH>..<HASH> 100644 --- a/tests/urls.py +++ b/tests/urls.py @@ -1,4 +1,4 @@ -from django.conf.urls import include, patterns, url +from django.conf.urls import include, url from dynamic_rest.routers import DynamicRouter from tests import viewsets @@ -20,7 +20,6 @@ router.register(r'user_locations', viewsets.UserLocationViewSet) router.register_resource(viewsets.CatViewSet, namespace='v2') # canonical router.register(r'v1/user_locations', viewsets.UserLocationViewSet) -urlpatterns = patterns( - '', - url(r'^', include(router.urls)) +urlpatterns = ( + url(r'^', include(router.urls)), )
remove patterns (deprecated in <I>)
py
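Since Django 1.8, urlpatterns is just a plain sequence of url() entries; the patterns('') wrapper was deprecated and later removed. A minimal sketch, assuming a Django version where django.conf.urls.url still exists (as in the test suite above), with a hypothetical view for self-containment:

from django.conf.urls import url

def index(request):
    # Hypothetical view, only to make the example self-contained.
    ...

# A plain list (or tuple) of url() entries; no patterns('') wrapper needed.
urlpatterns = [
    url(r'^$', index),
]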
diff --git a/aiofiles/__init__.py b/aiofiles/__init__.py index <HASH>..<HASH> 100644 --- a/aiofiles/__init__.py +++ b/aiofiles/__init__.py @@ -1,6 +1,6 @@ """Utilities for asyncio-friendly file handling.""" from .threadpool import open -__version__ = "0.6.0" +__version__ = "0.7.0dev0" __all__ = ["open"]
<I> open for business!
py
diff --git a/salt/beacons/service.py b/salt/beacons/service.py index <HASH>..<HASH> 100644 --- a/salt/beacons/service.py +++ b/salt/beacons/service.py @@ -72,6 +72,16 @@ def beacon(config): ret_dict = {} ret_dict[service] = {'running': __salt__['service.status'](service)} + # If no options is given to the service, we fall back to the defaults + # assign a False value to oncleanshutdown and onchangeonly. Those + # key:values are then added to the service dictionary. + if config[service] is None: + defaults = { + 'oncleanshutdown':False, + 'onchangeonly':False + } + config[service] = defaults + # We only want to report the nature of the shutdown # if the current running status is False # as well as if the config for the beacon asks for it
Fixed issue number #<I> - When no parameters are given to a service, the service object is of type None and thus isn't iterable. This is contrary to the documentation, which states that there are default values. Default values are now added as False.
py
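A compact alternative to the explicit None check added above — not what the commit does, just a sketch — is to normalize missing per-service options with a dict merge over documented defaults:

DEFAULTS = {'oncleanshutdown': False, 'onchangeonly': False}

def normalize(config, service):
    # Fall back to the documented defaults when the service has no options.
    opts = config.get(service) or {}
    return {**DEFAULTS, **opts}

print(normalize({'sshd': None}, 'sshd'))
# {'oncleanshutdown': False, 'onchangeonly': False}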
diff --git a/xarray/test/test_dask.py b/xarray/test/test_dask.py index <HASH>..<HASH> 100644 --- a/xarray/test/test_dask.py +++ b/xarray/test/test_dask.py @@ -332,5 +332,5 @@ class TestDataArrayAndDataset(DaskTestCase): # Test array creation from Variable with dask backend. # This is used e.g. in broadcast() a = DataArray(self.lazy_array.variable) - self.assertLazyAndIdentical(self.lazy_array, a) + self.assertLazyAndEqual(self.lazy_array, a)
Integrate no_dask_resolve with dask_broadcast branches
py
diff --git a/estnltk/layer/enveloping_span.py b/estnltk/layer/enveloping_span.py index <HASH>..<HASH> 100644 --- a/estnltk/layer/enveloping_span.py +++ b/estnltk/layer/enveloping_span.py @@ -53,7 +53,6 @@ class EnvelopingSpan: for x in zip(*[[i if isinstance(i, (list, tuple)) else itertools.cycle([i]) for i in getattr(self, item)] for item in items] - ): quickbreak = all(isinstance(i, itertools.cycle) for i in x) @@ -78,11 +77,6 @@ class EnvelopingSpan: def layer(self): return self._layer - @layer.setter - def layer(self, value): - # assert isinstance(value, Layer) or value is None - self._layer = value - @property def start(self): return self._base_span.start
removed EnvelopingSpan.layer setter
py
diff --git a/functional_tests/test_implicitized_intersect.py b/functional_tests/test_implicitized_intersect.py index <HASH>..<HASH> 100644 --- a/functional_tests/test_implicitized_intersect.py +++ b/functional_tests/test_implicitized_intersect.py @@ -92,7 +92,17 @@ def check_no_intersect(nodes1, nodes2): assert param_vals.size == 0 [email protected]('intersection_info', INTERSECTIONS) +def id_func(intersection_info): + return 'curves {:d} and {:d} (ID: {:d})'.format( + intersection_info['curve1'], intersection_info['curve2'], + intersection_info['id']) + + [email protected]( + 'intersection_info', + INTERSECTIONS, + ids=id_func, +) def test_intersect(intersection_info): # Get info for "curve 1". curve_id1 = intersection_info['curve1']
Adding IDs for `parametrize`-d functional tests. This way, a failure can actually give a human-readable description of what was being tested. Big time H/T to: <URL>
py
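The ids callable gives each parametrized case a readable label in pytest's report instead of the opaque intersection_info0, intersection_info1, and so on. A self-contained sketch with hypothetical data:

import pytest

CASES = [
    {'curve1': 1, 'curve2': 2, 'id': 7},
    {'curve1': 3, 'curve2': 4, 'id': 9},
]

def id_func(case):
    return 'curves {:d} and {:d} (ID: {:d})'.format(
        case['curve1'], case['curve2'], case['id'])

@pytest.mark.parametrize('case', CASES, ids=id_func)
def test_label(case):
    # Appears as test_label[curves 1 and 2 (ID: 7)] etc. in the report.
    assert case['curve1'] != case['curve2']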
diff --git a/searx/engines/bing.py b/searx/engines/bing.py index <HASH>..<HASH> 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -52,7 +52,8 @@ def request(query, params): offset=offset) params['url'] = base_url + search_path - params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36' + params['headers']['User-Agent'] = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' + '(KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36') return params
fix pylint error in bing engine
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -47,10 +47,10 @@ setup( 'gevent>=1.0.2', 'psycogreen>=1.0', 'django-db-geventpool>=1.20.1', - 'redis>=2.10.5', + 'redis~=2.10.6', 'requests>=2.8.1', 'django-redis>=4.3.0', - 'channels>=1.1.6', + 'channels~=1.1.6', ], extras_require={ 'docs': [ @@ -65,7 +65,7 @@ setup( 'djangorestframework-filters>=0.9.1', 'django-guardian>=1.4.2', 'django-jenkins>=0.17.0', - 'asgi-redis>=1.4.2', + 'asgi-redis~=1.4.3', 'coverage>=3.7.1', 'pep8>=1.6.2', 'pylint>=1.4.3',
Pin versions of redis, asgi-redis and channels packages
py
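The ~= "compatible release" operator (PEP 440) pins to the given release while still allowing later patch levels: redis~=2.10.6 means >=2.10.6, <2.11. A quick check, assuming the packaging library is installed:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=2.10.6")
print("2.10.9" in spec)  # True: a newer patch release is accepted
print("2.11.0" in spec)  # False: the next minor release is excluded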
diff --git a/alignak/external_command.py b/alignak/external_command.py index <HASH>..<HASH> 100644 --- a/alignak/external_command.py +++ b/alignak/external_command.py @@ -3664,19 +3664,3 @@ class ExternalCommandManager: realm.fill_potential_satellites_by_type('pollers') logger.debug("Poller %s added", poller_name) logger.debug("Potential %s", str(realm.get_potential_satellites_by_type('poller'))) - - -if __name__ == '__main__': - - FIFO_PATH = '/tmp/my_fifo' - - if os.path.exists(FIFO_PATH): - os.unlink(FIFO_PATH) - - if not os.path.exists(FIFO_PATH): - os.umask(0) - os.mkfifo(FIFO_PATH, 0660) - my_fifo = open(FIFO_PATH, 'w+') - logger.debug("my_fifo: %s", my_fifo) - - logger.debug(open(FIFO_PATH, 'r').readline())
Enh: Pylint - C<I> for constant names in external_command.py
py
diff --git a/tests/test_parser.py b/tests/test_parser.py index <HASH>..<HASH> 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -813,16 +813,15 @@ def _make_parser_test(LEXER, PARSER): def test_templates(self): g = _Lark(r""" - start: number_list "\n" number_dict + start: "[" sep{NUMBER, ","} "]" sep{item, delim}: item (delim item)* - number_list: "[" sep{NUMBER, ","} "]" - number_dict: "{" sep{(NUMBER ":" NUMBER), ";"} "}" // Just to test this NUMBER: /\d+/ %ignore " " """) - x = g.parse("[1, 2, 3, 4] {1:2, 3:4, 5:6}") - print(x) - x = g.parse("[1] {1:2}") + x = g.parse("[1, 2, 3, 4]") + self.assertSequenceEqual(x.children,['1', '2', '3', '4']) + x = g.parse("[1]") + self.assertSequenceEqual(x.children,['1']) print(x) def test_token_collision_WS(self):
Corrected & Simplified test
py
diff --git a/pykechain/models/widgets/widget_schemas.py b/pykechain/models/widgets/widget_schemas.py index <HASH>..<HASH> 100644 --- a/pykechain/models/widgets/widget_schemas.py +++ b/pykechain/models/widgets/widget_schemas.py @@ -32,7 +32,8 @@ def get_widget_meta_schema(widget_type=WidgetTypes.UNDEFINED): # } # }) -attachmentviewer_meta_schema = deepcopy(widget_meta_schema).update({ +attachmentviewer_meta_schema = deepcopy(widget_meta_schema) +attachmentviewer_meta_schema.update({ "properties": { "propertyInstanceId": {"$ref": "#/definitions/uuidString"}, "activityId": {"$ref": "#/definitions/uuidString"},
Working concept of JSON schemas for the attachment widget
py
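The bug fixed above is easy to hit: dict.update() mutates in place and returns None, so chaining it directly onto deepcopy() assigns None to the schema. A standalone illustration:

from copy import deepcopy

base = {'type': 'object'}

broken = deepcopy(base).update({'properties': {}})
print(broken)  # None -- update() returns None, so the copy is lost

fixed = deepcopy(base)
fixed.update({'properties': {}})
print(fixed)   # {'type': 'object', 'properties': {}}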