diff: string, lengths 139 to 3.65k
message: string, lengths 8 to 627
diff_languages: string, 1 value
diff --git a/twine/commands/upload.py b/twine/commands/upload.py index <HASH>..<HASH> 100644 --- a/twine/commands/upload.py +++ b/twine/commands/upload.py @@ -20,6 +20,7 @@ import hashlib import os.path import subprocess import sys +import itertools try: from urlparse import urlparse, urlunparse @@ -57,18 +58,17 @@ def group_wheel_files_first(dist_files): if not any(fname for fname in dist_files if fname.endswith(".whl")): # Return early if there's no wheel files return dist_files + + group_func = lambda x: x.endswith(".whl") + sorted_distfiles = sorted(dist_files, key=group_func) wheels, not_wheels = [], [] - # Loop over the uploads and put the wheels first. - for upload in dist_files: - _, ext = os.path.splitext(upload) - if ext in (".whl",): - wheels.append(upload) + for grp, files in itertools.groupby(sorted_distfiles, key=group_func): + if grp: + wheels.extend(files) else: - not_wheels.append(upload) + not_wheels.extend(files) - # Make the new list with wheels first - grouped_uploads = wheels + not_wheels - return grouped_uploads + return wheels + not_wheels def find_dists(dists):
Use groupby rather than a for loop.
py
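A minimal standalone sketch of the pattern this commit applies (function and variable names simplified from the diff): `itertools.groupby` only groups consecutive items, so the input is sorted by the same key first.

```python
import itertools

def wheels_first(dist_files):
    # groupby only merges *consecutive* items, so sort by the key first;
    # False (non-wheels) sorts before True (wheels).
    key = lambda f: f.endswith(".whl")
    wheels, not_wheels = [], []
    for is_wheel, files in itertools.groupby(sorted(dist_files, key=key), key=key):
        (wheels if is_wheel else not_wheels).extend(files)
    return wheels + not_wheels

print(wheels_first(["a.tar.gz", "b.whl", "c.zip"]))  # ['b.whl', 'a.tar.gz', 'c.zip']
```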
diff --git a/reana_commons/version.py b/reana_commons/version.py index <HASH>..<HASH> 100755 --- a/reana_commons/version.py +++ b/reana_commons/version.py @@ -14,4 +14,4 @@ and parsed by ``setup.py``. from __future__ import absolute_import, print_function -__version__ = "0.8.2" +__version__ = "0.8.3a1"
release: <I>a1
py
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index <HASH>..<HASH> 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5182,7 +5182,7 @@ class NDFrame(PandasObject, indexing.IndexingMixin): has the right type of data in it. For negative values of `n`, this function returns all rows except - the last `n` rows, equivalent to ``df[:-n]``. + the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. @@ -5257,7 +5257,7 @@ class NDFrame(PandasObject, indexing.IndexingMixin): after sorting or appending rows. For negative values of `n`, this function returns all rows except - the first `n` rows, equivalent to ``df[n:]``. + the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows.
Add absolute value to head() and tail() docstrings for negative values of n (#<I>) * add absolute value to head and tail docstrings * remove minus sign because n is already negative
py
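A quick check of the corrected docstring semantics (a sketch; assumes pandas is installed and uses a throwaway frame):

```python
import pandas as pd

df = pd.DataFrame({"x": range(5)})
# For negative n, head(n) drops the last |n| rows -- equivalent to df[:n]:
assert df.head(-2).equals(df[:-2])
# ...and tail(n) drops the first |n| rows -- equivalent to df[|n|:]:
assert df.tail(-2).equals(df[2:])
```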
diff --git a/scrubadub/scrubbers.py b/scrubadub/scrubbers.py index <HASH>..<HASH> 100644 --- a/scrubadub/scrubbers.py +++ b/scrubadub/scrubbers.py @@ -246,12 +246,12 @@ class Scrubber(object): # Figures out which detectors have iter_filth_documents and applies to them if isinstance(documents, dict): - document_names, document_texts = zip(*documents.items()) + document_names = list(documents.keys()) + document_texts = list(documents.values()) elif isinstance(documents, (tuple, list)): document_texts = documents document_names = [str(x) for x in range(len(documents))] - # currently doing this by aggregating all_filths and then sorting # inline instead of with a Filth.__cmp__ method, which is apparently # much slower http://stackoverflow.com/a/988728/564709
Fix types for document_names and document_texts
py
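A small illustration of the type difference the fix addresses (hypothetical data):

```python
documents = {"doc1": "text one", "doc2": "text two"}

# The old code produced tuples, because zip returns tuples after unpacking:
names, texts = zip(*documents.items())
print(type(names).__name__)  # tuple

# The fix yields real lists, which downstream code can mutate:
names, texts = list(documents.keys()), list(documents.values())
print(type(names).__name__)  # list
```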
diff --git a/spyder/plugins/statusbar/plugin.py b/spyder/plugins/statusbar/plugin.py index <HASH>..<HASH> 100644 --- a/spyder/plugins/statusbar/plugin.py +++ b/spyder/plugins/statusbar/plugin.py @@ -228,4 +228,8 @@ class StatusBar(SpyderPluginV2): def before_mainwindow_visible(self): """Perform actions before the mainwindow is visible""" # Organize widgets in the expected order + self._statusbar.setVisible(False) self._organize_status_widgets() + + def after_mainwindow_visible(self): + self._statusbar.setVisible(True)
Set the statusbar visible once the mainwindow is loaded to avoid glitches
py
diff --git a/safe/metadata/property/dictionary_property.py b/safe/metadata/property/dictionary_property.py index <HASH>..<HASH> 100644 --- a/safe/metadata/property/dictionary_property.py +++ b/safe/metadata/property/dictionary_property.py @@ -48,13 +48,19 @@ class DictionaryProperty(BaseProperty): for k, v in value.items(): if isinstance(v, basestring): try: + # Try to get dictionary, if possible. dictionary_value = json.loads(v) if isinstance(dictionary_value, dict): value[k] = dictionary_value else: pass - except ValueError as e: - pass + except ValueError: + # Try to get time, if possible. + try: + value[k] = datetime.strptime( + v, "%Y-%m-%dT%H:%M:%S.%f") + except ValueError: + pass return value except ValueError as e: raise MetadataCastError(e) @@ -72,7 +78,7 @@ class DictionaryProperty(BaseProperty): elif isinstance(v, (QDate, QDateTime)): string_value[k] = v.toString(Qt.ISODate) elif isinstance(v, datetime): - string_value[k] = v.date().isoformat() + string_value[k] = v.isoformat() elif isinstance(v, date): string_value[k] = v.isoformat() elif isinstance(v, dict):
Preserve datetime in provenance.
py
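The behavioral difference in isolation: `datetime.isoformat()` keeps the time component that `date().isoformat()` discarded, and it round-trips through the `strptime` format added on the decode path (values below are illustrative):

```python
from datetime import datetime

dt = datetime(2016, 5, 4, 12, 30, 15, 500)
s = dt.isoformat()
print(s)                      # 2016-05-04T12:30:15.000500
# Round-trips with the format used when decoding:
assert datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%f") == dt
# The old serialization dropped the time entirely:
print(dt.date().isoformat())  # 2016-05-04
```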
diff --git a/slug_preview/models.py b/slug_preview/models.py index <HASH>..<HASH> 100644 --- a/slug_preview/models.py +++ b/slug_preview/models.py @@ -60,6 +60,12 @@ class SlugPreviewField(models.SlugField): setattr(instance, self.name, slug) return slug + def south_field_triple(self): + from south.modelsinspector import introspector + path = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__) + args, kwargs = introspector(self) + return (path, args, kwargs) + # Avoid using AdminTextInputWidget if 'django.contrib.admin' in settings.INSTALLED_APPS:
Fix south support, if you still need this
py
diff --git a/pyethereum/blocks.py b/pyethereum/blocks.py index <HASH>..<HASH> 100644 --- a/pyethereum/blocks.py +++ b/pyethereum/blocks.py @@ -15,7 +15,7 @@ GENESIS_GAS_LIMIT = 10 ** 6 BLOCK_REWARD = 10 ** 18 BLOCK_DIFF_FACTOR = 1024 GASLIMIT_EMA_FACTOR = 1024 -INITIAL_MIN_GAS_PRICE = 10 ** 15 +GENESIS_MIN_GAS_PRICE = 0 BLKLIM_FACTOR_NOM = 6 BLKLIM_FACTOR_DEN = 5
set GENESIS_MIN_GAS_PRICE = 0
py
diff --git a/bcbio/cwl/create.py b/bcbio/cwl/create.py index <HASH>..<HASH> 100644 --- a/bcbio/cwl/create.py +++ b/bcbio/cwl/create.py @@ -707,8 +707,9 @@ def _to_cwlfile_with_indexes(val, get_retriever): return _item_to_cwldata(val["base"], get_retriever) else: # Standard named set of indices, like bwa + # Do not include snpEff, which we need to isolate inside a nested directory cp_dir, cp_base = os.path.split(os.path.commonprefix([val["base"]] + val["indexes"])) - if cp_base and cp_dir == os.path.dirname(val["base"]): + if cp_base and cp_dir == os.path.dirname(val["base"]) and not "/snpeff/" in cp_dir: return _item_to_cwldata(val["base"], get_retriever, val["indexes"]) else: dirname = os.path.dirname(val["base"])
CWL: always create tarball of snpeff, even if single file
py
diff --git a/tests/test_segmentation.py b/tests/test_segmentation.py index <HASH>..<HASH> 100644 --- a/tests/test_segmentation.py +++ b/tests/test_segmentation.py @@ -101,8 +101,8 @@ class TestModule_joint_label_fusion(unittest.TestCase): seglist[i] = seg r = 2 pp = ants.joint_label_fusion(ref, refmask, ilist, r_search=2, - label_list=seglist, rad=[r]*ref.dimension ) - pp = ants.joint_label_fusion(ref,refmask,ilist, r_search=2, rad=[r]*ref.dimension) + label_list=seglist, rad=2 ) + pp = ants.joint_label_fusion(ref,refmask,ilist, r_search=2, rad=2 ) @@ -120,7 +120,7 @@ class TestModule_kelly_kapowski(unittest.TestCase): mask = ants.get_mask( img ) segs = ants.kmeans_segmentation( img, k=3, kmask = mask) thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1], - w=segs['probabilityimages'][2], its=45, + w=segs['probabilityimages'][2], its=45, r=0.5, m=1)
WIP: test jlf failure
py
diff --git a/openquake/commonlib/riskmodels.py b/openquake/commonlib/riskmodels.py index <HASH>..<HASH> 100644 --- a/openquake/commonlib/riskmodels.py +++ b/openquake/commonlib/riskmodels.py @@ -17,7 +17,8 @@ class VulnerabilityNode(LiteralNode): validators = valid.parameters( vulnerabilitySetID=valid.name, vulnerabilityFunctionID=valid.name_with_dashes, - assetCategory=str, + assetCategory=valid.ChoiceCI( + 'population', 'buildings', 'single_asset'), lossCategory=valid.name, IML=valid.IML, lossRatio=valid.positivefloats,
Added single_asset to the list of valid asset categories
py
diff --git a/bcbio/distributed/transaction.py b/bcbio/distributed/transaction.py index <HASH>..<HASH> 100644 --- a/bcbio/distributed/transaction.py +++ b/bcbio/distributed/transaction.py @@ -127,7 +127,11 @@ def _move_file_with_sizecheck(tx_file, final_file): shutil.move(tx_file, tmp_file) # Validate that file sizes of file before and after transfer are identical - transfer_size = _tx_size(tmp_file) + try: + transfer_size = _tx_size(tmp_file) + # Avoid race conditions where transaction file has already been renamed + except OSError: + return assert want_size == transfer_size, \ ('distributed.transaction.file_transaction: File copy error: ' 'file or directory on temporary storage ({}) size {} bytes '
Transaction: avoid race conditions Transactional size checks can fail when multiple processes are creating and validating the same file and it gets moved in one process while the other still needs to check. This skips the final check in those cases to avoid failing out entirely.
py
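The shape of the guard, reduced to a sketch (`_safe_size` is a hypothetical name; the real code wraps its own `_tx_size` helper):

```python
import os

def _safe_size(path):
    """Size of path, or None if another process already moved it --
    a benign race in this transaction model, so the check is skipped."""
    try:
        return os.path.getsize(path)
    except OSError:
        return None
```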
diff --git a/fireplace/game.py b/fireplace/game.py index <HASH>..<HASH> 100644 --- a/fireplace/game.py +++ b/fireplace/game.py @@ -60,7 +60,7 @@ class BaseGame(Entity): @property def all_entities(self): - return CardList(chain(self.entities, self.hands, self.decks, self.graveyard)) + return chain(self.entities, self.hands, self.decks, self.graveyard) @property def graveyard(self): @@ -74,9 +74,6 @@ class BaseGame(Entity): def live_entities(self): return CardList(chain(self.players[0].live_entities, self.players[1].live_entities)) - def filter(self, *args, **kwargs): - return self.all_entities.filter(*args, **kwargs) - def action_start(self, type, source, index, target): self.manager.action_start(type, source, index, target) if type != PowSubType.PLAY:
Drop support for Game.filter() (Game.all_entities as a CardList)
py
diff --git a/aegean.py b/aegean.py index <HASH>..<HASH> 100755 --- a/aegean.py +++ b/aegean.py @@ -1242,10 +1242,11 @@ def refit_islands(group, stage, outerclip, istart): # do the fit # if the pixel beam is not valid, then recalculate using the location of the last source to have a valid psf - if pixbeam is None and src_valid_psf is not None: - pixbeam = global_data.psfhelper.get_pixbeam(src_valid_psf.ra,src_valid_psf.dec) - else: - logging.critical("Cannot determine pixel beam") + if pixbeam is None: + if src_valid_psf is not None: + pixbeam = global_data.psfhelper.get_pixbeam(src_valid_psf.ra,src_valid_psf.dec) + else: + logging.critical("Cannot determine pixel beam") fac = 1/np.sqrt(2) # TODO: why sqrt(2)? C = Cmatrix(mx, my, pixbeam.a*fwhm2cc*fac, pixbeam.b*fwhm2cc*fac, pixbeam.pa) B = Bmatrix(C)
bugfix: priorized fitting no longer complains about pixbeam when it shouldn't
py
diff --git a/proso/release.py b/proso/release.py index <HASH>..<HASH> 100644 --- a/proso/release.py +++ b/proso/release.py @@ -1 +1 @@ -VERSION = '1.2.0-SNAPSHOT' +VERSION = '1.2.0'
release a new version <I>
py
diff --git a/pagedown/widgets.py b/pagedown/widgets.py index <HASH>..<HASH> 100644 --- a/pagedown/widgets.py +++ b/pagedown/widgets.py @@ -46,7 +46,7 @@ class PagedownWidget(forms.Textarea): """ % { 'attrs': flatatt(final_attrs), 'body': conditional_escape(force_unicode(value)), - 'id': attrs['id'], + 'id': final_attrs['id'], } return mark_safe(html)
Fixed small problem in widgets for #<I>
py
diff --git a/glim/prototype/app/config/default.py b/glim/prototype/app/config/default.py index <HASH>..<HASH> 100644 --- a/glim/prototype/app/config/default.py +++ b/glim/prototype/app/config/default.py @@ -4,11 +4,10 @@ import glim.paths config = { 'extensions' : { - # 'gredis' : { # 'default' : { # 'host' : 'localhost', - # 'port' : '1234', + # 'port' : '6379', # 'db' : 0 # } # }
set default port of redis extension to <I>
py
diff --git a/bokeh/properties.py b/bokeh/properties.py index <HASH>..<HASH> 100644 --- a/bokeh/properties.py +++ b/bokeh/properties.py @@ -478,12 +478,17 @@ class MetaHasProps(type): if name in new_class_attrs: raise RuntimeError("Two property generators both created %s.%s" % (class_name, name)) new_class_attrs[name] = prop + names.add(name) if isinstance(prop, BasicProperty): + # names_with_refs includes containers that have + # refs and container_names includes only + # containers that didn't have a ref if prop.descriptor.has_ref: names_with_refs.add(name) elif isinstance(prop.descriptor, ContainerProperty): container_names.add(name) - names.add(name) + # dataspecs can also be in names_with_refs or + # container_names above. if isinstance(prop.descriptor, DataSpec): dataspecs[name] = prop @@ -641,7 +646,7 @@ class HasProps(with_metaclass(MetaHasProps, object)): @classmethod def properties_containers(cls): - """ Returns a list of properties that are containers + """ Returns a list of properties that are containers but do not have refs. """ return accumulate_from_superclasses(cls, "__container_props__")
properties.py: Comments and small bugfix in add_prop We should call names.add even for a non-BasicProperty, though we don't have any non-BasicProperty right now. Better to comment the existing code's behavior (that containers with refs are not in the set of containers), though this behavior seems weird.
py
diff --git a/omero_marshal/decode/decoders/channel.py b/omero_marshal/decode/decoders/channel.py index <HASH>..<HASH> 100644 --- a/omero_marshal/decode/decoders/channel.py +++ b/omero_marshal/decode/decoders/channel.py @@ -28,10 +28,7 @@ class Channel201501Decoder(Decoder): self.set_property(v, 'red', data.get('Red')) self.set_property(v, 'lookupTable', data.get('omero:lookupTable')) - logical_channel_id = data.get('omero:LogicalChannelId') - if logical_channel_id is None: - return v - logical_channel = LogicalChannelI(logical_channel_id) + logical_channel = LogicalChannelI(data.get('omero:LogicalChannelId')) logical_channel.emissionWave = \ self.to_unit(data.get('EmissionWavelength')) logical_channel.excitationWave = \
Support creation of new Channel-LogicalChannel structures Using the object constructor with a `None` identifier is supported, so this is actually a fairly elegant way of handling both the round-trip and brand-new object use cases.
py
diff --git a/nodeconductor/cost_tracking/tasks.py b/nodeconductor/cost_tracking/tasks.py index <HASH>..<HASH> 100644 --- a/nodeconductor/cost_tracking/tasks.py +++ b/nodeconductor/cost_tracking/tasks.py @@ -46,21 +46,3 @@ def _update_ancestor_consumed(ancestor): if isinstance(descendant.scope, structure_models.ResourceMixin)] price_estimate.consumed = sum([descendant.consumed for descendant in resource_descendants]) price_estimate.save(update_fields=['consumed']) - - -def serializer_method(): - print 'serializer method' - print 'Current task', current_task - test_task() - - -@shared_task -def test_task(): - print 'Test task' - print 'Current task', current_task - backend_method() - - -def backend_method(): - print 'Backend method' - print 'Current task', current_task
Remove leftovers - wal-<I>
py
diff --git a/src/astral/__init__.py b/src/astral/__init__.py index <HASH>..<HASH> 100644 --- a/src/astral/__init__.py +++ b/src/astral/__init__.py @@ -27,8 +27,9 @@ timezone, latitude, longitude and elevation. The lookups can be perfomed using t For example :: - >>> from astral.geocoder import lookup - >>> location = lookup('London') + >>> from astral.geocoder import database, lookup + >>> db = database() + >>> location = lookup('London', db) >>> print('Information for %s' % location.name) Information for London >>> timezone = location.timezone
Updated example to use new database function
py
diff --git a/openstack_dashboard/dashboards/project/instances/tables.py b/openstack_dashboard/dashboards/project/instances/tables.py index <HASH>..<HASH> 100644 --- a/openstack_dashboard/dashboards/project/instances/tables.py +++ b/openstack_dashboard/dashboards/project/instances/tables.py @@ -1135,9 +1135,7 @@ class InstancesTable(tables.DataTable): ip = tables.Column(get_ips, verbose_name=_("IP Address"), attrs={'data-type': "ip"}) - size = tables.Column(get_size, - verbose_name=_("Size"), - attrs={'data-type': 'size'}) + size = tables.Column(get_size, sortable=False, verbose_name=_("Size")) keypair = tables.Column(get_keyname, verbose_name=_("Key Pair")) status = tables.Column("status", filters=(title, filters.replace_underscores),
Remove data-type and sort from flavor column Due to the flavor column in the instances table having a data-type of size, the sortable plugin tries to sort it by a size parser. Removing the sorter for that column seems the sanest decision as the end-user may expect the sorting to be done on cpu or disk or any number of factors that we cannot know in advance. Change-Id: I<I>bdcbaa<I>d<I>d<I> Closes-Bug: <I>
py
diff --git a/intranet/settings/base.py b/intranet/settings/base.py index <HASH>..<HASH> 100644 --- a/intranet/settings/base.py +++ b/intranet/settings/base.py @@ -232,6 +232,9 @@ EIGHTH_BLOCK_DATE_FORMAT = "D, N j, Y" # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOG_LEVEL = "DEBUG" if os.getenv("PRODUCTION", "FALSE") == "FALSE" else "INFO" +_log_levels = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") +if os.getenv("LOG_LEVEL", None) in _log_levels: + LOG_LEVEL = os.environ["LOG_LEVEL"] LOGGING = { "version": 1,
Set log level based on env vars
py
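The validation pattern, standalone: only whitelisted values override the computed default, so a typo in the environment is silently ignored rather than breaking logging setup.

```python
import os

LOG_LEVEL = "INFO"  # computed default
_log_levels = ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
if os.getenv("LOG_LEVEL") in _log_levels:
    LOG_LEVEL = os.environ["LOG_LEVEL"]
print(LOG_LEVEL)
```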
diff --git a/keepassx/db.py b/keepassx/db.py index <HASH>..<HASH> 100644 --- a/keepassx/db.py +++ b/keepassx/db.py @@ -36,6 +36,13 @@ def encode_password(password): if not isinstance(password, TEXT_TYPE): # We'll need to decode back into text and then # encode to bytes. + # First we need to try to get the encoding from stdin, + # keeping in mind that sometimes we either don't + # have an encoding attribute or that it can be None + # (if it's replaced with a StringIO for example). + encoding = getattr(sys.stdin, 'encoding', 'utf-8') + if encoding is None: + encoding = 'utf-8' password = password.decode(sys.stdin.encoding) return password.encode(KP_PASSWORD_ENCODING, 'replace')
Fix stdin not having an encoding attr, i.e. when we run via travis.yml.
py
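The failure mode is easy to reproduce: a replaced stdin (as in a test harness) reports `encoding` as `None`, so neither the attribute nor its value can be trusted. A sketch, compressing the fix's two-step check into one `or`:

```python
import io
import sys

sys.stdin = io.StringIO("hunter2")  # what a test harness might do
encoding = getattr(sys.stdin, "encoding", None) or "utf-8"
print(encoding)                     # utf-8
```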
diff --git a/prettyplotlib/_bar.py b/prettyplotlib/_bar.py index <HASH>..<HASH> 100644 --- a/prettyplotlib/_bar.py +++ b/prettyplotlib/_bar.py @@ -95,10 +95,6 @@ def bar(*args, **kwargs): # If there are negative counts, remove the bottom axes # and add a line at y=0 if any(h < 0 for h in height.tolist()): -# print h -# for h in height: -# if h<0.0: -# print h axes_to_remove = ['top', 'right', 'bottom'] ax.hlines(y=0, xmin=xmin, xmax=xmax, linewidths=0.75)
Update _bar.py Added legend support.
py
diff --git a/src/livedumper/dumper.py b/src/livedumper/dumper.py index <HASH>..<HASH> 100644 --- a/src/livedumper/dumper.py +++ b/src/livedumper/dumper.py @@ -197,7 +197,7 @@ class LivestreamerDumper(object): self.fd = None - def exit(self, msg=''): + def exit(self, msg=0): "Close an opened stream and call sys.exit(msg)." self.stop()
If no exit msg is given, exit with success status. sys.exit('') means to exit with failure status, so the default argument should be 0. Otherwise, a successfully completed download results in apparent failure, because livedumper_cli invokes dumper.exit() on completion. Fix issue #<I>
py
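The distinction the fix relies on, demonstrated via subprocesses:

```python
import subprocess
import sys

# A string argument (even '') is printed to stderr and means failure
# (exit status 1); 0 -- the new default -- means success.
print(subprocess.call([sys.executable, "-c", "import sys; sys.exit('')"]))  # 1
print(subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"]))   # 0
```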
diff --git a/insights/client/config.py b/insights/client/config.py index <HASH>..<HASH> 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -130,7 +130,7 @@ DEFAULT_OPTS = { }, 'http_timeout': { # non-CLI - 'default': 10.0 + 'default': 120.0 }, 'insecure_connection': { # non-CLI
Increase http timeout (#<I>) * increase http timeout from <I>s to <I>s * mayhaps....... <I> seconds?
py
diff --git a/build_libtcod.py b/build_libtcod.py index <HASH>..<HASH> 100644 --- a/build_libtcod.py +++ b/build_libtcod.py @@ -69,9 +69,9 @@ else: if sys.platform == 'darwin': sources += walk_sources('dependencies/SDL-1.2.15/src/') + include_dirs += ['dependencies/SDL-1.2.15/include/SDL'] else: libraries += ['SDL'] - include_dirs += ['dependencies/SDL-1.2.15/include/SDL'] # included SDL headers are for whatever OS's don't easily come with them if sys.platform in ['win32', 'darwin']:
direct SDL header include dir was added to the wrong branch
py
diff --git a/gwpy/tests/test_timeseries.py b/gwpy/tests/test_timeseries.py index <HASH>..<HASH> 100644 --- a/gwpy/tests/test_timeseries.py +++ b/gwpy/tests/test_timeseries.py @@ -277,6 +277,8 @@ class TimeSeriesTestCase(TimeSeriesTestMixin, SeriesTestCase): # test multiprocessing sg2 = ts.spectrogram(0.5, fftlength=0.2, overlap=0.1, nproc=2) self.assertArraysEqual(sg, sg2) + # test methods + ts.spectrogram(0.5, fftlength=0.2, method='bartlett') def test_spectrogram2(self): ts = self._read()
Adding bartlett method test to timeseries unit tests.
py
diff --git a/spyder/plugins/editor/widgets/codeeditor.py b/spyder/plugins/editor/widgets/codeeditor.py index <HASH>..<HASH> 100644 --- a/spyder/plugins/editor/widgets/codeeditor.py +++ b/spyder/plugins/editor/widgets/codeeditor.py @@ -2869,6 +2869,7 @@ class CodeEditor(TextEditBaseWidget): TextEditBaseWidget.keyPressEvent(self, event) elif key == Qt.Key_Space and not shift and not ctrl \ and not has_selection and self.auto_unindent_enabled: + self.completion_widget.hide() leading_text = self.get_text('sol', 'cursor') if leading_text.lstrip() in ('elif', 'except'): ind = lambda txt: len(txt)-len(txt.lstrip())
hide completionsWidget if a space is written
py
diff --git a/tests/test_contractions.py b/tests/test_contractions.py index <HASH>..<HASH> 100644 --- a/tests/test_contractions.py +++ b/tests/test_contractions.py @@ -39,3 +39,10 @@ def test_capitalized(): tokens = EN.tokenize("Ain't") assert len(tokens) == 2 assert tokens[0].string == "Are" + + +def test_punct(): + tokens = EN.tokenize("We've") + assert len(tokens) == 2 + tokens = EN.tokenize("``We've") + assert len(tokens) == 3
* Add a couple more contractions tests
py
diff --git a/ooquery/ooquery.py b/ooquery/ooquery.py index <HASH>..<HASH> 100644 --- a/ooquery/ooquery.py +++ b/ooquery/ooquery.py @@ -30,7 +30,7 @@ class OOQuery(object): fields = [] for field in self._fields: table_field = self.parser.get_table_field(self.table, field) - fields.append(table_field.as_(field.replace('.', '_'))) + fields.append(table_field.as_(field)) return fields def select(self, fields=None, **kwargs):
Don't replace '.' with '_' in field name
py
diff --git a/src/owlwatch/test/test_model.py b/src/owlwatch/test/test_model.py index <HASH>..<HASH> 100644 --- a/src/owlwatch/test/test_model.py +++ b/src/owlwatch/test/test_model.py @@ -268,6 +268,8 @@ class TestPanel(unittest.TestCase): """Test comparison between Schema properties using its compare method with the last item different (but the same fields) """ + expected_status = 'OK' + mapping_json = None panel_json = None @@ -284,12 +286,12 @@ class TestPanel(unittest.TestCase): mapping_json=mapping_json) panel = Panel.from_json(panel_json) - result = es_mapping.compare_properties(panel.get_index_pattern('git')) + result = panel.get_index_pattern('git').compare_properties(es_mapping) - if result[0] != 'OK': - print(result[1]) + if result['status'] != expected_status: + print(result) - self.assertEqual(result[0], 'OK') + self.assertEqual(result['status'], expected_status) if __name__ == '__main__':
Fix broken outdated test Last test was broken because it used the old compare method output format for asserts and also didn't take into account comparison order. So it was not a problem of the test, it was just outdated.
py
diff --git a/duckduckpy/api.py b/duckduckpy/api.py index <HASH>..<HASH> 100644 --- a/duckduckpy/api.py +++ b/duckduckpy/api.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals +from __init__ import __version__ from collections import namedtuple -from utils import camel_to_snake_case +from duckduckpy.utils import camel_to_snake_case SERVER_HOST = 'api.duckduckgo.com' -VERSION = '0.1-alpha' -USER_AGENT = 'duckduckpy {0}'.format(VERSION) +USER_AGENT = 'duckduckpy {0}'.format(__version__) ICON_KEYS = set(['URL', 'Width', 'Height']) RESULT_KEYS = set(['FirstURL', 'Icon', 'Result', 'Text'])
Remove VERSION variable from api.py
py
diff --git a/src/hamster/lib/configuration.py b/src/hamster/lib/configuration.py index <HASH>..<HASH> 100644 --- a/src/hamster/lib/configuration.py +++ b/src/hamster/lib/configuration.py @@ -200,7 +200,7 @@ class GSettingsStore(gobject.GObject, Singleton): def __init__(self): gobject.GObject.__init__(self) - self._settings = gio.Settings('org.gnome.Hamster') + self._settings = gio.Settings(schema_id='org.gnome.Hamster') def _key_changed(self, client, key, data=None): """
explicit schema_id Fixed "Using positional arguments with the GObject constructor has been deprecated"
py
diff --git a/src/xray/conventions.py b/src/xray/conventions.py index <HASH>..<HASH> 100644 --- a/src/xray/conventions.py +++ b/src/xray/conventions.py @@ -317,6 +317,7 @@ def decode_cf_variable(var, mask_and_scale=True): dimensions = var.dimensions attributes = var.attributes.copy() encoding = var.encoding.copy() + indexing_mode = var._indexing_mode def pop_to(source, dest, k): """ @@ -356,5 +357,7 @@ def decode_cf_variable(var, mask_and_scale=True): units = pop_to(attributes, encoding, 'units') calendar = pop_to(attributes, encoding, 'calendar') data = utils.decode_cf_datetime(data, units=units, calendar=calendar) + indexing_mode = 'numpy' - return xarray.XArray(dimensions, data, attributes, encoding=encoding) + return xarray.XArray(dimensions, data, attributes, encoding=encoding, + indexing_mode=indexing_mode)
Preserve indexing mode in decode_cf_variable
py
diff --git a/pyradio/radio.py b/pyradio/radio.py index <HASH>..<HASH> 100644 --- a/pyradio/radio.py +++ b/pyradio/radio.py @@ -11,7 +11,7 @@ import curses import logging import os import random -import signal +#import signal from sys import version as python_version, version_info from os.path import join, basename, getmtime, getsize from time import ctime
radio.py: removing signal from imports
py
diff --git a/rst2ansi/table.py b/rst2ansi/table.py index <HASH>..<HASH> 100644 --- a/rst2ansi/table.py +++ b/rst2ansi/table.py @@ -242,16 +242,17 @@ class TableWriter(nodes.NodeVisitor): from rst2ansi import ANSITranslator - v = ANSITranslator(self.document, termsize=(width - 2, height)) - node.children[0].walkabout(v) - v.strip_empty_lines() - i = 1 - for l in v.lines: - for sl in l.split('\n'): - line = self.lines[self.line + i] - line = line[:self.cursor + 2] + sl + line[self.cursor + 2 + len(sl):] - self.lines[self.line + i] = line - i += 1 + if node.children: + v = ANSITranslator(self.document, termsize=(width - 2, height)) + node.children[0].walkabout(v) + v.strip_empty_lines() + i = 1 + for l in v.lines: + for sl in l.split('\n'): + line = self.lines[self.line + i] + line = line[:self.cursor + 2] + sl + line[self.cursor + 2 + len(sl):] + self.lines[self.line + i] = line + i += 1 self.col += cols self.cursor += width + 1
Fixed empty cells crashing the table writer
py
diff --git a/py/testdir_multi_jvm/test_parse_small_nopoll.py b/py/testdir_multi_jvm/test_parse_small_nopoll.py index <HASH>..<HASH> 100644 --- a/py/testdir_multi_jvm/test_parse_small_nopoll.py +++ b/py/testdir_multi_jvm/test_parse_small_nopoll.py @@ -1,4 +1,4 @@ -import unittest, random, time +import sys, unittest, random, time sys.path.extend(['.','..','py']) import h2o, h2o_cmd, h2o_hosts
typo, removed sys accidentally a couple of days back
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -244,11 +244,10 @@ else: setup_args['install_requires'] = [ 'twisted >= 2.0.0', 'Jinja2', - 'simplejson' ] # Python-2.6 and up includes json - #if not py_26: - #setup_args['install_requires'].append('simplejson') + if not py_26: + setup_args['install_requires'].append('simplejson') entry_points={ 'console_scripts': [
go back to just adding simplejson to the reqs in <I> and below
py
diff --git a/tests/test_write_readback.py b/tests/test_write_readback.py index <HASH>..<HASH> 100644 --- a/tests/test_write_readback.py +++ b/tests/test_write_readback.py @@ -844,11 +844,15 @@ class TestPythonMatlabFormat(object): while s in names and s[0].isdigit(): s = random_str_ascii(random.randint(1, 10)) names.append(s) + titles = [] + for _ in range(len(names)): + s = random_str_some_unicode(random.randint(1, 10)) + while s in titles: + s = random_str_some_unicode(random.randint(1, 10)) + titles.append(s) formats = [(random.choice(base_dtypes), random_numpy_shape(random.randint(1, 4), 10)) for _ in range(len(names))] - titles = [random_str_some_unicode(random.randint(1, 10)) - for _ in range(len(names))] offsets = [random.randint(0, 100) for _ in range(len(names))] desc = {'names': names,
Fixed bug (Issue #<I>) in generating random titles for the dtype fields in the unit tests, which must be unique.
py
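The de-duplication idiom from the fix, generalized into a sketch (`unique_random_strings` is a hypothetical helper; the test uses its own string generators):

```python
import random
import string

def unique_random_strings(count, pool=string.ascii_lowercase):
    # Re-roll on collision so every drawn string is distinct.
    seen = []
    while len(seen) < count:
        s = "".join(random.choices(pool, k=random.randint(1, 10)))
        if s not in seen:
            seen.append(s)
    return seen

print(unique_random_strings(5))
```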
diff --git a/onnx/utils.py b/onnx/utils.py index <HASH>..<HASH> 100644 --- a/onnx/utils.py +++ b/onnx/utils.py @@ -133,7 +133,8 @@ def extract_model( input_path, # type: Text output_path, # type: Text input_names, # type: List[Text] - output_names # type: List[Text] + output_names, # type: List[Text] + check_model=True, # type: bool ): # type: (...) -> None """Extracts sub-model from an ONNX model. @@ -148,6 +149,7 @@ def extract_model( output_path (string): The path to save the extracted ONNX model. input_names (list of string): The names of the input tensors that to be extracted. output_names (list of string): The names of the output tensors that to be extracted. + check_model (bool): Whether to run model checker on the extracted model. """ if not os.path.exists(input_path): raise ValueError("Invalid input model path: %s" % input_path) @@ -163,4 +165,5 @@ def extract_model( extracted = e.extract_model(input_names, output_names) onnx.save(extracted, output_path) - onnx.checker.check_model(output_path) + if check_model: + onnx.checker.check_model(output_path)
Optionally run the model checker on the extracted model (#<I>) Some dynamic shape models may fail due to a lack of shape information; have an option to keep the script from crashing. (Note: running shape inference can fix the shape info.)
py
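A usage sketch of the new flag (file and tensor names are hypothetical):

```python
import onnx.utils

# Skip the checker when the extracted sub-model lacks the shape
# information that onnx.checker.check_model would require.
onnx.utils.extract_model(
    "model.onnx", "sub_model.onnx",
    input_names=["input_0"], output_names=["hidden_3"],
    check_model=False,
)
```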
diff --git a/issuetracker/tests/test_builtin_resolvers.py b/issuetracker/tests/test_builtin_resolvers.py index <HASH>..<HASH> 100644 --- a/issuetracker/tests/test_builtin_resolvers.py +++ b/issuetracker/tests/test_builtin_resolvers.py @@ -104,11 +104,7 @@ def pytest_funcarg__content(request): if issue_id is None: # return default content return request.getfuncargvalue('content') - return ("""\ -Tracker test -============ - -Issue #{0} in tracker *{1}*""".format(issue_id, tracker)) + return '#{0}'.format(issue_id) class TrackerTest(object):
Simplified test file content for builtin tracker tests
py
diff --git a/gns3server/modules/iou/__init__.py b/gns3server/modules/iou/__init__.py index <HASH>..<HASH> 100644 --- a/gns3server/modules/iou/__init__.py +++ b/gns3server/modules/iou/__init__.py @@ -49,9 +49,10 @@ class IOU(BaseManager): def close_vm(self, vm_id, *args, **kwargs): vm = self.get_vm(vm_id) - i = self._used_application_ids[vm_id] - self._free_application_ids.insert(0, i) - del self._used_application_ids[vm_id] + if vm_id in self._used_application_ids: + i = self._used_application_ids[vm_id] + self._free_application_ids.insert(0, i) + del self._used_application_ids[vm_id] yield from super().close_vm(vm_id, *args, **kwargs) return vm
Fix a crash in CI that was making the log impossible to read
py
diff --git a/test/normalize_varscan_test.py b/test/normalize_varscan_test.py index <HASH>..<HASH> 100644 --- a/test/normalize_varscan_test.py +++ b/test/normalize_varscan_test.py @@ -279,10 +279,11 @@ class ValidateDirectoriesTestCase(unittest.TestCase): def test_validateDirectories_outputDirectoryNotCreated(self): script_dir = os.path.dirname(os.path.abspath(__file__)) input_dir = script_dir + "/tag_varscan_test/input" - first_out_dir = script_dir + "/tag_varscan_test/foo_bar/" - + first_out_dir = script_dir + "/tag_varscan_test/foo/bar/" + os.mkdir(input_dir + "/foo", 0555) + with self.assertRaises(SystemExit) as cm: - validate_directories(input_dir, first_out_dir + "/bar") + validate_directories(input_dir, first_out_dir) self.assertEqual(cm.exception.code, 1) class MockWriter():
Test that the output directory can't be created
py
diff --git a/indra/tests/test_cbio_client.py b/indra/tests/test_cbio_client.py index <HASH>..<HASH> 100644 --- a/indra/tests/test_cbio_client.py +++ b/indra/tests/test_cbio_client.py @@ -73,8 +73,8 @@ def test_get_profile_data(): def test_get_ccle_cna(): profile_data = cbio_client.get_ccle_cna(['BRAF', 'AKT1'], ['LOXIMVI_SKIN', 'SKMEL30_SKIN']) - assert profile_data['SKMEL30_SKIN']['BRAF'] == 1 - assert profile_data['SKMEL30_SKIN']['AKT1'] == -1 + assert profile_data['SKMEL30_SKIN']['BRAF'] == -1 + assert profile_data['SKMEL30_SKIN']['AKT1'] == 1 assert profile_data['LOXIMVI_SKIN']['BRAF'] == 0 assert profile_data['LOXIMVI_SKIN']['AKT1'] == 0 assert len(profile_data) == 2
Fixed a failing test_get_ccle_cna() - wrong values for SKMEL<I>_SKIN line were being asserted
py
diff --git a/examples/hiv_example.py b/examples/hiv_example.py index <HASH>..<HASH> 100755 --- a/examples/hiv_example.py +++ b/examples/hiv_example.py @@ -1,8 +1,6 @@ #!/usr/bin/env python import sys, os, collections -sys.path.append(".") -sys.path.append("..") from nestly.nestly import * homedir = "/cs/researcher/matsen/Overbaugh_J/HIV_Data/"
Removed sys.path modifications.
py
diff --git a/cpp_coveralls/coverage.py b/cpp_coveralls/coverage.py index <HASH>..<HASH> 100644 --- a/cpp_coveralls/coverage.py +++ b/cpp_coveralls/coverage.py @@ -311,7 +311,7 @@ def collect(args): src_report['coverage'] = parse_gcov_file(fobj) src_files[src_path] = combine_reports(src_files.get(src_path), src_report) - report['source_files'] = src_files.values() + report['source_files'] = list(src_files.values()) # Also collects the source files that have no coverage reports. report['source_files'].extend( collect_non_report_files(args, discovered_files))
Fixed issue with python 3 returning a view
py
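The Python 3 behavior behind the fix: `dict.values()` returns a view, and views have no `extend` method, so the report code must materialize a list first.

```python
src_files = {"a.c": {"coverage": [1, 0, None]}}

values = src_files.values()
print(hasattr(values, "extend"))         # False -- it's a dict view
source_files = list(src_files.values())  # the fix: materialize a list
source_files.extend([{"name": "untested.c"}])
print(len(source_files))                 # 2
```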
diff --git a/sparc/apps/cache/cache.py b/sparc/apps/cache/cache.py index <HASH>..<HASH> 100644 --- a/sparc/apps/cache/cache.py +++ b/sparc/apps/cache/cache.py @@ -171,11 +171,16 @@ class cache(object): area.initialize() while True: try: - new = area.import_source(source) + count = 0 + for item in source.items(): + if area.cache(item): + count += 1 + if kwargs['exit_'].is_set(): + break if ITransactionalCacheArea.providedBy(area): area.commit() logger.info("Found %d new items in cachablesource %s" % \ - (new, kwargs['cacheablesource'].attrib['id'])) + (count, kwargs['cacheablesource'].attrib['id'])) notify(CompletedCachableSourcePoll(source)) except Exception: if ITransactionalCacheArea.providedBy(area): @@ -205,7 +210,7 @@ class cache(object): while threading.active_count() > 1: time.sleep(.001) except KeyboardInterrupt: - logger.info("Exiting application.") + logger.info("KeyboardInterrupt signal caught, shutting down pollers...") exit_.set()
Iterate the cache source manually, so as to be able to catch the interrupt signal faster
py
diff --git a/salt/modules/grains.py b/salt/modules/grains.py index <HASH>..<HASH> 100644 --- a/salt/modules/grains.py +++ b/salt/modules/grains.py @@ -416,7 +416,8 @@ def delkey(key): ''' .. versionadded:: nitrogen - Remove a grain complately from the grain system + Remove a grain completely from the grain system, this will remove the + grain key and value key The grain key from which to delete the value. @@ -433,7 +434,9 @@ def delval(key, destructive=False): ''' .. versionadded:: 0.17.0 - Delete a grain from the grains config file + Delete a grain value from the grains config file. This will just set the + grain value to `None`. To completely remove the grain run `grains.delkey` + of pass `destructive=True` to `grains.delval`. key The grain key from which to delete the value.
clean up grains del val and key docs (#<I>)
py
diff --git a/tests/demoTest.py b/tests/demoTest.py index <HASH>..<HASH> 100644 --- a/tests/demoTest.py +++ b/tests/demoTest.py @@ -39,6 +39,7 @@ class DemoTest(unittest.TestCase): def setUp(self): self.demos = os.listdir(demo_dir) + self.maxDiff = None def testDemos(self): for filename in self.demos: @@ -46,7 +47,7 @@ class DemoTest(unittest.TestCase): output = convert(filename, format) output_name = 'output1.' + format output_file = os.path.join(demo_dir, filename, output_name) - # with codecs.open(output_file, 'w', encoding='utf-8') as fp: + #with codecs.open(output_file, 'w', encoding='utf-8') as fp: # fp.write(output) with codecs.open(output_file, encoding='utf-8') as fp: expected = fp.read()
set maxDiff to None in demoTest
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -38,6 +38,7 @@ setup( version=about["__version__"], description=about["__summary__"], + long_description=open("README.rst").read(), license=about["__license__"], url=about["__uri__"],
Include the long_description from README.rst
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -64,8 +64,8 @@ setup( # Check that the pandoc-xnos script is on the PATH if not shutil.which('pandoc-xnos'): msg = """ - ERROR: `pandoc-xnos` script not found. You will need to find - the script and ensure it is on your PATH. Please file an Issue at + ERROR: `pandoc-xnos` script not found. This will need to + be corrected. If you need help, please file an Issue at https://github.com/tomduck/pandoc-xnos/issues.\n""" print(textwrap.dedent(msg)) sys.exit(-1)
Check that pandoc-xnos is on path.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ from distutils.core import setup setup( name = 'panasonic_viera', packages = ['panasonic_viera'], - version = '0.3.1', + version = '0.3.2', description = 'Library to control Panasonic Viera TVs', author = 'Florian Holzapfel', author_email = '[email protected]',
increase version number to <I>
py
diff --git a/ansigenome/utils.py b/ansigenome/utils.py index <HASH>..<HASH> 100644 --- a/ansigenome/utils.py +++ b/ansigenome/utils.py @@ -246,8 +246,7 @@ def roles_dict(path, repo_prefix=""): for role in roles: if is_role(os.path.join(path, role)): if isinstance(role, basestring): - role_url_ready = role_name(role).replace("_", "-") - role_repo = "{0}{1}".format(repo_prefix, role_url_ready) + role_repo = "{0}{1}".format(repo_prefix, role_name(role)) aggregated_roles[role] = role_repo
Do not convert _ to - in repo names
py
diff --git a/gluish/task.py b/gluish/task.py index <HASH>..<HASH> 100644 --- a/gluish/task.py +++ b/gluish/task.py @@ -11,7 +11,7 @@ A default task, that covers file system layout. # pylint: disable=F0401,E1101,E1103 from gluish import GLUISH_DATA from gluish.parameter import ClosestDateParameter -from luigi.task import id_to_name_and_params +from luigi.tools.parse_task import id_to_name_and_params import datetime import hashlib import luigi
do not use deprecated function, closes #2
py
diff --git a/docs/versionutils.py b/docs/versionutils.py index <HASH>..<HASH> 100644 --- a/docs/versionutils.py +++ b/docs/versionutils.py @@ -34,6 +34,7 @@ translations_list = [ ('ja_JP', 'Japanese'), ('ko_KR', 'Korean'), ('pt_UN', 'Portuguese'), + ('es_UN', 'Spanish'), ('ta_IN', 'Tamil'), ]
Prepare to publish Spanish docs (#<I>)
py
diff --git a/dota2py/data.py b/dota2py/data.py index <HASH>..<HASH> 100644 --- a/dota2py/data.py +++ b/dota2py/data.py @@ -28,6 +28,14 @@ GAME_MODES = { "dota_game_mode_13": "New Player Pool", } +REPLAY_GAME_MODE = { + 1: "All Pick", + 2: "Captains Mode", + 3: "Single Draft", + 4: "Random Draft", + 5: "All Random", +} + LEAVER_STATUS = { None: "Bot", "NULL": "Bot",
Add game mode identifiers for replays
py
diff --git a/src/python/pants/backend/python/tasks/pytest_run.py b/src/python/pants/backend/python/tasks/pytest_run.py index <HASH>..<HASH> 100644 --- a/src/python/pants/backend/python/tasks/pytest_run.py +++ b/src/python/pants/backend/python/tasks/pytest_run.py @@ -211,6 +211,9 @@ class PytestRun(TestRunnerTaskMixin, PythonTask): exclude_lines = def __repr__ raise NotImplementedError + pragma: no cover + pragma: no branch + pragma: recursive coverage """) @staticmethod
Adding pragma back in the default coverage config (#<I>) The [report] section contains an exclude_lines array. This breaks default pragmas and has to be fixed by excluding the pragma lines again. See doc: <URL>
py
diff --git a/demos/auth/authdemo.py b/demos/auth/authdemo.py index <HASH>..<HASH> 100755 --- a/demos/auth/authdemo.py +++ b/demos/auth/authdemo.py @@ -62,17 +62,24 @@ class AuthHandler(BaseHandler, tornado.auth.GoogleMixin): self.get_authenticated_user(self.async_callback(self._on_auth)) return self.authenticate_redirect() - + def _on_auth(self, user): if not user: raise tornado.web.HTTPError(500, "Google auth failed") self.set_secure_cookie("user", tornado.escape.json_encode(user)) self.redirect("/") + class LogoutHandler(BaseHandler): def get(self): + # This logs the user out of this demo app, but does not log them + # out of Google. Since Google remembers previous authorizations, + # returning to this app will log them back in immediately with no + # interaction (unless they have separately logged out of Google in + # the meantime). self.clear_cookie("user") - self.redirect("/") + self.write('You are now logged out. ' + 'Click <a href="/">here</a> to log back in.') def main(): tornado.options.parse_command_line()
Make authdemo's logout less confusing. Closes #<I>.
py
diff --git a/src/cloudant/_common_util.py b/src/cloudant/_common_util.py index <HASH>..<HASH> 100644 --- a/src/cloudant/_common_util.py +++ b/src/cloudant/_common_util.py @@ -24,8 +24,7 @@ from collections import Sequence import json from requests import RequestException, Session -from ._2to3 import LONGTYPE, STRTYPE, NONETYPE, UNITYPE, iteritems_, url_parse, \ - url_join +from ._2to3 import LONGTYPE, STRTYPE, NONETYPE, UNITYPE, iteritems_, url_join from .error import CloudantArgumentError, CloudantException # Library Constants @@ -356,10 +355,7 @@ class CookieSession(ClientSession): """ resp = super(CookieSession, self).request(method, url, **kwargs) - path = url_parse(url).path.lower() - post_to_session = method.upper() == 'POST' and path == '/_session' - - if not self._auto_renew or post_to_session: + if not self._auto_renew: return resp is_expired = any(( @@ -431,7 +427,7 @@ class IAMSession(ClientSession): resp = super(IAMSession, self).request(method, url, **kwargs) - if not self._auto_renew or url in [self._session_url, self._token_url]: + if not self._auto_renew: return resp if resp.status_code == 401:
Remove session endpoint checks These checks are redundant. Session endpoint requests are always made using the base request method.
py
diff --git a/pyhunter/pyhunter.py b/pyhunter/pyhunter.py index <HASH>..<HASH> 100644 --- a/pyhunter/pyhunter.py +++ b/pyhunter/pyhunter.py @@ -460,6 +460,8 @@ class PyHunter: """ Update a leads list. + :param leads_list_id: The id of the list to update. + :param name: Name of the list to update. Must be defined. :param team_id: The id of the list to share this list with.
Add docs for missing leads_list_id param
py
diff --git a/pyramid_pages/models.py b/pyramid_pages/models.py index <HASH>..<HASH> 100644 --- a/pyramid_pages/models.py +++ b/pyramid_pages/models.py @@ -115,7 +115,7 @@ class SacrudOptions(object): def sacrud_detail_col(cls): options = [ ('', [cls.name, cls.slug, cls.description, cls.visible, - cls.in_menu, cls.parent]) + cls.in_menu, getattr(cls, 'parent', None)]) ] if all(hasattr(cls, name) for name in ('redirect_url', 'redirect', 'redirect_type')):
fix SacrudOptions for model w/o parent
py
diff --git a/sciunit/__init__.py b/sciunit/__init__.py index <HASH>..<HASH> 100644 --- a/sciunit/__init__.py +++ b/sciunit/__init__.py @@ -172,11 +172,12 @@ class Test(object): score.observation = observation score.related_data = score.related_data.copy() # Don't let scores # share related_data. + score = self.bind_score(score,model,observation,prediction) return score def bind_score(self,score,model,observation,prediction): """ - For the user to bind addition features to the score. + For the user to bind additional features to the score. """ return score @@ -202,8 +203,7 @@ class Test(object): score.__class__.__name__))) # 5. score = self._bind_score(score,model,observation,prediction) - score = self.bind_score(score,model,observation,prediction) - + return score def judge(self, model, stop_on_error=True, deep_error=False, verbose=False):
Put bind inside of _bind
py
diff --git a/dvc/lock.py b/dvc/lock.py index <HASH>..<HASH> 100644 --- a/dvc/lock.py +++ b/dvc/lock.py @@ -87,6 +87,17 @@ if is_py3: self._tmp_dir, filename + ".lock" ) + # Fix for __del__ bug in flufl.lock [1] which is causing errors on + # Python shutdown [2]. + # [1] https://gitlab.com/warsaw/flufl.lock/issues/7 + # [2] https://github.com/iterative/dvc/issues/2573 + def __del__(self): + try: + if self._owned: + self.finalize() + except ImportError: + pass + else: import zc.lockfile
lock: add a workaround for a flufl.lock bug (#<I>) This bug doesn't affect us during regular dvc CLI commands, but is not visually pleasing during API usage. Fixes #<I>
py
diff --git a/aioxmpp/structs.py b/aioxmpp/structs.py index <HASH>..<HASH> 100644 --- a/aioxmpp/structs.py +++ b/aioxmpp/structs.py @@ -5,10 +5,19 @@ These classes provide a way to hold structured data which is commonly encountered in the XMPP realm. +Jabber IDs +========== + .. autoclass:: JID(localpart, domain, resource) +Presence +======== + .. autoclass:: PresenceState +Languages +========= + .. autoclass:: LanguageTag .. autoclass:: LanguageRange @@ -16,7 +25,7 @@ encountered in the XMPP realm. .. autoclass:: LanguageMap Functions for working with language tags -======================================== +---------------------------------------- .. autofunction:: basic_filter_languages
Structure structs docs a bit
py
diff --git a/salt/utils/minions.py b/salt/utils/minions.py index <HASH>..<HASH> 100644 --- a/salt/utils/minions.py +++ b/salt/utils/minions.py @@ -191,6 +191,7 @@ class CkMinions(object): v_expr = comps[1] else: v_matcher = 'glob' + v_expr = valid if v_matcher in infinate: # We can't be sure what the subset is, only match the identical # target @@ -199,4 +200,7 @@ class CkMinions(object): return v_expr == expr v_minions = set(self.check_minions(v_expr, v_matcher)) minions = set(self.check_minions(expr, expr_form)) - return minions.difference(v_minions) + d_bool = bool(minions.difference(v_minions)) + if len(v_minions) == len(minions) and not d_bool: + return True + return d_bool
Fix case where all minions were matched
py
diff --git a/scripts/illumina_finished_msg.py b/scripts/illumina_finished_msg.py index <HASH>..<HASH> 100644 --- a/scripts/illumina_finished_msg.py +++ b/scripts/illumina_finished_msg.py @@ -181,6 +181,10 @@ def finished_message(tag_name, directory, files_to_copy, config): userid=config['userid'], password=config['password'], virtual_host=config['virtual_host'], insist=False) chan = conn.channel() + chan.queue_declare(queue=tag_name, exclusive=False, auto_delete=False, + durable=True) + chan.exchange_declare(exchange=config['exchange'], type="fanout", durable=True, + auto_delete=False) msg = amqp.Message(json.dumps(data), content_type='application/json', application_headers={'msg_type': tag_name})
Declare queue and exchange in passing script as well
py
diff --git a/test_spec.py b/test_spec.py index <HASH>..<HASH> 100755 --- a/test_spec.py +++ b/test_spec.py @@ -115,6 +115,20 @@ class ExpandedCoverage(unittest.TestCase): self.assertEqual(result, expected) + def test_recursion(self): + args = { + 'template': '{{# 1.2 }}{{# data }}{{.}}{{/ data }}{{/ 1.2 }}', + 'data': {'1': {'2': [{'data': ["1", "2", "3"]}]}} + } + + try: + result = chevron.render(**args) + except RuntimeError: + result = 'recursion' + expected = '123' + + self.assertEqual(result, expected) + # Run unit tests from command line if __name__ == "__main__":
Add a unittest for an edge-case recursion error. Currently this makes chevron run in an infinite loop (until the recursion depth limit is hit)
py
diff --git a/docs/conf.py b/docs/conf.py index <HASH>..<HASH> 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -29,7 +29,7 @@ import shlex # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [] +extensions = ["myst_parser"] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -37,7 +37,11 @@ templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = { + '.rst': 'restructuredtext', + '.txt': 'markdown', + '.md': 'markdown', +} # The encoding of source files. #source_encoding = 'utf-8-sig'
Added markdown extension to the documentation project. Refs: #<I>
py
diff --git a/test/drivers/python/table.py b/test/drivers/python/table.py index <HASH>..<HASH> 100644 --- a/test/drivers/python/table.py +++ b/test/drivers/python/table.py @@ -10,7 +10,7 @@ class TestTable(unittest.TestCase): # The AST these tests should generate self.ast = p.TableRef() - self.ast.db_name = 'db_name' + self.ast.db_name = self.db.ast self.ast.table_name = 'table_name' # Grabbing a table reference (the following tests are equivalent)
Tests typo - this is a bit better
py
diff --git a/sendgrid/message.py b/sendgrid/message.py index <HASH>..<HASH> 100644 --- a/sendgrid/message.py +++ b/sendgrid/message.py @@ -47,13 +47,16 @@ class Mail(SMTPAPIHeader): self.headers = opts.get('headers', '') self.date = opts.get('date', rfc822.formatdate()) + def parse_and_add(self, to): + name, email = rfc822.parseaddr(to.replace(',', '')) + if email: + self.to.append(email) + if name: + self.add_to_name(name) + def add_to(self, to): - if isinstance(to, str): - name, email = rfc822.parseaddr(to.replace(',', '')) - if email: - self.to.append(email) - if name: - self.add_to_name(name) + if isinstance(to, str) or isinstance(to, unicode): + self.parse_and_add(str(to)) else: for email in to: self.add_to(email)
Can pass unicode instead of only strings to add_to
py
diff --git a/tests/registrar_test.py b/tests/registrar_test.py index <HASH>..<HASH> 100644 --- a/tests/registrar_test.py +++ b/tests/registrar_test.py @@ -28,9 +28,6 @@ class TestRegistrar(unittest.TestCase): cis_search = self.reg.search({'course_id': 'cis'}) cis_dept = self.reg.department('cis') self.assertTrue(len(list(cis_search)) >= len(list(cis_dept)) > 20) - # There will always be a Kors class at Penn - sub_search = self.reg.search({'course_id': 'hist', 'instructor': 'Kors'}) - self.assertTrue(len(list(sub_search))) def test_search_params(self): params = self.reg.search_params()
Fix registrar tests by removing broken Kors test "There will always be a Kors class at Penn" - apparently not
py
diff --git a/tests/test_cli/test_cli.py b/tests/test_cli/test_cli.py index <HASH>..<HASH> 100644 --- a/tests/test_cli/test_cli.py +++ b/tests/test_cli/test_cli.py @@ -43,7 +43,7 @@ class TestCliDeployRuns(PatchSSHTestCase): def test_invalid_deploy(self): pass -# TODO +# TODO: need help how to fix these tests that I broke with the shell arg # result = run_cli( # '@local', # 'not-a-file.py', @@ -65,6 +65,7 @@ class TestCliDeployRuns(PatchSSHTestCase): 'fact', 'os', ) + # TODO: need help how to fix these tests that I broke with the shell arg # TODO assert result.exit_code == 0 # TODO assert '"somehost": null' in result.stdout @@ -161,7 +162,7 @@ class TestCliDeployRuns(PatchSSHTestCase): class TestCliDeployState(PatchSSHTestCase): pass -# TODO +# TODO: need help how to fix these tests that I broke with the shell arg # def test_deploy(self): # # Run 3 iterations of the test - each time shuffling the order of the # # hosts - ensuring that the ordering has no effect on the operation order.
more explicit about these TODOs; need help
py
diff --git a/src/qinfer/distributions.py b/src/qinfer/distributions.py index <HASH>..<HASH> 100644 --- a/src/qinfer/distributions.py +++ b/src/qinfer/distributions.py @@ -50,7 +50,7 @@ import warnings __all__ = [ 'Distribution', 'SingleSampleMixin', - 'SumDistribution', + 'MixtureDistribution', 'ProductDistribution', 'UniformDistribution', 'ConstantDistribution', @@ -126,7 +126,7 @@ class SingleSampleMixin(with_metaclass(abc.ABCMeta, object)): ## CLASSES ################################################################### -class SumDistribution(Distribution): +class MixtureDistribution(Distribution): r""" Takes a non-zero number of QInfer distributions :math:`D_k` as input and returns their weighted sum.
Changed name SumDistribution to MixtureDistribution
py
diff --git a/phono3py/cui/create_force_constants.py b/phono3py/cui/create_force_constants.py index <HASH>..<HASH> 100644 --- a/phono3py/cui/create_force_constants.py +++ b/phono3py/cui/create_force_constants.py @@ -435,7 +435,7 @@ def _create_phono3py_phonon_fc2(phono3py, alm_options, log_level): file_exists("FORCES_FC2", log_level) - natom = phono3py.supercell.get_number_of_atoms() + natom = phono3py.phonon_supercell.get_number_of_atoms() disp_dataset = _get_type2_dataset(natom, filename="FORCES_FC3") if disp_dataset: if force_to_eVperA is not None:
Fixed dim-fc2 user interface
py
diff --git a/rest_api/sawtooth_rest_api/routes.py b/rest_api/sawtooth_rest_api/routes.py index <HASH>..<HASH> 100644 --- a/rest_api/sawtooth_rest_api/routes.py +++ b/rest_api/sawtooth_rest_api/routes.py @@ -23,6 +23,7 @@ from aiohttp.helpers import parse_mimetype from google.protobuf.json_format import MessageToJson, MessageToDict from google.protobuf.message import Message as BaseMessage +from sawtooth_sdk.client.exceptions import ValidatorConnectionError from sawtooth_sdk.client.future import FutureTimeoutError from sawtooth_sdk.client.stream import Stream from sawtooth_sdk.protobuf.validator_pb2 import Message @@ -159,7 +160,12 @@ class RouteHandler(object): except FutureTimeoutError: raise errors.ValidatorUnavailable() - return response.content + try: + return response.content + # the error is caused by resolving a FutureError + # on validator disconnect. + except ValidatorConnectionError: + raise errors.ValidatorUnavailable() @staticmethod def _try_response_parse(proto, response, traps=None):
Update rest_api to handle a Future resolving to a FutureError When the Stream class detects a validator disconnect and Futures are unresolved, it resolves them as FutureError instead of FutureResult. The rest_api now handles the error and returns an HTTPServiceUnavailable, <I> error.
py
diff --git a/tests/etl/test_classification_mirroring.py b/tests/etl/test_classification_mirroring.py index <HASH>..<HASH> 100644 --- a/tests/etl/test_classification_mirroring.py +++ b/tests/etl/test_classification_mirroring.py @@ -10,7 +10,7 @@ from django.conf import settings from treeherder.etl.classification_mirroring import ElasticsearchDocRequest, BugzillaCommentRequest -def test_tbpl_bug_request_body(jm, eleven_jobs_processed): +def test_elasticsearch_doc_request_body(jm, eleven_jobs_processed): """ Test the request body is created correctly """ @@ -52,7 +52,7 @@ def test_tbpl_bug_request_body(jm, eleven_jobs_processed): assert req.body == expected, diff(expected, req.body) -def test_tbpl_bugzilla_request_body(jm, eleven_jobs_processed): +def test_bugzilla_comment_request_body(jm, eleven_jobs_processed): """ Test the request body is created correctly """ @@ -88,7 +88,7 @@ def test_tbpl_bugzilla_request_body(jm, eleven_jobs_processed): assert req.body == expected -def test_tbpl_bugzilla_comment_length_capped(jm, eleven_jobs_processed): +def test_bugzilla_comment_length_capped(jm, eleven_jobs_processed): """ Test that the total number of characters in the comment is capped correctly. """
Bug <I> - Remove tbpl from test names
py
diff --git a/ginga/rv/plugins/Mosaic.py b/ginga/rv/plugins/Mosaic.py index <HASH>..<HASH> 100644 --- a/ginga/rv/plugins/Mosaic.py +++ b/ginga/rv/plugins/Mosaic.py @@ -331,7 +331,7 @@ class Mosaic(GingaPlugin.LocalPlugin): merge=merge, allow_expand=allow_expand, expand_pad_deg=expand_pad_deg, - suppress_callback=False) + suppress_callback=True) # annotate ingested image with its name? if annotate and (not allow_expand): @@ -481,7 +481,6 @@ class Mosaic(GingaPlugin.LocalPlugin): self.update_status("mosaicing images...") images, self.images = self.images, [] - #self.fv.gui_do(self._inline, images) self._inline(images) self.end_progress() @@ -491,7 +490,7 @@ class Mosaic(GingaPlugin.LocalPlugin): total_elapsed, self.process_elapsed) self.update_status(msg) - self.fv.gui_do(self.fitsimage.redraw, whence=0) + self.fv.gui_do(self.img_mosaic.make_callback, 'modified') def mosaic(self, paths, new_mosaic=False, name=None, image_loader=None): if image_loader is None:
Fix for non-GUI thread executing GUI code in Mosaic - the Mosaic plugin had a non-GUI thread executing a GUI callback, which resulted in QTimer errors.
py
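The fix above routes all widget work through `gui_do` instead of touching GUI objects from a worker thread. A generic sketch of that marshaling pattern — assuming only the idea of a main-thread work queue, nothing about Ginga's actual implementation:

import queue

class GuiMarshal:
    """Collect callables from worker threads; the GUI thread drains them."""

    def __init__(self):
        self._pending = queue.Queue()

    def gui_do(self, fn, *args, **kwargs):
        # Safe from any thread: only enqueues, never touches widgets.
        self._pending.put((fn, args, kwargs))

    def process_pending(self):
        # Run periodically on the GUI thread, e.g. from a timer.
        while not self._pending.empty():
            fn, args, kwargs = self._pending.get()
            fn(*args, **kwargs)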
diff --git a/mongoctl/mongoctl_logging.py b/mongoctl/mongoctl_logging.py index <HASH>..<HASH> 100644 --- a/mongoctl/mongoctl_logging.py +++ b/mongoctl/mongoctl_logging.py @@ -20,7 +20,7 @@ logger = None _log_to_stdout = True _logging_level = logging.INFO -VERBOSE = 5 +VERBOSE = 15 logging.addLevelName(VERBOSE, "VERBOSE") ###############################################################################
Make VERBOSE level higher than DEBUG
py
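For context on why 15 is the right number: the stdlib defines DEBUG as 10 and INFO as 20, so the old VERBOSE of 5 sat below DEBUG and was filtered out by any sensible threshold. A self-contained demonstration:

import logging

VERBOSE = 15  # between DEBUG (10) and INFO (20)
logging.addLevelName(VERBOSE, "VERBOSE")

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("demo")
log.log(VERBOSE, "emitted: 15 clears the DEBUG threshold of 10")

log.setLevel(logging.INFO)
log.log(VERBOSE, "suppressed: 15 is below the INFO threshold of 20")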
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,13 @@ from setuptools import setup, find_packages import airflow -# To generate install_requires from requirements.txt -# from pip.req import parse_requirements -# reqs = (str(ir.req) for ir in parse_requirements('requirements.txt')) +# Kept manually in sync with airflow.__version__ +version = '0.2.1' setup( name='airflow', description='Programmatically author, schedule and monitor data pipelines', - version=airflow.__version__, + version=version, packages=find_packages(), include_package_data=True, zip_safe=False, @@ -45,6 +44,5 @@ setup( author_email='[email protected]', url='https://github.com/mistercrunch/Airflow', download_url=( - 'https://github.com/mistercrunch/Airflow/tarball/' + - airflow.__version__), + 'https://github.com/mistercrunch/Airflow/tarball/' + version), )
Importing airflow in setup.py wasn't a good idea, rolling back
py
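Importing the package from setup.py breaks whenever its runtime dependencies are missing — exactly the situation during a fresh install. The commit simply inlines the string; a common alternative (a hedged sketch, not what this commit does) parses the version out of the source without importing it:

import re

def read_version(path="airflow/__init__.py"):
    # Find __version__ = '...' textually, so setup.py never has to
    # import the package (and therefore its dependencies).
    with open(path) as f:
        match = re.search(r"__version__\s*=\s*['\"]([^'\"]+)['\"]", f.read())
    if match is None:
        raise RuntimeError("no version string in %s" % path)
    return match.group(1)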
diff --git a/indra/tools/reading/util/__init__.py b/indra/tools/reading/util/__init__.py index <HASH>..<HASH> 100644 --- a/indra/tools/reading/util/__init__.py +++ b/indra/tools/reading/util/__init__.py @@ -0,0 +1,17 @@ +def get_s3_root(s3_base, job_queue="run_db_reading_queue"): + return s3_base + 'logs/%s/' % job_queue + + +def get_s3_job_prefix(s3_base, job_name, job_queue="run_db_reading_queue"): + s3_root = get_s3_root(s3_base, job_queue) + return s3_root + '%s/' % job_name + + +def get_s3_and_job_prefixes(base_name, group_name=None): + """Returns the s3 prefix and job prefix.""" + if not group_name: + s3_base = base_name + job_base = base_name + else: + s3_base, job_base = [group_name + d + base_name for d in ['/', '_']] + return 'reading_results/' + s3_base + '/', job_base
Move functions that regularize s3 naming into indra. These functions used to live in indra_db.
py
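Because the helpers are shown in full above, their behavior can be illustrated directly; the expected values follow from the string arithmetic in the code:

from indra.tools.reading.util import get_s3_and_job_prefixes

# With a group, the s3 prefix nests it as a directory and the job
# prefix joins it with an underscore.
s3_prefix, job_prefix = get_s3_and_job_prefixes("run1", group_name="groupA")
assert s3_prefix == "reading_results/groupA/run1/"
assert job_prefix == "groupA_run1"

# Without a group, both derive from the base name alone.
s3_prefix, job_prefix = get_s3_and_job_prefixes("run1")
assert s3_prefix == "reading_results/run1/"
assert job_prefix == "run1"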
diff --git a/htmresearch/algorithms/temporal_memory_factory.py b/htmresearch/algorithms/temporal_memory_factory.py index <HASH>..<HASH> 100644 --- a/htmresearch/algorithms/temporal_memory_factory.py +++ b/htmresearch/algorithms/temporal_memory_factory.py @@ -74,11 +74,8 @@ class ReversedExtendedTemporalMemory(FastETM): else: activeApicalCells = [] - self.activateBasalDendrites( + self.activateDendrites( activeExternalCells, - learn - ) - self.activateApicalDendrites( activeApicalCells, learn )
Call activateDendrites, not activateBasalDendrites. activateBasalDendrites and activateApicalDendrites are being removed.
py
diff --git a/sos/utilities.py b/sos/utilities.py index <HASH>..<HASH> 100644 --- a/sos/utilities.py +++ b/sos/utilities.py @@ -209,7 +209,7 @@ class AsyncReader(threading.Thread): slots = None if sizelimit: sizelimit = sizelimit * 1048576 # convert to bytes - slots = sizelimit / self.chunksize + slots = int(sizelimit / self.chunksize) self.deque = deque(maxlen=slots) self.start() self.join()
[utilities] Ensure slots in deque is always an integer. With Python 3, the 'slots' calculation returns a float, which deque() will not accept, throwing an exception instead. Always make sure slots is an integer value.
py
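The incompatibility is easy to reproduce: Python 3's `/` always yields a float, and deque rejects a non-integer maxlen. Floor division `//` would have been an equally valid fix:

from collections import deque

sizelimit = 25 * 1048576
chunksize = 2048

slots = sizelimit / chunksize        # 12800.0 -- a float on Python 3
try:
    deque(maxlen=slots)
except TypeError:
    pass                             # "an integer is required"

deque(maxlen=int(slots))             # the commit's fix
deque(maxlen=sizelimit // chunksize) # equivalent, without the cast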
diff --git a/revolver/project.py b/revolver/project.py index <HASH>..<HASH> 100644 --- a/revolver/project.py +++ b/revolver/project.py @@ -85,7 +85,10 @@ class Deployinator(object): core.run("tar -xzf deploy.tar.gz") file.remove("deploy.tar.gz") - file.write("VERSION", git.revparse(self.revision)) + # TODO Fix file.write(). Sometimes the temp-file workaround of + # Fabric seems to be broken. Uncomment the following line + # after everything is fixed. + # file.write("VERSION", git.revparse(self.revision)) finally: core.local("rm -rf %s" % tmp_tar)
Buggy file.write() disabled and a new TODO added
py
diff --git a/setuptools/command/install_lib.py b/setuptools/command/install_lib.py index <HASH>..<HASH> 100644 --- a/setuptools/command/install_lib.py +++ b/setuptools/command/install_lib.py @@ -79,6 +79,8 @@ class install_lib(orig.install_lib): base = os.path.join('__pycache__', '__init__.' + imp.get_tag()) yield base + '.pyc' yield base + '.pyo' + yield base + '.opt-1.pyc' + yield base + '.opt-2.pyc' def copy_tree( self, infile, outfile,
Fix package list inconsistency caused by namespace packages. On Python <I>, namespace packages will be skipped during installation. Since Python <I>, .pyo files are removed and new .opt-1.pyc (and .opt-2.pyc) files are introduced [1]. However, setuptools does not understand the new naming, so the corresponding foo.opt-1.pyc is still added to the package list (via --record). The inconsistency leads to a packaging error. [1] <URL>
py
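The `.opt-N.pyc` names come from PEP 488's optimization tags, which the stdlib can generate directly; a quick check of the naming (the exact tag depends on the interpreter, e.g. cpython-35):

import importlib.util

# Default cache file, e.g. __pycache__/__init__.cpython-35.pyc
print(importlib.util.cache_from_source("pkg/__init__.py"))

# Optimization levels 1 and 2 produce the .opt-1 / .opt-2 suffixes
# the commit adds, e.g. __pycache__/__init__.cpython-35.opt-1.pyc
print(importlib.util.cache_from_source("pkg/__init__.py", optimization=1))
print(importlib.util.cache_from_source("pkg/__init__.py", optimization=2))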
diff --git a/nats/js/client.py b/nats/js/client.py index <HASH>..<HASH> 100644 --- a/nats/js/client.py +++ b/nats/js/client.py @@ -922,7 +922,7 @@ class JetStreamContext(JetStreamManager): req_subject = f"{self._prefix}.STREAM.MSG.GET.{stream_name}" req = {'last_by_subj': subject} data = json.dumps(req) - resp = await self._api_request(req_subject, data.encode()) + resp = await self._api_request(req_subject, data.encode(), timeout=self._timeout) raw_msg = api.RawStreamMsg.from_response(resp['message']) if raw_msg.hdrs: hdrs = base64.b64decode(raw_msg.hdrs)
Fix incorrect timeout propagation. This commit adds timeout propagation to the key-value get operation.
py
diff --git a/tests/grid/test_network.py b/tests/grid/test_network.py index <HASH>..<HASH> 100644 --- a/tests/grid/test_network.py +++ b/tests/grid/test_network.py @@ -143,7 +143,7 @@ class TestTimeSeriesControl: # check shape number_of_timesteps = len(self.timeseries.timeindex) - number_of_cols = len(self.topology.generators_df.index) + number_of_cols = len(self.topology._generators_df.index) assert self.timeseries.generators_active_power.shape == ( number_of_timesteps, number_of_cols) assert self.timeseries.generators_reactive_power.shape == (
Temporary fix for test_worst_case (might change back once slack handling is implemented)
py
diff --git a/tests/clitests/runtests.py b/tests/clitests/runtests.py index <HASH>..<HASH> 100644 --- a/tests/clitests/runtests.py +++ b/tests/clitests/runtests.py @@ -64,8 +64,9 @@ class CasperExecTestBase(unittest.TestCase): if not what: raise AssertionError('Empty lookup') if isinstance(what, (list, tuple)): + output = self.runCommand(cmd, **kwargs) for entry in what: - self.assertIn(entry, self.runCommand(cmd, **kwargs)) + self.assertIn(entry, output) else: self.assertIn(what, self.runCommand(cmd))
Don't run the command multiple times per string-contains test
py
diff --git a/kafka/coordinator/base.py b/kafka/coordinator/base.py index <HASH>..<HASH> 100644 --- a/kafka/coordinator/base.py +++ b/kafka/coordinator/base.py @@ -193,12 +193,6 @@ class BaseCoordinator(object): """ while self.coordinator_unknown(): - # Dont look for a new coordinator node if we are just waiting - # for connection to finish - if self.coordinator_id is not None: - self._client.poll() - continue - future = self._send_group_coordinator_request() self._client.poll(future=future)
Can no longer have coordinator_id if coordinator_unknown()
py
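The message states an invariant rather than showing it; presumably the refactor left `coordinator_unknown()` equivalent to something like the sketch below, which makes the deleted branch unreachable. The body is an assumption, not kafka-python's actual code:

def coordinator_unknown(self):
    # Assumed simplification: the id is cleared as soon as the
    # connection drops, so "unknown" and "no id" coincide -- hence
    # `coordinator_id is not None` can never hold inside the loop.
    return self.coordinator_id is None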
diff --git a/phydmslib/simulate.py b/phydmslib/simulate.py index <HASH>..<HASH> 100644 --- a/phydmslib/simulate.py +++ b/phydmslib/simulate.py @@ -11,6 +11,7 @@ import phydmslib.models from phydmslib.constants import * import pyvolve from tempfile import mkstemp +import random def pyvolvePartitions(model, divselection=None): @@ -90,7 +91,7 @@ def pyvolvePartitions(model, divselection=None): return partitions -def simulateAlignment(model, treeFile, alignmentPrefix): +def simulateAlignment(model, treeFile, alignmentPrefix, randomSeed=False): """ Simulate an alignment given a model and tree (units = subs/site). @@ -112,7 +113,10 @@ def simulateAlignment(model, treeFile, alignmentPrefix): file with the name having the prefix giving by `alignmentPrefix` and the suffix `'_simulatedalignment.fasta'`. """ - + if randomSeed == False: + pass + else: + random.seed(randomSeed) #Transform the branch lengths by dividing by the model `branchScale` tree = Bio.Phylo.read(treeFile, 'newick') tree.root_at_midpoint()
Add a random-seed switch to simulate.py
py
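Two side notes on the new switch: the sentinel test reads more naturally stated positively, and callers opt in by passing any seed. A hedged sketch (not phydms's code; note `is not False` would additionally allow 0 as a seed, which `== False` silently swallows):

import random

def seed_if_requested(random_seed=False):
    if random_seed is not False:
        random.seed(random_seed)

# Caller side: a fixed seed makes the simulated draws reproducible.
seed_if_requested(42)
first = random.random()
seed_if_requested(42)
assert random.random() == first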
diff --git a/salt/output/highstate.py b/salt/output/highstate.py index <HASH>..<HASH> 100644 --- a/salt/output/highstate.py +++ b/salt/output/highstate.py @@ -1,5 +1,22 @@ ''' -Print out highstate data +The return data from the Highstate command is a standard data structure +which is parsed by the highstate outputter to deliver a clean and readable +set of information about the HighState run on minions. + +Two configurations can be set to modify the highstate outputter. These values +can be set in the master config to change the output of the ``salt`` command or +set in the minion config to change the output of the ``salt-call`` command. + +state_verbose: + By default `state_verbose` is set to `True`, setting this to `False` will + instruct the highstate outputter to omit displaying anything in green, this + means that nothing with a result of True and no chnages will not be printed +state_output: + The highstate outputter has two output modes, `full` and `terse`. The + default is set to full, which will display many lines of detailed + information for each executed chunk. If the `state_output` option is + set to `terse` then the output is greatly simplified and shown in only one + line ''' # Import salt libs import pprint
Add docs to the highstate outputter
py
diff --git a/qutepart/completer.py b/qutepart/completer.py index <HASH>..<HASH> 100644 --- a/qutepart/completer.py +++ b/qutepart/completer.py @@ -212,6 +212,7 @@ class _CompletionList(QListView): if event.type() == QEvent.KeyPress and event.modifiers() == Qt.NoModifier: if event.key() == Qt.Key_Escape: self.closeMe.emit() + return True elif event.key() == Qt.Key_Down: if self._selectedIndex + 1 < self.model().rowCount(): self._selectItem(self._selectedIndex + 1)
Stop Esc event processing once the autocompletion is closed
py
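In Qt, an event filter consumes an event by returning True; without it, the Esc key press kept propagating after the completion list closed. A minimal sketch of the fixed shape, written against PyQt5 as an assumed binding with a hypothetical close handler:

from PyQt5.QtCore import QEvent, QObject, Qt

class CompletionFilter(QObject):
    def eventFilter(self, obj, event):
        if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Escape:
            self.close_list()   # hypothetical: hide the completion popup
            return True         # consume the event: stop further processing
        return False            # let everything else pass through

    def close_list(self):
        pass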
diff --git a/salt/cli/caller.py b/salt/cli/caller.py index <HASH>..<HASH> 100644 --- a/salt/cli/caller.py +++ b/salt/cli/caller.py @@ -122,8 +122,15 @@ class Caller(object): ''' Execute the salt call logic ''' + ret = self.call() + out = ret['return'] + # If the type of return is not a dict we wrap the return data + # This will ensure that --local and local functions will return the + # same data structure as publish commands. + if type(ret['return']) != type({}): + out = {'local': ret['return']} salt.output.display_output( - {'local': ret['return']}, + out, ret.get('out', 'pprint'), self.opts)
Fix output from salt-call to look like normal salt output
py
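The diff's `type(ret['return']) != type({})` comparison works, but `isinstance` is the idiomatic spelling and also accepts dict subclasses; a sketch of the same wrapping logic:

def wrap_local_return(ret):
    # Publish commands return a {minion_id: data} mapping; salt-call
    # returns bare data, so non-dict results are wrapped under 'local'
    # to give the outputters a uniform structure.
    out = ret["return"]
    if not isinstance(out, dict):
        out = {"local": out}
    return out

assert wrap_local_return({"return": "pong"}) == {"local": "pong"}
assert wrap_local_return({"return": {"m1": True}}) == {"m1": True}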
diff --git a/mutant/hacks.py b/mutant/hacks.py index <HASH>..<HASH> 100644 --- a/mutant/hacks.py +++ b/mutant/hacks.py @@ -4,13 +4,14 @@ # override the item_field __cmp__ method to return NotImplemented if the # object it's compared to isn't a Field instance. Let's monkey patch it! # see https://code.djangoproject.com/ticket/17851 -from django.db.models.fields import Field -try: - assert Field() != None -except AttributeError: - def _Field__cmp__(self, other): - if isinstance(other, Field): - return cmp(self.creation_counter, other.creation_counter) - return NotImplemented - Field.__cmp__ = _Field__cmp__ - assert Field() != None \ No newline at end of file +def patch_db_field_compare(): + from django.db.models.fields import Field + try: + assert Field() != None + except AttributeError: + def _Field__cmp__(self, other): + if isinstance(other, Field): + return cmp(self.creation_counter, other.creation_counter) + return NotImplemented + Field.__cmp__ = _Field__cmp__ + assert Field() != None
Make sure hacks are only loaded when required
py
diff --git a/python/lookout/sdk/service_data.py b/python/lookout/sdk/service_data.py index <HASH>..<HASH> 100644 --- a/python/lookout/sdk/service_data.py +++ b/python/lookout/sdk/service_data.py @@ -37,6 +37,9 @@ class DataStub: ) def _build_metadata(self, context, metadata): + if context is None: + return metadata + new_metadata = context.pack_metadata() if metadata: new_metadata.extend(metadata)
Permit calling get_changes with a None context
py
diff --git a/doc/conf.py b/doc/conf.py index <HASH>..<HASH> 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -151,8 +151,8 @@ if on_rtd: proc = subprocess.Popen("ls ../", stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "program output:", out - #proc = subprocess.Popen("sphinx-apidoc -f -o . ../GPy", stdout=subprocess.PIPE, shell=True) - proc = subprocess.Popen("make html", stdout=subprocess.PIPE, shell=True) + #Lets regenerate our rst files from the source, -P adds private modules (i.e kern._src) + proc = subprocess.Popen("sphinx-apidoc -P -f -o . ../GPy", stdout=subprocess.PIPE, shell=True) (out, err) = proc.communicate() print "program output:", out #proc = subprocess.Popen("whereis numpy", stdout=subprocess.PIPE, shell=True)
Updated conf.py to work again rather than cause an infinite loop
py
diff --git a/cheroot/test/test_server.py b/cheroot/test/test_server.py index <HASH>..<HASH> 100644 --- a/cheroot/test/test_server.py +++ b/cheroot/test/test_server.py @@ -214,7 +214,7 @@ def test_peercreds_unix_sock(peercreds_enabled_server_and_client): bind_addr = bind_addr.decode() unix_base_uri = 'http+unix://{}'.format( - bind_addr.replace('\0', '%00').replace('/', '%2F'), + bind_addr.replace('\x00', '%00').replace('/', '%2F'), ) expected_peercreds = os.getpid(), os.getuid(), os.getgid() @@ -247,7 +247,7 @@ def test_peercreds_unix_sock_with_lookup(peercreds_enabled_server_and_client): bind_addr = bind_addr.decode() unix_base_uri = 'http+unix://{}'.format( - bind_addr.replace('\0', '%00').replace('/', '%2F'), + bind_addr.replace('\x00', '%00').replace('/', '%2F'), ) import grp
Use canonical form of null character.
py
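The change is purely cosmetic: `'\0'` and `'\x00'` denote the same one-character string, with `'\x00'` being the canonical spelling repr() produces. A one-liner confirms it:

assert "\0" == "\x00" == chr(0) and len("\0") == 1
print(repr("\0"))  # -> '\x00'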
diff --git a/pyinstrument/renderers/speedscope.py b/pyinstrument/renderers/speedscope.py index <HASH>..<HASH> 100644 --- a/pyinstrument/renderers/speedscope.py +++ b/pyinstrument/renderers/speedscope.py @@ -204,7 +204,7 @@ class SpeedscopeRenderer(Renderer): profile_decls.append('"type": %s' % encode_str(profile_type)) profile_name: str = session.program - profile_decls.append('"name": %s' % encode_str(session.program)) + profile_decls.append('"name": %s' % encode_str(profile_name)) unit: str = "seconds" profile_decls.append('"unit": %s' % encode_str(unit))
SpeedscopeRenderer.render: use profile_name object. This commit uses the previously-unused profile_name object within SpeedscopeRenderer.render.
py
diff --git a/src/codemod.py b/src/codemod.py index <HASH>..<HASH> 100755 --- a/src/codemod.py +++ b/src/codemod.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 # Copyright (c) 2007-2008 Facebook #
Implement PEP <I>: make it explicit that codemod needs Python 2, as it does not run with Python 3. According to PEP <I>, Python distributors are allowed to provide either Python 2 or 3 as the `python` command, and should provide `python2` and `python3` commands to refer to Python 2 or 3 respectively. See <URL>
py
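The shebang only takes effect when the script is executed directly; a complementary runtime guard (hypothetical — not part of this commit) would make the Python 2 requirement explicit however the module is launched:

import sys

if sys.version_info[0] != 2:
    sys.stderr.write("codemod requires Python 2; "
                     "run it with the `python2` interpreter.\n")
    sys.exit(1)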