diff: string, lengths 139 to 3.65k
message: string, lengths 8 to 627
diff_languages: string, 1 value
diff --git a/build.py b/build.py
index <HASH>..<HASH> 100755
--- a/build.py
+++ b/build.py
@@ -579,21 +579,27 @@ def build_packages(build_output, version, nightly=False, rc=None, iteration=1, s
             # the build root (to include the package name)
             package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
             if nightly:
-                name = '{}-nightly_{}_{}'.format(name,
-                                                 platform,
-                                                 arch)
-            elif static:
-                name = '{}-{}-{}_static_{}_{}'.format(name,
-                                                      package_version,
-                                                      package_iteration,
-                                                      platform,
-                                                      arch)
+                if static:
+                    name = '{}-static-nightly_{}_{}'.format(name,
+                                                            platform,
+                                                            arch)
+                else:
+                    name = '{}-nightly_{}_{}'.format(name,
+                                                     platform,
+                                                     arch)
             else:
-                name = '{}-{}-{}_{}_{}'.format(name,
-                                               package_version,
-                                               package_iteration,
-                                               platform,
-                                               arch)
+                if static:
+                    name = '{}-{}-{}-static_{}_{}'.format(name,
+                                                          package_version,
+                                                          package_iteration,
+                                                          platform,
+                                                          arch)
+                else:
+                    name = '{}-{}-{}_{}_{}'.format(name,
+                                                   package_version,
+                                                   package_iteration,
+                                                   platform,
+                                                   arch)
             current_location = os.path.join(os.getcwd(), current_location)
             if package_type == 'tar':
Fixes naming for statically-compiled packages.
py
diff --git a/d1_libclient_python/src/setup.py b/d1_libclient_python/src/setup.py
index <HASH>..<HASH> 100755
--- a/d1_libclient_python/src/setup.py
+++ b/d1_libclient_python/src/setup.py
@@ -41,6 +41,11 @@ def main():
       'rdflib == 4.0.1',
       'google.foresite-toolkit == 1.3',
       'python-dateutil == 2.1',
+      # Requests
+      'requests[security] == 2.12.4',
+      'cachecontrol == 0.11.7',
+      'requests-toolbelt == 0.7.0',
+
     ],
     setup_requires=[
       'setuptools_git >= 1.1'
Add dependencies for Requests, Toolbelt and CacheControl
py
diff --git a/kafka/consumer/fetcher.py b/kafka/consumer/fetcher.py
index <HASH>..<HASH> 100644
--- a/kafka/consumer/fetcher.py
+++ b/kafka/consumer/fetcher.py
@@ -537,15 +537,24 @@ class Fetcher(six.Iterator):
         # which can be passed to FetchRequest() via .items()
         fetchable = collections.defaultdict(lambda: collections.defaultdict(list))
 
+        # avoid re-fetching pending offsets
+        pending = set()
+        for fetch_offset, tp, _ in self._records:
+            pending.add((tp, fetch_offset))
+
         for partition in self._subscriptions.fetchable_partitions():
             node_id = self._client.cluster.leader_for_partition(partition)
+            position = self._subscriptions.assignment[partition].position
+
+            # fetch if there is a leader, no in-flight requests, and no _records
             if node_id is None or node_id == -1:
                 log.debug("No leader found for partition %s."
                           " Requesting metadata update", partition)
                 self._client.cluster.request_update()
-            elif self._client.in_flight_request_count(node_id) == 0:
-                # fetch if there is a leader and no in-flight requests
-                position = self._subscriptions.assignment[partition].position
+
+            elif ((partition, position) not in pending and
+                    self._client.in_flight_request_count(node_id) == 0):
+
                 partition_info = (
                     partition.partition,
                     position,
Don't send FetchRequest for (obviously) pending data
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -137,8 +137,8 @@ html_theme_options = {
     'github_banner': True,
     'show_powered_by': False,
     'extra_nav_links': {
-        'invenio-csl-rest@GitHub': 'http://github.com/inveniosoftware/invenio-csl-rest',
-        'invenio-csl-rest@PyPI': 'http://pypi.python.org/pypi/invenio-csl-rest/',
+        'invenio-csl-rest@GitHub': 'https://github.com/inveniosoftware/invenio-csl-rest',
+        'invenio-csl-rest@PyPI': 'https://pypi.python.org/pypi/invenio-csl-rest/',
     }
 }
 
@@ -329,3 +329,6 @@ texinfo_documents = [
 
 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {'https://docs.python.org/': None}
+
+# Autodoc configuration.
+autoclass_content = 'both'
docs: autodoc configuration and HTTPS fix

* Replaces HTTP links with HTTPS where possible.
py
diff --git a/allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py b/allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py
index <HASH>..<HASH> 100644
--- a/allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py
+++ b/allennlp/tests/data/dataset_readers/multiprocess_dataset_reader_test.py
@@ -1,9 +1,11 @@
+import sys
 from collections import Counter
 from multiprocessing import Queue, Process
 from queue import Empty
 from typing import Tuple
 
 import numpy as np
+import pytest
 
 from allennlp.common.testing import AllenNlpTestCase
 from allennlp.data.dataset_readers import MultiprocessDatasetReader, SequenceTaggingDatasetReader
@@ -23,6 +25,10 @@ def fingerprint(instance: Instance) -> Tuple[str, ...]:
     return text_tuple + labels_tuple
 
 
[email protected](
+    sys.platform == "darwin" and sys.version_info > (3, 6),
+    reason="This test causes internal Python errors on the Mac since version 3.7",
+)
 class TestMultiprocessDatasetReader(AllenNlpTestCase):
     def setUp(self) -> None:
         super().setUp()
Skip a test that we know will fail (#<I>)

* Skip a test that we know will fail

* Productivity through formatting
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,8 @@ log.info('$PATH=%s' % os.environ['PATH'])
 
 
 # Check the Python version:
-if sys.version_info < (3, 4):
+supported_versions = [(3, 4), (3, 5), (3, 6)]
+if sys.version_info not in supported_versions:
     raise RuntimeError(
         'See https://github.com/genicam/harvesters#requirements'
     )
Declare specific Python versions that Harvester supports
py
diff --git a/mesh_tensorflow/transformer/utils.py b/mesh_tensorflow/transformer/utils.py
index <HASH>..<HASH> 100644
--- a/mesh_tensorflow/transformer/utils.py
+++ b/mesh_tensorflow/transformer/utils.py
@@ -1205,6 +1205,11 @@ def get_inputs_from_file(input_filename, ignore_comments=False):
   with tf.io.gfile.GFile(input_filename, "r") as f:
     inputs = [line.rstrip() for line in f]
 
+  # If this is an empty file (because of stripping), return early.
+  if not inputs:
+    tf.logging.info("input file is empty after rstrip: %s", input_filename)
+    return []
+
   # Strip the last empty line.
   if not inputs[-1]:
     inputs.pop()
[MTF] Minor usability change in get_inputs_from_file for accidentally empty files.

PiperOrigin-RevId: <I>
py
diff --git a/bloop/models.py b/bloop/models.py
index <HASH>..<HASH> 100644
--- a/bloop/models.py
+++ b/bloop/models.py
@@ -836,6 +836,9 @@ def unbind(meta, name=None, dynamo_name=None):
     else:
         raise RuntimeError("Must provide name= or dynamo_name= to unbind from meta")
 
+    # Nothing in bloop should allow name or dynamo_name
+    # collisions to exist, so this is either a bug or
+    # the user manually hacked up meta.
     assert len(columns) <= 1
     assert len(indexes) <= 1
     assert not (columns and indexes)
Add comment to bare asserts in bloop.models.unbind
py
diff --git a/pyhomematic/devicetypes/actors.py b/pyhomematic/devicetypes/actors.py
index <HASH>..<HASH> 100644
--- a/pyhomematic/devicetypes/actors.py
+++ b/pyhomematic/devicetypes/actors.py
@@ -335,7 +335,7 @@ class IPSwitch(GenericSwitch, HelperActionOnTime):
     def ELEMENT(self):
         if "HmIP-BSM" in self.TYPE:
             return [4]
-        elif "HmIP-FSM" in self.TYPE:
+        elif "HmIP-FSM" in self.TYPE or "HmIP-FSM16" in self.TYPE:
             return [2]
         else:
             return [3]
@@ -385,7 +385,7 @@ class IPSwitchPowermeter(IPSwitch, HMSensor):
 
         # init metadata
         sensorIndex = None
-        if "HmIP-FSM" in self.TYPE:
+        if "HmIP-FSM" in self.TYPE or "HmIP-FSM16" in self.TYPE:
             sensorIndex = 5
         elif "HMIP-PSM" in self.TYPE or "HmIP-PSM" in self.TYPE or "HmIP-PSM-CH" in self.TYPE:
             sensorIndex = 6
@@ -517,6 +517,7 @@ DEVICETYPES = {
     "HmIP-PSM": IPSwitchPowermeter,
     "HmIP-PSM-CH": IPSwitchPowermeter,
     "HmIP-FSM": IPSwitchPowermeter,
+    "HmIP-FSM16": IPSwitchPowermeter,
     "HmIP-BSM": IPSwitchPowermeter,
     "HMIP-BDT": IPKeyDimmer,
     "HmIP-BDT": IPKeyDimmer,
Added HmIP-FSM<I>, Issue #<I>
py
diff --git a/dingo/core/powerflow/__init__.py b/dingo/core/powerflow/__init__.py
index <HASH>..<HASH> 100644
--- a/dingo/core/powerflow/__init__.py
+++ b/dingo/core/powerflow/__init__.py
@@ -35,8 +35,6 @@ class PFConfigDingo:
 
         if self._scenarios is None:
             raise ValueError('PF config: Please set at least one scenario.')
-        elif len(self._scenarios) != len(self._timeranges):
-            raise ValueError('PF config: Count of scenarios has to equal count of timeranges.')
 
         if not isinstance(self._timeranges, DatetimeIndex):
             if not isinstance(timesteps_count, int) or not isinstance(timestep_start, datetime):
@@ -50,6 +48,9 @@ class PFConfigDingo:
                                                periods=timesteps_count,
                                                start=timestep_start))
 
+        elif len(self._scenarios) != len(self._timeranges):
+            raise ValueError('PF config: Count of scenarios has to equal count of timeranges.')
+
 
     @property
     def scenarios(self):
fix bug in pf config class
py
diff --git a/satpy/tests/test_multiscene.py b/satpy/tests/test_multiscene.py
index <HASH>..<HASH> 100644
--- a/satpy/tests/test_multiscene.py
+++ b/satpy/tests/test_multiscene.py
@@ -531,7 +531,6 @@ def test_save_mp4(smg, tmp_path):
         scenes[0][ds_id].attrs['end_time'] = datetime(2018, 1, 1, 12)
 
         mscn = MultiScene(scenes)
-        tmp_path
         fn = str(tmp_path / 'test_save_mp4_{name}_{start_time:%Y%m%d_%H}_{end_time:%Y%m%d_%H}.mp4')
 
         writer_mock = mock.MagicMock()
Remove spurious tmp_path reference from test case
py
diff --git a/facepy/graph_api.py b/facepy/graph_api.py
index <HASH>..<HASH> 100755
--- a/facepy/graph_api.py
+++ b/facepy/graph_api.py
@@ -69,6 +69,9 @@ class GraphAPI(object):
         Supported types are 'post', 'user', 'page', 'event', 'group', 'place' and 'checkin'.
         """
+        SUPPORTED_TYPES = ['post', 'user', 'page', 'event', 'group', 'place', 'checkin']
+        if type not in SUPPORTED_TYPES:
+            raise ValueError('Supported types are %s' % ', '.join(SUPPORTED_TYPES))
 
         options = dict({
             'q': term,
Raise ValueError for unsupported types on GraphAPI#search
py
diff --git a/backtrader/indicator.py b/backtrader/indicator.py
index <HASH>..<HASH> 100644
--- a/backtrader/indicator.py
+++ b/backtrader/indicator.py
@@ -23,13 +23,12 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 
 from six.moves import xrange
 
-from .lineiterator import LineIterator
+from .lineiterator import LineIterator, IndicatorBase
 
 
-class Indicator(LineIterator):
+class Indicator(IndicatorBase):
     _ltype = LineIterator.IndType
 
-
     def preonce(self, start, end):
         # generic implementation
         for i in xrange(start, end):
indicator - Indicator uses intermediate Base Class
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@ setup(
     include_package_data=True,
     zip_safe=False,
     install_requires=[
-        'pysaml2==2.4.0',
+        'pysaml2==2.2.0',
         'python-memcached==1.48',
     ],
 )
Revert pysaml2 requirement version back to <I>

It looks like <I> broke something, at least in my app.
py
diff --git a/pycbc/waveform/waveform.py b/pycbc/waveform/waveform.py
index <HASH>..<HASH> 100644
--- a/pycbc/waveform/waveform.py
+++ b/pycbc/waveform/waveform.py
@@ -89,7 +89,7 @@ def _lalsim_fd_waveform(**p):
         lalsimulation.SimInspiralSetTidalOrder(flags, p['tidal_order'])
 
     hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform(float(p['coa_phase']),
-            delta_f,
+            p['delta_f'],
             float(pnutils.solar_mass_to_kg(p['mass1'])),
             float(pnutils.solar_mass_to_kg(p['mass2'])),
             float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
fix delta_f not coming from kwd
py
diff --git a/claptcha/claptcha.py b/claptcha/claptcha.py
index <HASH>..<HASH> 100644
--- a/claptcha/claptcha.py
+++ b/claptcha/claptcha.py
@@ -214,6 +214,8 @@ class Claptcha(object):
     @margin.setter
     @_with_pair_validator
     def margin(self, margin):
+        if 2*margin[1] > self.h:
+            raise ClaptchaError("Margin y cannot be larger than half of image height.")
         self.__margin = (int(margin[0]), int(margin[1]))
 
     @property
@@ -286,7 +288,6 @@ class Claptcha(object):
         w, h = image.size
         w *= 5
         h *= 5
-        print(image.size)
 
         l_image = Image.new('RGBA', (w, h), (0, 0, 0, 0))
         l_draw = ImageDraw.Draw(l_image)
margin: Exception on improper margin y (compared to image height)
py
diff --git a/reana_commons/version.py b/reana_commons/version.py
index <HASH>..<HASH> 100755
--- a/reana_commons/version.py
+++ b/reana_commons/version.py
@@ -14,4 +14,4 @@ and parsed by ``setup.py``.
 
 from __future__ import absolute_import, print_function
 
-__version__ = "0.7.0.dev20200520"
+__version__ = "0.7.0.dev20200602"
release: <I>.de<I>
py
diff --git a/bfg9000/tools/cc.py b/bfg9000/tools/cc.py
index <HASH>..<HASH> 100644
--- a/bfg9000/tools/cc.py
+++ b/bfg9000/tools/cc.py
@@ -8,7 +8,6 @@ from .utils import darwin_install_name, library_macro
 from ..file_types import *
 from ..iterutils import iterate, uniques
 from ..path import Path, Root
-from ..platforms import platform_name
 
 
 class CcCompiler(object):
@@ -157,7 +156,7 @@ class CcLinker(object):
 
     def _link_lib(self, library):
         if isinstance(library, WholeArchive):
-            if platform_name() == 'darwin':
+            if self.platform.name == 'darwin':
                 return ['-Wl,-force_load', library.link.path]
             return ['-Wl,--whole-archive', library.link.path,
                     '-Wl,--no-whole-archive']
@@ -209,7 +208,8 @@ class CcSharedLibraryLinker(CcLinker):
 
     @property
     def mode_args(self):
-        return ['-shared', '-fPIC']
+        shared = '-dynamiclib' if self.platform.name == 'darwin' else '-shared'
+        return [shared, '-fPIC']
 
     def _import_lib(self, library):
         if self.platform.has_import_library:
Fix some darwin-specific option logic in the cc builders
py
diff --git a/airtest/aircv/cal_confidence.py b/airtest/aircv/cal_confidence.py
index <HASH>..<HASH> 100644
--- a/airtest/aircv/cal_confidence.py
+++ b/airtest/aircv/cal_confidence.py
@@ -24,13 +24,15 @@ def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
     """Calculate the similarity of two same-size color images."""
     # Expand the region used for the confidence calculation
     img_sch_rgb = cv2.copyMakeBorder(img_sch_rgb, 10,10,10,10,cv2.BORDER_REPLICATE)
-
+    # Convert to HSV to strengthen the influence of color
+    img_src_rgb = cv2.cvtColor(img_src_rgb, cv2.COLOR_BGR2HSV)
+    img_sch_rgb = cv2.cvtColor(img_sch_rgb, cv2.COLOR_BGR2HSV)
     src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)
 
     # Compute the confidence of the three BGR channels and store it in bgr_confidence:
     bgr_confidence = [0, 0, 0]
     for i in range(3):
-        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCORR_NORMED)
+        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
         min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
         bgr_confidence[i] = max_val
use hsv to cal confidence

(cherry picked from commit <I>f<I>c<I>d<I>f7e<I>c<I>f<I>c1dc<I>)
py
diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py
index <HASH>..<HASH> 100644
--- a/salt/modules/zypper.py
+++ b/salt/modules/zypper.py
@@ -348,7 +348,7 @@ def mod_repo(repo, **kwargs):
     repo or alias
         alias by which the zypper refers to the repo
 
-    url or mirrorlist
+    url, mirrorlist or baseurl
         the URL for zypper to reference
 
     enabled
@@ -384,7 +384,7 @@ def mod_repo(repo, **kwargs):
 
     # An attempt to add new one?
     if repo not in repos_cfg.sections():
-        url = kwargs.get('url', kwargs.get('mirrorlist'))
+        url = kwargs.get('url', kwargs.get('mirrorlist', kwargs.get('baseurl')))
         if not url:
             raise CommandExecutionError(
                 'Repository \'{0}\' not found and no URL passed to create one.'.format(repo))
Added baseurl as alias for url and mirrorlist in salt.modules.zypper.mod_repo.
py
diff --git a/pyemu/pst/pst_utils.py b/pyemu/pst/pst_utils.py
index <HASH>..<HASH> 100644
--- a/pyemu/pst/pst_utils.py
+++ b/pyemu/pst/pst_utils.py
@@ -586,7 +586,13 @@ def try_run_inschek(pst):
 
     """
     for ins_file,out_file in zip(pst.instruction_files,pst.output_files):
-        df = _try_run_inschek(ins_file,out_file)
+        df = None
+        try:
+            i = InstructionFile(ins_file,pst=pst)
+            df = i.read_output_file(out_file)
+        except Exception as e:
+            warnings.warn("error processing instruction file {0}, trying inschek: {1}".format(ins_file,str(e)))
+            df = _try_run_inschek(ins_file,out_file)
         if df is not None:
             pst.observation_data.loc[df.index, "obsval"] = df.obsval
weaving instruction file stuff into try_inschek
py
diff --git a/make_magic.py b/make_magic.py
index <HASH>..<HASH> 100644
--- a/make_magic.py
+++ b/make_magic.py
@@ -615,6 +615,10 @@ class GridFrame(wx.Frame):
         Switch the type of grid between site/sample
         (Users may add ages at either age)
         """
+
+        if self.grid.changes:
+            self.onSave(None)
+
         label = event.GetEventObject().Label
         if label == 'sample':
@@ -623,8 +627,6 @@ class GridFrame(wx.Frame):
             new_parent_type = 'location'
         self.er_magic.age_type = 'site'
 
-        if self.grid.changes:
-            self.onSave(None)
 
         self.grid.Destroy()
@@ -1025,6 +1027,7 @@ class GridFrame(wx.Frame):
 
         if self.drop_down_menu:
             self.drop_down_menu.clean_up()
+
         starred_cols = self.grid.remove_starred_labels()
 
         self.grid.SaveEditControlValue() # locks in value in cell currently edited
@@ -1112,6 +1115,9 @@ class GridFrame(wx.Frame):
             item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name,
                                                                 new_parent_name, new_er_data,
                                                                 new_pmag_data, replace_data=True)
+        if not event:
+            return
+
         wx.MessageBox('Saved!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
         self.Destroy()
re-do order so that site/sample age info is actually properly saved when toggling
py
diff --git a/utils/fetch_languages.py b/utils/fetch_languages.py
index <HASH>..<HASH> 100644
--- a/utils/fetch_languages.py
+++ b/utils/fetch_languages.py
@@ -28,10 +28,10 @@ def fetch_supported_languages():
 
     names.sort()
     for engine_name in names:
-        print("fetching languages of engine %s" % engine_name)
-
         if hasattr(engines[engine_name], 'fetch_supported_languages'):
             engines_languages[engine_name] = engines[engine_name].fetch_supported_languages()
+            print("fetched %s languages from engine %s" % (
+                len(engines_languages[engine_name]), engine_name))
             if type(engines_languages[engine_name]) == list:
                 engines_languages[engine_name] = sorted(engines_languages[engine_name])
utils/fetch_languages.py: print more meaningful messages
py
diff --git a/src/oidcendpoint/user_authn/authn_context.py b/src/oidcendpoint/user_authn/authn_context.py
index <HASH>..<HASH> 100755
--- a/src/oidcendpoint/user_authn/authn_context.py
+++ b/src/oidcendpoint/user_authn/authn_context.py
@@ -177,19 +177,22 @@ class AuthnBroker(object):
 
     def pick_by_path(self, path):
         for key, item in self.db.items():
+            _method = item['method']
             try:
-                _path = item["view_path"]
-            except KeyError:
+                _path = _method.action
+            except AttributeError:
                 continue
             else:
                 if _path == path:
-                    return item["method"]
+                    return _method
         raise KeyError('No authn method at that path')
 
     def default(self):
-        if len(self.db) == 1:
+        if len(self.db) >= 1:
             item = list(self.db.values())[0]
             return item['method'], item['acr']
+        else:
+            return None
 
 
 def pick_auth(endpoint_context, areq, comparision_type=""):
Looking for path/action in the right place.
py
diff --git a/sos/plugins/openstack_sahara.py b/sos/plugins/openstack_sahara.py
index <HASH>..<HASH> 100644
--- a/sos/plugins/openstack_sahara.py
+++ b/sos/plugins/openstack_sahara.py
@@ -76,7 +76,7 @@ class RedHatSahara(OpenStackSahara, RedHatPlugin):
 
     def setup(self):
         super(RedHatSahara, self).setup()
-        self.add_copy_spec("/etc/sudoers.d/sahara")
+        self.add_copy_spec("/etc/sudoers.d/sahara*")
 
 
 # vim: et ts=4 sw=4
[openstack_sahara] capture all sahara* sudoers files

It seems that nowadays the file is /etc/sudoers.d/sahara-rootwrap.

Closes: #<I>.
py
diff --git a/egg/_gui.py b/egg/_gui.py
index <HASH>..<HASH> 100644
--- a/egg/_gui.py
+++ b/egg/_gui.py
@@ -2353,6 +2353,11 @@ class TreeDictionary(BaseObject):
     def get_list_values(self, key):
         """
         Returns the values for a list item of the specified key.
+
+        Parameters
+        ----------
+        key : string
+            Dictionary key to query.
         """
         # Make sure it's a list
         if not self.get_type(key) in ['list']:
@@ -2362,6 +2367,18 @@ class TreeDictionary(BaseObject):
         # Return a copy of the list values
         return list(self.get_widget(key).opts['values'])
 
+    def get_list_index(self, key):
+        """
+        Returns the index of the currently selected list item of the specified
+        key.
+
+        Parameters
+        ----------
+        key : string
+            Dictionary key to query.
+        """
+        return self.get_list_values(key).index(self.get_value(key))
+
     def get_dictionary(self, short_keys=False):
         """
         Returns the list of parameters and a dictionary of values
Update _gui.py

Ability to get the list index from TreeDictionary combo box.
py
diff --git a/geventreactor.py b/geventreactor.py
index <HASH>..<HASH> 100644
--- a/geventreactor.py
+++ b/geventreactor.py
@@ -274,6 +274,8 @@ class DelayedCall(object):
             L.append('>')
         return ''.join(L)
 
+    __repr__ = __str__
+
 
 class Stream(Greenlet, styles.Ephemeral):
     def __init__(self, reactor, selectable, method):
Added the missing __repr__ of geventreactor.DelayedCall
py
diff --git a/shinken/daemon.py b/shinken/daemon.py
index <HASH>..<HASH> 100644
--- a/shinken/daemon.py
+++ b/shinken/daemon.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 # Copyright (C) 2009-2011 :
 #     Gabes Jean, [email protected]
Replacing the useless shebang by a useful utf-8 encoding.

Sorry guys, I'm tired too sometimes :)
py
diff --git a/tensorpack/input_source/input_source.py b/tensorpack/input_source/input_source.py
index <HASH>..<HASH> 100644
--- a/tensorpack/input_source/input_source.py
+++ b/tensorpack/input_source/input_source.py
@@ -398,8 +398,8 @@ class DummyConstantInput(TensorInput):
         tlist = []
         ctx = get_current_tower_context()
         assert ctx is not None
-        assert len(self.shapes) == len(self._desc)
-        for idx, p in enumerate(self._desc):
+        assert len(self.shapes) == len(self._spec)
+        for idx, p in enumerate(self._spec):
             tlist.append(tf.constant(
                 0, dtype=p.dtype,
                 name='dummy-{}-{}'.format(p.name, ctx.index),
bugfix in DummyConstantInput
py
diff --git a/site_config/models.py b/site_config/models.py
index <HASH>..<HASH> 100644
--- a/site_config/models.py
+++ b/site_config/models.py
@@ -58,10 +58,12 @@ class ConfigField(models.Model):
     SPLIT_TYPES = [FIELD_TYPE_TEXT, FIELD_TYPE_INPUT]
 
     site = models.ForeignKey(
-        Site, verbose_name=_('Site'), default=settings.SITE_ID)
+        Site, verbose_name=_('Site'), default=settings.SITE_ID,
+        on_delete=models.CASCADE)
 
     group = models.ForeignKey(
-        ConfigGroup, null=True, blank=True, verbose_name=_('Group'))
+        ConfigGroup, null=True, blank=True, verbose_name=_('Group'),
+        on_delete=models.CASCADE)
 
     label = models.CharField(_('Label'), max_length=255)
Add `on_delete` params to foreign keys.
py
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index <HASH>..<HASH> 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -1018,6 +1018,11 @@ class Group(models.Model):
 
 
 class ClassifiedFailure(models.Model):
+    """
+    Classifies zero or more TextLogErrors as a failure.
+
+    Optionally linked to a bug.
+    """
     id = models.BigAutoField(primary_key=True)
     text_log_errors = models.ManyToManyField("TextLogError", through='TextLogErrorMatch',
                                              related_name='classified_failures')
Document what ClassifiedFailures represent
py
diff --git a/pkg_resources/__init__.py b/pkg_resources/__init__.py
index <HASH>..<HASH> 100644
--- a/pkg_resources/__init__.py
+++ b/pkg_resources/__init__.py
@@ -1512,6 +1512,19 @@ is not allowed.
     ...
     ValueError: Use of .. or absolute path in a resource path \
is not allowed.
+
+    Blank values are allowed
+
+    >>> vrp('')
+    >>> bool(warned)
+    False
+
+    Non-string values are not.
+
+    >>> vrp(None)
+    Traceback (most recent call last):
+    ...
+    AttributeError: ...
     """
     invalid = (
         os.path.pardir in path.split(posixpath.sep) or
Add two tests capturing expectation for '' and None to _validate_resource_path.

Ref #<I>.
py
diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py
index <HASH>..<HASH> 100644
--- a/tests/integration/states/test_file.py
+++ b/tests/integration/states/test_file.py
@@ -2134,9 +2134,6 @@ class FileTest(ModuleCase, SaltReturnAssertsMixin):
             for filename in glob.glob('{0}.bak*'.format(testcase_filedest)):
                 os.unlink(filename)
 
-    @skipIf(six.PY3, 'This test will have a LOT of rewriting to support both Py2 and Py3')
-    # And I'm more comfortable with the author doing it - s0undt3ch
-    @skipIf(IS_WINDOWS, 'Don\'t know how to fix for Windows')
     def test_issue_8947_utf8_sls(self):
         '''
         Test some file operation with utf-8 characters on the sls
Unskip this test

It has been edited to work properly and should be fine in PY3 now.
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -15,7 +15,7 @@
 # import os
 # import sys
 # sys.path.insert(0, os.path.abspath('.'))
-
+from persim import __version__
 
 # -- Project information -----------------------------------------------------
 
@@ -24,9 +24,9 @@
 copyright = u'2018, Nathaniel Saul'
 author = u'Nathaniel Saul'
 
 # The short X.Y version
-version = u''
+version = __version__
 # The full version, including alpha/beta/rc tags
-release = u'0.0.6'
+release = __version__
 
 # -- General configuration ---------------------------------------------------
docs automatically get version number from library
py
diff --git a/example.py b/example.py
index <HASH>..<HASH> 100755
--- a/example.py
+++ b/example.py
@@ -4,7 +4,7 @@ import datetime
 import sys
 
 import IndexedRedis
 from IndexedRedis import IndexedRedisModel, IRField
-from IndexedRedis.fields import IRCompressedField, IRFieldChain, IRRawField
+from IndexedRedis.fields import IRCompressedField, IRFieldChain, IRRawField, IRBytesField
 
 # vim: ts=4 sw=4 expandtab
@@ -21,7 +21,7 @@ class Song(IndexedRedisModel):
         'description',
         'copyright',
         IRRawField('mp3_data'),  # Do not perform any conversion on the data.
-        IRCompressedField('thumbnail', compressMode='gzip'),  # Compress this field in storage using "bz2" compression
+        IRFieldChain('thumbnail', [IRBytesField(), IRCompressedField(compressMode='gzip')]),  # Compress this field in storage using "bz2" compression
         IRField('tags', valueType=list),
     ]
Change example to show IRBytesField on thumbnail
py
diff --git a/python/tests/phonenumbermatchertest.py b/python/tests/phonenumbermatchertest.py
index <HASH>..<HASH> 100644
--- a/python/tests/phonenumbermatchertest.py
+++ b/python/tests/phonenumbermatchertest.py
@@ -853,6 +853,18 @@ class PhoneNumberMatcherTest(unittest.TestCase):
         """Returns True if there were no matches found."""
         return not matcher.has_next()
 
+    def testDoubleExtensionX(self):
+        # Python version extra test - multiple x for extension marker
+        xx_ext = "800 234 1 111 xx 1111"
+        # This gives different results for different leniency values (and so
+        # can't be used in a NumberTest).
+        m0 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.POSSIBLE).next()
+        self.assertEqual(xx_ext, m0.raw_string)
+        m1 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.VALID).next()
+        self.assertEqual("800 234 1 111", m1.raw_string)
+        matcher2 = PhoneNumberMatcher(xx_ext, "US", leniency=Leniency.STRICT_GROUPING)
+        self.assertFalse(matcher2.has_next())
+
     def testInternals(self):
         # Python-specific test: coverage of internals
         from phonenumbers.phonenumbermatcher import _limit, _verify, _is_national_prefix_present_if_required
Reinstate Python extra test but as a standalone case
py
diff --git a/setuptools_odoo/core.py b/setuptools_odoo/core.py
index <HASH>..<HASH> 100644
--- a/setuptools_odoo/core.py
+++ b/setuptools_odoo/core.py
@@ -22,7 +22,7 @@ ODOO_VERSION_INFO = {
         'addon_dep_version': '>=8,<9',
     },
     '9.0': {
-        'dep': 'odoo>=9,<10',
+        'odoo_dep': 'odoo>=9,<10',
         'base_addons': base_addons.odoo9,
         'addon_dep_version': '>=9,<10',
     },
fix typo in <I> data
py
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index <HASH>..<HASH> 100644
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -67,6 +67,8 @@ class ModelTesterMixin:
         if model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
             return {
                 k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
+                if isinstance(v, torch.Tensor) and v.ndim != 0
+                else v
                 for k, v in inputs_dict.items()
             }
         return inputs_dict
@@ -157,7 +159,7 @@ class ModelTesterMixin:
             model.to(torch_device)
             model.eval()
             with torch.no_grad():
-                outputs = model(**inputs_dict)
+                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
             attentions = outputs[-1]
             self.assertEqual(model.config.output_hidden_states, False)
             self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
Fix the CI (#<I>)

* Fix CI
py
diff --git a/pypot/server/snap.py b/pypot/server/snap.py
index <HASH>..<HASH> 100644
--- a/pypot/server/snap.py
+++ b/pypot/server/snap.py
@@ -4,6 +4,7 @@ import bottle
 import socket
 import re
 import logging
+from ast import literal_eval as make_tuple
 
 from .server import AbstractServer
 from.httpserver import EnableCors
@@ -90,7 +91,7 @@ class SnapRobotServer(AbstractServer):
         """
         for m_settings in motors_register_value.split(';'):
             settings = m_settings.split(':')
-            rr.set_motor_register_value(settings[0], settings[1], float(settings[2]))
+            rr.set_motor_register_value(settings[0], settings[1], make_tuple(settings[2]))
         return 'Done!'
 
     # TODO : delete ?
Fix parsing register value in Snap!

We were unable to set the pid register
py
diff --git a/safe_qgis/report/html_renderer.py b/safe_qgis/report/html_renderer.py
index <HASH>..<HASH> 100644
--- a/safe_qgis/report/html_renderer.py
+++ b/safe_qgis/report/html_renderer.py
@@ -165,9 +165,9 @@ class HtmlRenderer():
                 QtCore.Qt.ScrollBarAlwaysOff)
 
             # noinspection PyUnresolvedReferences
+            self.htmlLoadedFlag = False
             self.webView.loadFinished.connect(self.html_loaded_slot)
             self.webView.setHtml(myHtml)
-            self.htmlLoadedFlag = False
             myTimeOut = 20
             myCounter = 0
             mySleepPeriod = 1
tentative fix for html_renderer tests hanging
py
diff --git a/sandboxapi/wildfire.py b/sandboxapi/wildfire.py
index <HASH>..<HASH> 100644
--- a/sandboxapi/wildfire.py
+++ b/sandboxapi/wildfire.py
@@ -104,9 +104,10 @@ class WildFireAPI(sandboxapi.SandboxAPI):
         except (ValueError, IndexError) as e:
             raise sandboxapi.SandboxError(e)
 
-    def is_available(self) -> bool:
+    def is_available(self):
         """Checks to see if the WildFire sandbox is up and running.
 
+        :rtype: bool
         :return: True if the WildFire sandbox is responding, otherwise False.
 
         WildFire doesn't have an explicit endpoint for checking the sandbox status,
         so this is kind of a hack.
Removed an annotation for python 2 support.
py
diff --git a/metaseq/filetype_adapters.py b/metaseq/filetype_adapters.py
index <HASH>..<HASH> 100644
--- a/metaseq/filetype_adapters.py
+++ b/metaseq/filetype_adapters.py
@@ -13,6 +13,7 @@ Subclasses must define make_fileobj(), which returns an object to be iterated
 over in __getitem__
 """
 from bx.bbi.bigbed_file import BigBedFile
+from bx.bbi.bigwig_file import BigWigFile
 import numpy as np
 import subprocess
 import pysam
@@ -124,6 +125,15 @@ class BigWigAdapter(BaseAdapter):
             "__getitem__ not implemented for %s" % self.__class__.__name__)
 
     def summarize(self, interval, bins=None):
+        bw = BigWigFile(open(self.fn))
+        s = bw.get_as_array(
+            interval.chrom,
+            interval.start,
+            interval.stop,)
+        s[np.isnan(s)] = 0
+        return s
+
+    def ucsc_summarize(self, interval, bins=None):
         # if bins is none, then adaptively work something out...say, 100-bp
         # bins
         if bins is None:
use bx-python for bigwig summary
py
diff --git a/eemeter/ee/derivatives.py b/eemeter/ee/derivatives.py
index <HASH>..<HASH> 100644
--- a/eemeter/ee/derivatives.py
+++ b/eemeter/ee/derivatives.py
@@ -108,7 +108,7 @@ def unpack(modeled_trace, baseline_label, reporting_label,
                 normal_index, weather_normal_source)
         if hourly_trace_data is not None:
             normal_index = pd.date_range(
-                '2015-01-01', freq='H', periods=normalyear_periods,
+                '2015-01-01', freq='H', periods=365*24,
                 tz=pytz.UTC)
             hourly_annualized_fixture = formatter.create_demand_fixture(
                 normal_index, weather_normal_source)
@@ -1309,7 +1309,7 @@ def normal_year_co2_avoided(deriv_input, resource_curve):
         _report_failed_derivative(series)
         return None
 
-    try:
+    if 1:# try:
         co2_by_load = avert.get_co2_by_load()
         load_by_hour = avert.get_load_by_hour()
         load_by_hour = load_by_hour[~(
@@ -1335,6 +1335,6 @@ def normal_year_co2_avoided(deriv_input, resource_curve):
             'value': [v for v in avoided_emissions.values],
             'variance': [None for v in avoided_emissions.values]
         }
-    except:
+    else:#except:
         _report_failed_derivative(series)
         return None
Correct the length of the hourly fixture
py
diff --git a/airflow/hooks/base_hook.py b/airflow/hooks/base_hook.py
index <HASH>..<HASH> 100644
--- a/airflow/hooks/base_hook.py
+++ b/airflow/hooks/base_hook.py
@@ -7,6 +7,8 @@ from airflow import settings
 from airflow.models import Connection
 from airflow.utils import AirflowException
 
+CONN_ENV_PREFIX = 'AIRFLOW_CONN_'
+
 
 class BaseHook(object):
     """
@@ -34,10 +36,11 @@ class BaseHook(object):
         return db
 
     def get_connection(self, conn_id):
-        if os.environ.get(conn_id):
-            temp_uri = urlparse(os.environ.get(conn_id))
-            conn = Connection(uri=temp_uri)
-        if conn is None:
+        environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id)
+        conn = None
+        if environment_uri:
+            conn = Connection(uri=environment_uri)
+        else:
             conn = random.choice(self.get_connections(conn_id))
         if conn.host:
             logging.info("Using connection to: " + conn.host)
fixed the base_hook in regards to env variable
py
diff --git a/bang/util.py b/bang/util.py
index <HASH>..<HASH> 100644
--- a/bang/util.py
+++ b/bang/util.py
@@ -217,8 +217,6 @@ class S3Handler(BufferingHandler):
 def initialize_logging(config):
     multiprocessing.current_process().name = 'Stack'
     cfg = config.get(A.LOGGING, {})
-    console_level = cfg.get(A.logging.CONSOLE_LEVEL, 'INFO')
-    log.setLevel(console_level)
 
     # log to s3 if there's a destination specified in the config
     bucket = cfg.get(A.logging.S3_BUCKET)
@@ -267,7 +265,8 @@ def initialize_logging(config):
     formatter = logging.Formatter(CONSOLE_LOGGING_FORMAT)
     handler = logging.StreamHandler()  # default stream is stderr
     handler.setFormatter(formatter)
-    handler.setLevel(logging.DEBUG)
+    console_level = cfg.get(A.logging.CONSOLE_LEVEL, 'INFO')
+    handler.setLevel(console_level)
     log.setLevel(logging.DEBUG)
     log.addHandler(handler)
     log.debug('Logging initialized.')
Set console logging level correctly.

This resolves #<I>.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ def read(fname):
 
 setup(
     name='django-userprofiles',
-    version='0.3',
+    version='0.3.1',
     description='Registration, e-mail verifications and profiles.',
     long_description=read('README.rst'),
     author='Stephan Jaekel',
bumped version: <I>
py
diff --git a/tacl/catalogue.py b/tacl/catalogue.py
index <HASH>..<HASH> 100644
--- a/tacl/catalogue.py
+++ b/tacl/catalogue.py
@@ -36,7 +36,7 @@ class Catalogue (dict):
 
         """
         reader = csv.reader(open(path), delimiter=' ', skipinitialspace=True)
         for row in reader:
-            if len(row) == 2:
+            if len(row) > 1:
                 self[row[0]] = row[1]
 
     def save (self, path):
Allowed more than two space-separated fields in a catalogue file, to avoid confusion with trailing spaces after a label.
py
diff --git a/ib_insync/contract.py b/ib_insync/contract.py
index <HASH>..<HASH> 100644
--- a/ib_insync/contract.py
+++ b/ib_insync/contract.py
@@ -166,8 +166,13 @@ class Bond(Contract):
 class FuturesOption(Contract):
     __slots__ = ()
 
-    def __init__(self, **kwargs):
-        Contract.__init__(self, secType='FOP', **kwargs)
+    def __init__(self, symbol='', lastTradeDateOrContractMonth='',
+            strike='', right='', exchange='', multiplier='',
+            currency='', **kwargs):
+        Contract.__init__(self, secType='FOP', symbol=symbol,
+                lastTradeDateOrContractMonth=lastTradeDateOrContractMonth,
+                strike=strike, right=right, exchange=exchange,
+                multiplier=multiplier, currency=currency, **kwargs)
 
 
 class MutualFund(Contract):
Issue #<I> FuturesOption constructor like Option
py
diff --git a/eth_account/messages.py b/eth_account/messages.py
index <HASH>..<HASH> 100644
--- a/eth_account/messages.py
+++ b/eth_account/messages.py
@@ -128,7 +128,8 @@ def encode_structured_data(
         - text, as a json-encoded string
         - hexstr, as a hex-encoded (json-encoded) string
 
-    .. WARNING:: Note that this code has not gone through an external audit.
+    .. WARNING:: Note that this code has not gone through an external audit, and
+        the test cases are incomplete.
         Also, watch for updates to the format, as the EIP is still in DRAFT.
 
     :param primitive: the binary message to be signed
Add warning about missing tests for structured msgs

The more I dig in, the more I think it's probably very broken.
py
diff --git a/salt/states/npm.py b/salt/states/npm.py
index <HASH>..<HASH> 100644
--- a/salt/states/npm.py
+++ b/salt/states/npm.py
@@ -101,7 +101,7 @@ def installed(name,
             return ret
     else:
         installed_pkgs = dict((p.lower(), info)
-                              for p, info in list(installed_pkgs.items()))
+                              for p, info in installed_pkgs.items())
 
     pkgs_satisfied = []
     pkgs_to_install = []
List call not needed. Changing it back to what it was
py
diff --git a/lavalink/utils.py b/lavalink/utils.py
index <HASH>..<HASH> 100644
--- a/lavalink/utils.py
+++ b/lavalink/utils.py
@@ -151,11 +151,12 @@ def decode_track(track: str) -> AudioTrack:
             'identifier': identifier,
             'isStream': is_stream,
             'uri': uri,
-            'isSeekable': not is_stream
+            'isSeekable': not is_stream,
+            'source': source
         }
     }
 
-    return AudioTrack(track_object, 0, source=source, position=position, encoder_version=version)
+    return AudioTrack(track_object, 0, position=position, encoder_version=version)
 
 
 # def encode_track(track: dict):
(AudioTrack) avoid placing source in extra.
py
diff --git a/solvebio/test/test_dataset.py b/solvebio/test/test_dataset.py
index <HASH>..<HASH> 100644
--- a/solvebio/test/test_dataset.py
+++ b/solvebio/test/test_dataset.py
@@ -33,9 +33,8 @@ class DatasetTests(SolveBioTestCase):
 
         check_fields = set(['class_name', 'created_at',
                             'data_type', 'dataset', 'dataset_id',
                             'description', 'facets_url',
-                            'full_name', 'id', 'name', 'updated_at',
-                            'url'])
+                            'id', 'url'])
         self.assertSetEqual(set(dataset_field.keys()), check_fields)
 
         expected = """
@@ -86,7 +85,6 @@ class DatasetTests(SolveBioTestCase):
                          'tabulated dataset fields')
 
     def test_dataset_facets(self):
-        field = Dataset.retrieve(self.TEST_DATASET_NAME).fields('hgnc_id')
+        field = Dataset.retrieve(self.TEST_DATASET_NAME).fields('status')
         facets = field.facets()
-        self.assertTrue(facets.total >= 0,
-                        'facet should have an total field >= 0')
+        self.assertTrue(len(facets['values']) >= 0)
fix facet tests for new API response
py
diff --git a/src/hieroglyph/writer.py b/src/hieroglyph/writer.py
index <HASH>..<HASH> 100644
--- a/src/hieroglyph/writer.py
+++ b/src/hieroglyph/writer.py
@@ -118,8 +118,13 @@ class BaseSlideTranslator(HTMLTranslator):
 
     def visit_title(self, node):
         if isinstance(node.parent, slide):
-            slide_level = node.attributes.get('level', self.section_level)
-            level = slide_level + self.initial_header_level - 1
+            slide_level = node.parent.attributes.get(
+                'level',
+                self.section_level)
+            level = max(
+                slide_level + self.initial_header_level - 1,
+                1,
+            )
 
             tag = 'h%s' % level
             self.body.append(self.starttag(node, tag, ''))
Fixed level calculation for slide directive titles.
py
diff --git a/post_office/admin.py b/post_office/admin.py
index <HASH>..<HASH> 100644
--- a/post_office/admin.py
+++ b/post_office/admin.py
@@ -19,6 +19,7 @@ class LogInline(admin.StackedInline):
 class EmailAdmin(admin.ModelAdmin):
     list_display = ('to', 'subject', 'template', 'status', 'last_updated')
     inlines = [LogInline]
+    list_filter = ['status']
 
     def queryset(self, request):
         return super(EmailAdmin, self).queryset(request).select_related('template')
Added status filter in EmailAdmin.
py
diff --git a/girder/api/v1/item.py b/girder/api/v1/item.py
index <HASH>..<HASH> 100644
--- a/girder/api/v1/item.py
+++ b/girder/api/v1/item.py
@@ -45,7 +45,7 @@ class Item(Resource):
     @access.public(scope=TokenScope.DATA_READ)
     @filtermodel(model='item')
     @describeRoute(
-        Description('Search for an item by certain properties.')
+        Description('List or search for items.')
         .responseClass('Item')
         .param('folderId', "Pass this to list all items in a folder.",
                required=False)
api: Fix item endpoint description

Descriptions of the collection and item endpoints are not consistent.
py
diff --git a/rllib/policy/torch_policy.py b/rllib/policy/torch_policy.py
index <HASH>..<HASH> 100644
--- a/rllib/policy/torch_policy.py
+++ b/rllib/policy/torch_policy.py
@@ -4,7 +4,6 @@ import numpy as np
 import time
 from typing import Callable, Dict, List, Optional, Tuple, Type, Union
 
-import ray
 from ray.rllib.models.modelv2 import ModelV2
 from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
 from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
@@ -104,7 +103,7 @@ class TorchPolicy(Policy):
         """
         self.framework = "torch"
         super().__init__(observation_space, action_space, config)
-        if torch.cuda.is_available() and ray.get_gpu_ids():
+        if torch.cuda.is_available():
             self.device = torch.device("cuda")
         else:
             self.device = torch.device("cpu")
[RLlib] Issue <I> TorchPolicy GPU. (#<I>)
py
diff --git a/gtkmvco/gtkmvc3/__init__.py b/gtkmvco/gtkmvc3/__init__.py
index <HASH>..<HASH> 100644
--- a/gtkmvco/gtkmvc3/__init__.py
+++ b/gtkmvco/gtkmvc3/__init__.py
@@ -55,7 +55,7 @@ __all__ = ["Model", "TreeStoreModel", "ListStoreModel", "TextBufferModel",
            "observable", "observer",
            "adapters",  # packages
            ]
-__version = (3,0,0)
+__version = (1,0,0)
 
 # visible classes
 from gtkmvc3.model import Model, TreeStoreModel, ListStoreModel, TextBufferModel
Changed version to be <I>

Fixes issue #8
py
diff --git a/openquake/signalling.py b/openquake/signalling.py
index <HASH>..<HASH> 100644
--- a/openquake/signalling.py
+++ b/openquake/signalling.py
@@ -53,7 +53,7 @@ def connect():
     chn = conn.channel()
     # I use the vhost as a realm, which seems to be an arbitrary string
     chn.access_request(cfg['vhost'], active=False, read=True)
-    chn.exchange_declare(cfg['exchange'], 'topic', auto_delete=True)
+    chn.exchange_declare(cfg['exchange'], 'topic', auto_delete=False)
 
     return conn, chn
 
@@ -76,7 +76,7 @@ def create_queue(job_id, levels, name=''):
 
     conn, chn = connect()
 
-    name, _, _ = chn.queue_declare(queue=name)
+    name, _, _ = chn.queue_declare(queue=name, auto_delete=False)
 
     for level in levels:
         chn.queue_bind(name, cfg['exchange'],
For queues and exchanges auto_delete=False

Former-commit-id: aff8bbe4fcdba<I>ff<I>bac3d1a<I>f<I>dec<I>a
py
diff --git a/cumulusci/robotframework/Salesforce.py b/cumulusci/robotframework/Salesforce.py
index <HASH>..<HASH> 100644
--- a/cumulusci/robotframework/Salesforce.py
+++ b/cumulusci/robotframework/Salesforce.py
@@ -295,8 +295,15 @@ class Salesforce(object):
         self.selenium.get_webelement(menu_locator).click()
 
     def _populate_field(self, locator, value):
-        self.selenium.set_focus_to_element(locator)
+        # clearing the field in a cross-browser, cross-platform
+        # way is surprisingly difficult. .clear() doesn't work in
+        # all browsers, and sending keyboard shortcuts doesn't work
+        # on all platforms. So, we'll take a belt-and-suspenders
+        # approach and throw the whole arsenal at the problem.
         field = self.selenium.get_webelement(locator)
+        self.selenium.set_focus_to_element(locator)
+        field.clear()
+        self.selenium.driver.execute_script("arguments[0].value = '';", field)
         field.send_keys(Keys.HOME + Keys.SHIFT + Keys.END)
         field.send_keys(value)
Fix _populate_field to work on firefox (as well as other browsers)

I didn't add any tests because we already have a test that fails on firefox without this fix (ui.robot:Populate Field)
py
diff --git a/rtpipe/RT.py b/rtpipe/RT.py
index <HASH>..<HASH> 100644
--- a/rtpipe/RT.py
+++ b/rtpipe/RT.py
@@ -258,10 +258,12 @@ def dataflag(d, data_read):
 
     for flag in d['flaglist']:
         mode, sig, conv = flag
-        chans = n.arange(d['nchan'])  # chans, pol are indices for splitting up work
-        for pol in range(d['npol']):
-            status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv)
-            logger.info(status)
+        for ss in d['spw']:
+            chans = n.array([chan for chan in range(d['nchan']*ss/d['nspw'], d['nchan']*(ss+1)/d['nspw']) if chan in d['chans']])
+#            chans = n.arange(d['nchan'])  # chans, pol are indices for splitting up work
+            for pol in range(d['npol']):
+                status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv)
+                logger.info(status)
 
 def dataflagatom(chans, pol, d, sig, mode, conv):
     """ Wrapper function to get shared memory as numpy array into pool
flags by spw channel groups to avoid whole-spw flagging
py
diff --git a/stacker/plan.py b/stacker/plan.py
index <HASH>..<HASH> 100644
--- a/stacker/plan.py
+++ b/stacker/plan.py
@@ -52,8 +52,9 @@ class Step(object):
         return self._run_func(results, self.stack, status=self.status)
 
     def set_status(self, status):
-        logger.debug("Setting %s state to %s.", self.stack.name, status.name)
-        self.status = status
+        if status is not self.status:
+            logger.debug("Setting %s state to %s.", self.stack.name, status.name)
+            self.status = status
 
     def complete(self):
         self.set_status(COMPLETE)
Only set status if the status has changed
py
diff --git a/superset/views/core.py b/superset/views/core.py
index <HASH>..<HASH> 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -2553,9 +2553,9 @@ class Superset(BaseSupersetView):
         )
 
         session = db.session()
-        mydb = session.query(models.Database).filter_by(id=database_id).first()
+        mydb = session.query(models.Database).filter_by(id=database_id).one_or_none()
         if not mydb:
-            json_error_response(
+            return json_error_response(
                 "Database with id {} is missing.".format(database_id), status=400
             )
[superset] Fix, missing return on error and improved query (#<I>)
py
diff --git a/djangular/forms/angular_validation.py b/djangular/forms/angular_validation.py
index <HASH>..<HASH> 100644
--- a/djangular/forms/angular_validation.py
+++ b/djangular/forms/angular_validation.py
@@ -40,6 +40,9 @@ class NgFormValidationMixin(NgFormBaseMixin):
             errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')
         potential_errors = types.MethodType(errors_function, bound_field.field)()
         errors.append(SafeTuple((identifier, self.field_error_css_classes, '$dirty', '$valid', 'valid', '')))  # for valid fields
+        if bound_field.value():
+            # valid bound fields shall display OK tick, even when in pristine state
+            errors.append(SafeTuple((identifier, self.field_error_css_classes, '$pristine', '$valid', 'valid', '')))
         errors.extend([SafeTuple((identifier, self.field_error_css_classes, '$dirty', pe[0], 'invalid', force_text(pe[1])))
                        for pe in potential_errors])
         return errors
Validation tick is added to bound field in pristine mode
py
diff --git a/poetry/puzzle/provider.py b/poetry/puzzle/provider.py
index <HASH>..<HASH> 100644
--- a/poetry/puzzle/provider.py
+++ b/poetry/puzzle/provider.py
@@ -189,7 +189,7 @@ class Provider:
             try:
                 venv.run("python", "setup.py", "egg_info")
 
-                egg_info = list(tmp_dir.glob("*.egg-info"))[0]
+                egg_info = next(tmp_dir.glob("**/*.egg-info"))
 
                 meta = pkginfo.UnpackedSDist(str(egg_info))
GH-<I> - fix searching of .egg-info (#<I>)
py
diff --git a/spacy/util.py b/spacy/util.py
index <HASH>..<HASH> 100644
--- a/spacy/util.py
+++ b/spacy/util.py
@@ -434,6 +434,27 @@ def compounding(start, stop, compound):
         curr *= compound
 
 
+def stepping(start, stop, steps):
+    """Yield an infinite series of values that step from a start value to a
+    final value over some number of steps. Each step is (stop-start)/steps.
+
+    After the final value is reached, the generator continues yielding that
+    value.
+
+    EXAMPLE:
+        >>> sizes = stepping(1., 200., 100)
+        >>> assert next(sizes) == 1.
+        >>> assert next(sizes) == 1 * (200.-1.) / 100
+        >>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
+    """
+    def clip(value):
+        return max(value, stop) if (start > stop) else min(value, stop)
+    curr = float(start)
+    while True:
+        yield clip(curr)
+        curr += (stop - start) / steps
+
+
 def decaying(start, stop, decay):
     """Yield an infinite series of linearly decaying values."""
     def clip(value):
Add a stepping function, for changing batch sizes or learning rates
py
diff --git a/salt/modules/zypper.py b/salt/modules/zypper.py
index <HASH>..<HASH> 100644
--- a/salt/modules/zypper.py
+++ b/salt/modules/zypper.py
@@ -879,13 +879,19 @@ def clean_locks():
 
         salt '*' pkg.clean_locks
     '''
-    if not os.path.exists(LOCKS):
-        return False
-
-    cmd = ('zypper --non-interactive cl')
-    __salt__['cmd.run'](cmd, output_loglevel='trace')
+    LCK = "removed"
+    out = {LCK: 0}
+    if not os.path.exists("/etc/zypp/locks"):
+        return out
+
+    doc = dom.parseString(__salt__['cmd.run']('zypper --non-interactive -x cl', output_loglevel='trace'))
+    for node in doc.getElementsByTagName("message"):
+        text = node.childNodes[0].nodeValue.lower()
+        if text.startswith(LCK):
+            out[LCK] = text.split(" ")[1]
+            break
 
-    return True
+    return out
 
 
 def remove_lock(name=None, pkgs=None, **kwargs):  # pylint: disable=unused-argument
Return an actual amount of removed locks.
py
diff --git a/pyrogram/methods/users/iter_profile_photos.py b/pyrogram/methods/users/iter_profile_photos.py
index <HASH>..<HASH> 100644
--- a/pyrogram/methods/users/iter_profile_photos.py
+++ b/pyrogram/methods/users/iter_profile_photos.py
@@ -28,7 +28,7 @@ class IterProfilePhotos(Scaffold):
         chat_id: Union[int, str],
         offset: int = 0,
         limit: int = 0,
-    ) -> Optional[AsyncGenerator["types.Message", None]]:
+    ) -> Optional[AsyncGenerator["types.Photo", None]]:
         """Iterate through a chat or a user profile photos sequentially.
 
         This convenience method does the same as repeatedly calling :meth:`~pyrogram.Client.get_profile_photos` in a
Fix iter_profile_photos wrong hinted return type
py
diff --git a/test/asyncio_tests/test_asyncio_database.py b/test/asyncio_tests/test_asyncio_database.py
index <HASH>..<HASH> 100644
--- a/test/asyncio_tests/test_asyncio_database.py
+++ b/test/asyncio_tests/test_asyncio_database.py
@@ -31,7 +31,7 @@ from test.asyncio_tests import (asyncio_test,
 from test.test_environment import host, port
 
 
-class MotorDatabaseTest(AsyncIOTestCase):
+class TestAsyncIODatabase(AsyncIOTestCase):
     @asyncio_test
     def test_database(self):
         # Test that we can create a db directly, not just get on from
MOTOR-<I> Rename test to TestAsyncIODatabase.
py
diff --git a/mesh_tensorflow/transformer/utils.py b/mesh_tensorflow/transformer/utils.py
index <HASH>..<HASH> 100644
--- a/mesh_tensorflow/transformer/utils.py
+++ b/mesh_tensorflow/transformer/utils.py
@@ -2268,10 +2268,15 @@ def eval_model(estimator,
       summary = tf.Summary()
       targets = cached_targets[eval_dataset.name]
      metric_result = metric_fn(targets, predictions)
-      for metric_name, metric_value in metric_result.items():
-        tag = "eval/{}/{}".format(eval_dataset.name, metric_name)
-        tf.logging.info("%s at step %d: %.3f", tag, global_step, metric_value)
-        summary.value.add(tag=tag, simple_value=metric_value)
+      if isinstance(metric_result, tf.Summary):
+        tf.logging.info("Precomputed summary at step %d", global_step)
+        summary_writer.add_summary(metric_result, global_step)
+      else:
+        for metric_name, metric_value in metric_result.items():
+          tag = "eval/{}/{}".format(eval_dataset.name, metric_name)
+          tf.logging.info("%s at step %d: %.3f", tag, global_step,
+                          metric_value)
+          summary.value.add(tag=tag, simple_value=metric_value)
       summary_writer.add_summary(summary, global_step)
       summary_writer.flush()
Modified the `eval_model` function in mesh_tensorflow/transformer/utils.py to accept Summary protos in addition to tag-to-scalar dicts.

PiperOrigin-RevId: <I>
py
diff --git a/kdtree/kdtree.py b/kdtree/kdtree.py
index <HASH>..<HASH> 100644
--- a/kdtree/kdtree.py
+++ b/kdtree/kdtree.py
@@ -247,6 +247,7 @@ class KDNode(Node):
         tmp_l, tmp_r = self.left, self.right
         self.left, self.right = root.left, root.right
         root.left, root.right = tmp_l if tmp_l is not root else self, tmp_r if tmp_r is not root else self
+        self.axis, root.axis = root.axis, self.axis
 
         if max_p is not self:
also swap Node's axis when swapping pos in tree
py
diff --git a/src/foremast/pipeline/create_pipeline.py b/src/foremast/pipeline/create_pipeline.py
index <HASH>..<HASH> 100644
--- a/src/foremast/pipeline/create_pipeline.py
+++ b/src/foremast/pipeline/create_pipeline.py
@@ -127,6 +127,7 @@ class SpinnakerPipeline:
                 'slack': slack,
                 'root_volume_size': root_volume_size,
                 'ami_template_file': ami_template_file,
+                'pipeline': self.settings['pipeline']
             },
             'id': pipeline_id
         }
pass all pipeline data to wrapper creation
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@ setup(
     long_description=read('README.md'),
    long_description_content_type='text/markdown',
    license='BSD',
-    keywords='ews exchange autodiscover outlook o365 office365',
+    keywords='ews exchange autodiscover microsoft outlook exchange-web-services o365 office365',
    install_requires=['requests>=2.7', 'requests_ntlm>=0.2.0', 'dnspython>=1.14.0', 'pytz', 'lxml>3.0',
                      'cached_property', 'future', 'six', 'tzlocal', 'python-dateutil', 'pygments',
                      'defusedxml', 'isodate', 'base64io'],
Actually, just align with the keywords on GitHub
py
diff --git a/gphoto2/gphoto2.py b/gphoto2/gphoto2.py
index <HASH>..<HASH> 100644
--- a/gphoto2/gphoto2.py
+++ b/gphoto2/gphoto2.py
@@ -158,7 +158,8 @@ class Directory(object):
         with open(local_path, 'rb') as fp:
             lib.gp_file_new_from_fd(camerafile_p, fp.fileno())
             lib.gp_camera_folder_put_file(
-                self._cam, self.path, os.path.basename(local_path),
+                self._cam, bytes(self.path) + b"/",
+                bytes(os.path.basename(local_path)),
                 backend.FILE_TYPES['normal'], camerafile_p[0], self._ctx)
 
     def __eq__(self, other):
Pass path variables as bytestrings into Directory.upload
py
diff --git a/spikeextractors/sortingextractor.py b/spikeextractors/sortingextractor.py
index <HASH>..<HASH> 100644
--- a/spikeextractors/sortingextractor.py
+++ b/spikeextractors/sortingextractor.py
@@ -135,12 +135,8 @@ class SortingExtractor(ABC, BaseExtractor):
                 raise ValueError("feature values should have the same length as the spike train")
         else:
             if isinstance(feature_name, str) and len(value) == len(indexes):
-                indexes = np.array(indexes)
-                indexes_sorted_indices = np.argsort(indexes)
-                value_sorted = value[indexes_sorted_indices]
-                indexes_sorted = indexes[indexes_sorted_indices]
-                self._features[unit_id][feature_name] = value_sorted
-                self._features[unit_id][feature_name + '_idxs'] = indexes_sorted
+                self._features[unit_id][feature_name] = value
+                self._features[unit_id][feature_name + '_idxs'] = indexes
             else:
                 if not isinstance(feature_name, str):
                     raise ValueError("feature_name must be a string")
Preserve memmap type when setting features!
py
diff --git a/scripts/lib/mha_config_helper.py b/scripts/lib/mha_config_helper.py
index <HASH>..<HASH> 100644
--- a/scripts/lib/mha_config_helper.py
+++ b/scripts/lib/mha_config_helper.py
@@ -1,7 +1,7 @@
 import ConfigParser
 
 class MHA_config_helper(object):
-    CONFIG_PATH = "/usr/local/mha-helper/conf/global.conf.sample"
+    CONFIG_PATH = "/usr/local/mha-helper/conf/global.conf"
     IP = "/sbin/ip"
     ARPING = "/sbin/arping"
     SSH = "/usr/bin/ssh"
Correct the global config path in config_helper
py
diff --git a/ccmlib/node.py b/ccmlib/node.py
index <HASH>..<HASH> 100644
--- a/ccmlib/node.py
+++ b/ccmlib/node.py
@@ -1233,3 +1233,13 @@ class Node():
         common.replace_in_file(dst,'CASSANDRA_PARAMS=',' $env:CASSANDRA_PARAMS=\'-Dcassandra' +                       # -Dcassandra
                                ' -Dlogback.configurationFile=/"\' + "$env:CASSANDRA_CONF" + \'/logback.xml"\'' +      # -Dlogback.configurationFile=/"$env:CASSANDRA_CONF/logback.xml"
                                ' -Dcassandra.config=file:"\' + "///$env:CASSANDRA_CONF" + \'/cassandra.yaml"\'')      # -Dcassandra.config=file:"///$env:CASSANDRA_CONF/cassandra.yaml"
+
+    def get_conf_option(self, option):
+        conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_CONF)
+        with open(conf_file, 'r') as f:
+            data = yaml.load(f)
+
+        if option in data:
+            return data[option]
+        else:
+            return None
Add function for easy parsing of node's yaml file
py
diff --git a/pyimgur/request.py b/pyimgur/request.py
index <HASH>..<HASH> 100644
--- a/pyimgur/request.py
+++ b/pyimgur/request.py
@@ -92,9 +92,12 @@ def send_request(url, params=None, method='GET', data_field='data',
     if data_field is not None:
         content = content[data_field]
     if not resp.ok:
-        error_msg = "Imgur ERROR message: {}".format(content['error'])
-        print(error_msg)
-        print("-" * len(error_msg))
+        try:
+            error_msg = "Imgur ERROR message: {}".format(content['error'])
+            print(error_msg)
+            print("-" * len(error_msg))
+        except Exception:
+            pass
         resp.raise_for_status()
     ratelimit_info = {key: int(value) for (key, value)
                       in resp.headers.items() if key.startswith('x-ratelimit')}
Wrap imgur error in try... except

If the response isn't json with an error key, such as a <I> bad request, then the error handling would error out.
py
diff --git a/sos/report/plugins/foreman_installer.py b/sos/report/plugins/foreman_installer.py
index <HASH>..<HASH> 100644
--- a/sos/report/plugins/foreman_installer.py
+++ b/sos/report/plugins/foreman_installer.py
@@ -25,6 +25,8 @@ class ForemanInstaller(Plugin, DebianPlugin, UbuntuPlugin):
             "/etc/foreman-installer/*",
             "/var/log/foreman-installer/*",
             "/var/log/foreman-maintain/*",
+            "/var/lib/foreman-maintain/data.yml",
+            "/etc/foreman-maintain/foreman_maintain.yml",
             # specifically collect .applied files
             # that would be skipped otherwise as hidden files
             "/etc/foreman-installer/scenarios.d/*/.applied",
[foreman-installer] collect foreman-maintain config and storage data

data.yml keeps status of foreman-maintain activities in progress.
foreman_maintain.yml contains foreman-maintain config.

Resolves: #<I>
py
diff --git a/lib/devpipeline_configure/cache.py b/lib/devpipeline_configure/cache.py index <HASH>..<HASH> 100644 --- a/lib/devpipeline_configure/cache.py +++ b/lib/devpipeline_configure/cache.py @@ -116,6 +116,9 @@ class _CachedComponent: def __iter__(self): return _CachedComponetKeys(self._component) + def __contains__(self, item): + return item in self._component + class _CachedComponentIterator: # pylint: disable=too-few-public-methods
Implement __contains__ method for component cache
py
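What the dunder buys callers, in miniature: with __contains__ delegating to the wrapped mapping, `in` works directly on the cache wrapper. The stand-in class below is illustrative, not the real devpipeline component.

class CachedComponent:
    """Stand-in for the cached-component wrapper."""
    def __init__(self, component):
        self._component = component

    def __contains__(self, item):
        return item in self._component

cache = CachedComponent({'build.tool': 'cmake'})
print('build.tool' in cache)  # True
print('scm.type' in cache)    # False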
diff --git a/wsdiscovery/threaded.py b/wsdiscovery/threaded.py index <HASH>..<HASH> 100644 --- a/wsdiscovery/threaded.py +++ b/wsdiscovery/threaded.py @@ -210,7 +210,14 @@ class NetworkingThread(_StoppableDaemonThread): self._seqnum += 1 else: for sock in list(self._multiOutUniInSockets.values()): - sock.sendto(data, (msg.getAddr(), msg.getPort())) + try: + sock.sendto(data, (msg.getAddr(), msg.getPort())) + except OSError as e: + # sendto will fail for interfaces that do not support multicast or are not up. + # An example of the first case is a wireguard vpn interface. + # In either just log as debug and ignore the error. + logger.debug("Interface for %s does not support multicast or is not UP.\n\tOSError %s", + socket.inet_ntoa(sock.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, 4)), e) if self._capture: self._capture.write("%i SEND %s:%s\n" % (self._seqnum, msg.getAddr(), msg.getPort())) self._capture.write(data.decode("utf-8") + "\n")
Handle the case where sendto fails on an interface that does not support it. This can be because the interface is a VPN tunnel or is DOWN.
py
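A hedged reproduction of the failure mode: sending a multicast datagram through a socket bound to an interface that is down or multicast-incapable raises OSError from sendto(), so the loop logs at debug level and moves on instead of aborting discovery. The WS-Discovery group and port below are the usual values but serve only as illustration.

import logging

logger = logging.getLogger(__name__)

def send_on_all(socks, data, addr=('239.255.255.250', 3702)):
    for sock in socks:
        try:
            sock.sendto(data, addr)
        except OSError as e:
            # e.g. a wireguard tunnel or a DOWN interface; skip it quietly
            logger.debug("interface rejected multicast send: %s", e)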
diff --git a/playhouse/postgres_ext.py b/playhouse/postgres_ext.py index <HASH>..<HASH> 100644 --- a/playhouse/postgres_ext.py +++ b/playhouse/postgres_ext.py @@ -272,8 +272,9 @@ class TSVectorField(IndexedFieldMixin, TextField): def Match(field, query, language=None): params = (language, query) if language is not None else (query,) + field_params = (language, field) if language is not None else (field,) return Expression( - fn.to_tsvector(field), + fn.to_tsvector(*field_params), OP.TS_MATCH, fn.to_tsquery(*params))
Fix Postgres extension's full-text search Match: the field should also be converted with to_tsvector using the given language.
py
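A sketch of the corrected behavior from the caller's side; the model and database name are assumptions. With the fix, both sides of the match go through the language-aware functions, producing SQL along the lines of to_tsvector('spanish', content) @@ to_tsquery('spanish', 'casa').

from peewee import Model, TextField
from playhouse.postgres_ext import Match, PostgresqlExtDatabase

db = PostgresqlExtDatabase('app')  # assumed database name

class Document(Model):
    content = TextField()

    class Meta:
        database = db

query = Document.select().where(
    Match(Document.content, 'casa', language='spanish'))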
diff --git a/voikko-fi/common/generate_lex_common.py b/voikko-fi/common/generate_lex_common.py index <HASH>..<HASH> 100644 --- a/voikko-fi/common/generate_lex_common.py +++ b/voikko-fi/common/generate_lex_common.py @@ -208,7 +208,7 @@ def write_entry(main_vocabulary,vocabulary_files,word, entry): def get_options(): try: optlist = ["min-frequency=", "extra-usage=", "style=", "destdir=", "no-baseform", "sourceid", "vanhat", "sukija", "sukija-ys"] - (opts, args) = getopt.getopt(sys.argv[1:], "", optlist) + (opts, args) = getopt.getopt([f for f in sys.argv[1:] if f.startswith("--")], "", optlist) except getopt.GetoptError: sys.stderr.write("Invalid option list for %s\n" % sys.argv[0]) sys.exit(1)
Fix option parsing after changes in ab<I>a8
py
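The workaround in isolation: getopt stops scanning at the first positional argument (unlike gnu_getopt), so positional file arguments are filtered out and only the '--'-prefixed flags are handed to the parser. The argv below is illustrative.

import getopt

argv = ['--style=dialect', 'words.txt', '--no-baseform', 'extra.txt']
longopts = ['style=', 'no-baseform']
opts, _ = getopt.getopt([a for a in argv if a.startswith('--')], '', longopts)
print(opts)  # [('--style', 'dialect'), ('--no-baseform', '')]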
diff --git a/python/src/nnabla/utils/converter/onnx/exporter.py b/python/src/nnabla/utils/converter/onnx/exporter.py index <HASH>..<HASH> 100644 --- a/python/src/nnabla/utils/converter/onnx/exporter.py +++ b/python/src/nnabla/utils/converter/onnx/exporter.py @@ -2454,7 +2454,8 @@ class OnnxExporter: def Broadcast(self, func): # Convert Constant+Expand nl = [] - shape = func.broadcast_param.shape.dim + shape = [self._batch_size if d < + 0 else d for d in func.broadcast_param.shape.dim] # Constant constant_newshape = fork_name("constant") c = generate_constant(constant_newshape, func.name + "_shape",
Fix Broadcast for ONNX Exporter.
py
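The shape fix in isolation: ONNX needs concrete dimensions for the Expand shape constant, so nnabla's -1 batch placeholder is replaced with the known batch size before the shape is baked in. Values are illustrative.

batch_size = 8
dims = [-1, 3, 32, 32]  # -1 stands in for the runtime batch dimension
shape = [batch_size if d < 0 else d for d in dims]
print(shape)            # [8, 3, 32, 32]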
diff --git a/jarn/mkrelease/mkrelease.py b/jarn/mkrelease/mkrelease.py index <HASH>..<HASH> 100644 --- a/jarn/mkrelease/mkrelease.py +++ b/jarn/mkrelease/mkrelease.py @@ -19,7 +19,7 @@ from setuptools import Setuptools from scp import SCP from scm import SCMFactory from urlparser import URLParser -from exit import msg_exit, err_exit +from exit import msg_exit, err_exit, warn MAXALIASDEPTH = 23 @@ -84,7 +84,8 @@ class Defaults(object): def getboolean(section, key, default=None): if parser.has_option(section, key): - return parser.getboolean(section, key) + try: return parser.getboolean(section, key) + except ValueError, e: warn(e) return default main_section = 'mkrelease'
Catch ValueErrors raised by ConfigParser.
py
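A standalone sketch of the hardening against bad config values, written for Python 3's configparser (the diff itself is Python 2 syntax): a value such as 'yes please' makes getboolean() raise ValueError, which is now surfaced as a warning while the default is returned.

import warnings
from configparser import ConfigParser

parser = ConfigParser()
parser.read_string("[mkrelease]\nsign = yes please\n")

def getboolean(section, key, default=None):
    if parser.has_option(section, key):
        try:
            return parser.getboolean(section, key)
        except ValueError as e:
            warnings.warn(str(e))  # report the bad value instead of crashing
    return default

print(getboolean('mkrelease', 'sign', default=False))  # False, plus a warning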
diff --git a/salt/runners/jobs.py b/salt/runners/jobs.py index <HASH>..<HASH> 100644 --- a/salt/runners/jobs.py +++ b/salt/runners/jobs.py @@ -204,6 +204,33 @@ def list_jobs(ext_source=None, ''' List all detectable jobs and associated functions + ext_source + The external job cache to use. Default: `None`. + + search_metadata + Search the metadata of a job for the provided string of dictionary. + Default: 'None'. + + search_function + Search the function of a job for the provided string. + Default: 'None'. + + search_target + Search the target of a job for the provided minion name. + Default: 'None'. + + start_time + Search for jobs where the start time of the job is greater than + or equal to the provided time stamp. Any timestamp supported + by the Dateutil (required) module can be used. + Default: 'None'. + + end_time + Search for jobs where the start time of the job is less than + or equal to the provided time stamp. Any timestamp supported + by the Dateutil (required) module can be used. + Default: 'None'. + CLI Example: .. code-block:: bash
Adding some docs for list_jobs parameters.
py
diff --git a/pycvodes/_config.py b/pycvodes/_config.py index <HASH>..<HASH> 100644 --- a/pycvodes/_config.py +++ b/pycvodes/_config.py @@ -1,4 +1,4 @@ env = { 'LAPACK': 'lapack', - 'SUNDIALS_LIBS': 'sundials_cvodes,sundials_nvecserial', + 'SUNDIALS_LIBS': 'sundials_cvodes,sundials_nvecserial,m', }
Add 'm' to libs
py
diff --git a/hairball/__init__.py b/hairball/__init__.py index <HASH>..<HASH> 100644 --- a/hairball/__init__.py +++ b/hairball/__init__.py @@ -8,7 +8,7 @@ from hairball.plugins import PluginController __version__ = '0.1a' -class ScratchAnalysis(object): +class Hairball(object): def __init__(self, argv): self.plugins = [] description = ('PATH can be either the path to a scratch file, or a ' @@ -62,6 +62,7 @@ class ScratchAnalysis(object): module_name = '{0}.{1}'.format(package, module_name) try: module = __import__(module_name, fromlist=[class_name]) + # Initializes the plugin by calling its constructor plugin = getattr(module, class_name)() # Verify plugin is of the correct class @@ -105,7 +106,7 @@ class ScratchAnalysis(object): def main(): - sa = ScratchAnalysis(sys.argv[1:]) - sa.initialize_plugins() - sa.process() - sa.finalize() + hairball = Hairball(sys.argv[1:]) + hairball.initialize_plugins() + hairball.process() + hairball.finalize()
The main object is called Hairball, duh.
py
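A hedged sketch of the plugin loading that the new comment annotates: __import__ with fromlist pulls in the module, getattr fetches the class, and calling it runs the plugin's constructor. The example names are assumptions.

def load_plugin(module_name, class_name):
    module = __import__(module_name, fromlist=[class_name])
    plugin_cls = getattr(module, class_name)
    return plugin_cls()  # instantiating here is what invokes the constructor

# e.g. load_plugin('hairball.plugins.checks', 'Animation')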
diff --git a/astroplan/core.py b/astroplan/core.py index <HASH>..<HASH> 100644 --- a/astroplan/core.py +++ b/astroplan/core.py @@ -462,10 +462,7 @@ class Observer(object): Returns the lower and upper limits on the time and altitudes of the horizon crossing. """ - alt = Latitude(alt) - - if len(np.shape(alt)) == 1: - alt = alt[np.newaxis, :] + alt = np.atleast_2d(Latitude(alt)) n_targets = alt.shape[0] if rise_set == 'rising':
Using np.atleast_2d because I want to be slick like @adrn
py
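What np.atleast_2d replaces: a 1-d input is promoted to shape (1, n) while 2-d input passes through untouched, which is exactly the branch the removed np.newaxis code handled by hand.

import numpy as np

print(np.atleast_2d([10, 20, 30]).shape)          # (1, 3)
print(np.atleast_2d([[10, 20], [30, 40]]).shape)  # (2, 2) -- unchanged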
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index <HASH>..<HASH> 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -633,28 +633,28 @@ class FrameApply(Apply): obj = self.obj axis = self.axis - # TODO: Avoid having to change state - self.obj = self.obj if self.axis == 0 else self.obj.T - self.axis = 0 - - result = None try: - result = super().agg() + if axis == 1: + result = FrameRowApply( + obj.T, + self.orig_f, + self.raw, + self.result_type, + self.args, + self.kwargs, + ).agg() + result = result.T if result is not None else result + else: + result = super().agg() except TypeError as err: exc = TypeError( "DataFrame constructor called with " f"incompatible data and dtype: {err}" ) raise exc from err - finally: - self.obj = obj - self.axis = axis - - if axis == 1: - result = result.T if result is not None else result if result is None: - result = self.obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) + result = obj.apply(self.orig_f, axis, args=self.args, **self.kwargs) return result
CLN: Don't modify state in FrameApply.agg (#<I>)
py
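For reference, the user-facing behavior the refactor preserves: aggregating along axis=1 is implemented by transposing, running the row-wise machinery, and transposing the result back.

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
print(df.agg('sum', axis=1))  # row sums: 4 and 6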
diff --git a/pyorbital/orbital.py b/pyorbital/orbital.py index <HASH>..<HASH> 100644 --- a/pyorbital/orbital.py +++ b/pyorbital/orbital.py @@ -111,11 +111,15 @@ def get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt): az_ = np.arctan(-top_e / top_s) - if hasattr(az_, 'chunks'): - # dask array + if hasattr(az_, 'chunks') and not hasattr(az_, 'loc'): + # dask array, but not xarray import dask.array as da az_ = da.where(top_s > 0, az_ + np.pi, az_) az_ = da.where(az_ < 0, az_ + 2 * np.pi, az_) + elif hasattr(az_, 'loc'): + # xarray + az_.data[top_s > 0] += np.pi + az_.data[az_.data < 0] += 2 * np.pi else: az_[top_s > 0] += np.pi az_[az_ < 0] += 2 * np.pi
Support xarray in get_observer_look. get_observer_look was erroneously turning dask.array into xarray DataArrays. However, the standard 'else:' block did not work for xarray DataArrays either. Add a special-case 'elif:' block for xarray DataArrays. Fixes #<I>.
py
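The dispatch in miniature: dask arrays expose .chunks but no .loc, while xarray DataArrays expose .loc (and .chunks when dask-backed), so checking .loc separates the two. Plain stand-in classes keep the sketch dependency-free.

def array_kind(arr):
    if hasattr(arr, 'chunks') and not hasattr(arr, 'loc'):
        return 'dask'
    elif hasattr(arr, 'loc'):
        return 'xarray'
    return 'numpy-like'

class FakeDask:      # real dask arrays have .chunks but no .loc
    chunks = ((2,),)

class FakeXarray:    # DataArrays have .loc, and .chunks when dask-backed
    chunks = ((2,),)
    loc = object()

print(array_kind(FakeDask()))    # dask
print(array_kind(FakeXarray()))  # xarray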
diff --git a/tests/unit/test_commands.py b/tests/unit/test_commands.py index <HASH>..<HASH> 100644 --- a/tests/unit/test_commands.py +++ b/tests/unit/test_commands.py @@ -582,11 +582,20 @@ class TestCommands(object): """ query = '|en que no desea la nueva pregunta' res = pmxbot.translate(c, e, '#test', 'testrunner', query) + assert 'new question' in res.lower() query = 'es|en que no desea la nueva pregunta' res = pmxbot.translate(c, e, '#test', 'testrunner', query) + assert 'new question' in res.lower() + + @pytest.has_internet + def test_translate_invalid_lang(self): + """ + An invalid language should give a nice error message. + """ # sp is not a language invalid_query = 'sp|en que no desea la nueva pregunta' - res = pmxbot.translate(c, e, '#test', 'testrunner', query) + res = pmxbot.translate(c, e, '#test', 'testrunner', invalid_query) + assert 'are you sure' in res.lower() def test_excuse(self): import excuses
Updated test_translate to actually fail when translation is failing.
py
diff --git a/docs/conf.py b/docs/conf.py index <HASH>..<HASH> 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,7 +19,6 @@ import sphinx_rtd_theme import okcupyd -os.environ['PYTHON_EGG_CACHE'] = '.' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here.
Remove dumb attempt to set environment variable for rtd.
py
diff --git a/visidata/shell.py b/visidata/shell.py index <HASH>..<HASH> 100644 --- a/visidata/shell.py +++ b/visidata/shell.py @@ -124,7 +124,7 @@ class DirSheet(DeferredSaveSheet): rowtype = 'files' # rowdef: Path columns = [ DeferredSetColumn('directory', - getter=lambda col,row: row.parent.relpath(col.sheet.source.resolve()), + getter=lambda col,row: row.parent.relpath(col.sheet.source), setter=lambda col,row,val: col.sheet.moveFile(row, val)), DeferredSetColumn('filename', getter=lambda col,row: row.name + row.ext,
[Bugfix DirSheet] Since commit <I>dd4, an unresolved path is expected - DirSheet was not picking up the directories of files
py
diff --git a/salt/cloud/clouds/msazure.py b/salt/cloud/clouds/msazure.py index <HASH>..<HASH> 100644 --- a/salt/cloud/clouds/msazure.py +++ b/salt/cloud/clouds/msazure.py @@ -497,7 +497,7 @@ def create(vm_): vm_['name'], exc.message ), # Show the traceback if the debug logging level is enabled - exc_info=log.isEnabledFor(logging.DEBUG) + exc_info_on_loglevel=logging.DEBUG ) return False
Use `exc_info_on_loglevel` instead of `exc_info`
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -10,11 +10,11 @@ setup( "Topic :: Software Development :: Libraries :: Python Modules" ], description="CS50 library for Python", - install_requires=["Flask>=1.0", "SQLAlchemy", "sqlparse", "termcolor"], + install_requires=["Flask>=1.0", "SQLAlchemy", "sqlparse", "termcolor", "wheel"], keywords="cs50", name="cs50", package_dir={"": "src"}, packages=["cs50"], url="https://github.com/cs50/python-cs50", - version="8.0.0" + version="8.0.1" )
added wheel, fixes #<I>
py
diff --git a/pkutils.py b/pkutils.py index <HASH>..<HASH> 100644 --- a/pkutils.py +++ b/pkutils.py @@ -34,7 +34,7 @@ from functools import total_ordering import semver -__version__ = '0.13.3' +__version__ = '0.13.4' __title__ = 'pkutils' __author__ = 'Reuben Cummings'
Bump to version <I>
py
diff --git a/claripy/vsa/strided_interval.py b/claripy/vsa/strided_interval.py index <HASH>..<HASH> 100644 --- a/claripy/vsa/strided_interval.py +++ b/claripy/vsa/strided_interval.py @@ -48,7 +48,7 @@ def normalize_types(f): # Make sure the `reversed` property of self is kept the same after operation if self._reversed: self_reversed = True - self = self.copy() + self = self._reverse() self._reversed = False else:
fix a (possible) bug in the strided-interval normalizer
py
diff --git a/openquake/calculators/classical.py b/openquake/calculators/classical.py index <HASH>..<HASH> 100644 --- a/openquake/calculators/classical.py +++ b/openquake/calculators/classical.py @@ -164,6 +164,7 @@ class PSHACalculator(base.HazardCalculator): else: tiles = [self.sitecol] param = dict(truncation_level=oq.truncation_level, imtls=oq.imtls) + maxweight = None for tile_i, tile in enumerate(tiles, 1): num_tasks = 0 num_sources = 0 @@ -171,7 +172,9 @@ class PSHACalculator(base.HazardCalculator): logging.info('Prefiltering tile %d of %d', tile_i, len(tiles)) src_filter = SourceFilter(tile, oq.maximum_distance) csm = self.csm.filter(src_filter) - if tile_i == 1: # set it only on the first tile + if csm.weight == 0: # the tile was completely filtered out + continue + if maxweight is None: # set maxweight only once maxweight = csm.get_maxweight(tasks_per_tile) logging.info('Using maxweight=%d', maxweight) if csm.has_dupl_sources and not opt:
Fixed the maxweight logic [skip hazardlib] [demos]
py
diff --git a/spacy/_ml.py b/spacy/_ml.py index <HASH>..<HASH> 100644 --- a/spacy/_ml.py +++ b/spacy/_ml.py @@ -227,6 +227,8 @@ def drop_layer(layer, factor=2.): def Tok2Vec(width, embed_size, pretrained_dims=0): + if pretrained_dims is None: + pretrained_dims = 0 cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH] with Model.define_operators({'>>': chain, '|': concatenate, '**': clone, '+': add}): norm = HashEmbed(width, embed_size, column=cols.index(NORM), name='embed_norm')
Avoid comparison to None in Tok2Vec
py
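The guard in isolation: normalizing None to 0 up front means later arithmetic and comparisons on pretrained_dims never hit a None. A minimal sketch with an assumed downstream use.

def tok2vec_width(width, pretrained_dims=None):
    if pretrained_dims is None:
        pretrained_dims = 0
    return width + pretrained_dims  # safe even when the caller passed None

print(tok2vec_width(96))      # 96
print(tok2vec_width(96, 64))  # 160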
diff --git a/pymysql/connections.py b/pymysql/connections.py index <HASH>..<HASH> 100644 --- a/pymysql/connections.py +++ b/pymysql/connections.py @@ -485,10 +485,10 @@ class EOFPacketWrapper(object): self.packet = from_packet from_packet.advance(1) - self.warning_count = from_packet.read(2) - server_status = struct.unpack('<h', self.packet.read(2))[0] + self.warning_count = struct.unpack('<h', from_packet.read(2))[0] + self.server_status = struct.unpack('<h', self.packet.read(2))[0] if DEBUG: print("server_status=", server_status) - self.has_next = server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS + self.has_next = self.server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS def __getattr__(self, key): if hasattr(self.packet, key):
Fix warning_count not being an int.
py
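Why the unpack matters, shown on an illustrative two-byte payload: the wire format is a little-endian int16, and without struct.unpack the count stays raw bytes, so later arithmetic or comparisons against ints fail.

import struct

raw = b'\x03\x00'                   # two bytes as read off the wire
print(raw)                          # b'\x03\x00' -- not usable as a number
print(struct.unpack('<h', raw)[0])  # 3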