Columns: diff — string, lengths 139 to 3.65k · message — string, lengths 8 to 627 · diff_languages — string, 1 class
diff --git a/tools/clean.py b/tools/clean.py
index <HASH>..<HASH> 100644
--- a/tools/clean.py
+++ b/tools/clean.py
@@ -18,7 +18,15 @@ Only really intended to be used by internal build scripts.
 """
 import os
+import sys
+
 try:
     os.remove("pymongo/_cbson.so")
 except:
     pass
+
+try:
+    from pymongo import _cbson
+    sys.exit("could still import _cbson")
+except ImportError:
+    pass
minor: clean script fails if cleaning fails
py
diff --git a/pyinfra/api/util.py b/pyinfra/api/util.py
index <HASH>..<HASH> 100644
--- a/pyinfra/api/util.py
+++ b/pyinfra/api/util.py
@@ -372,10 +372,9 @@ class get_file_io(object):
 
     @property
     def cache_key(self):
-        if hasattr(self.filename_or_io, 'read'):
-            return id(self.filename_or_io)
-
-        else:
+        # If we're a filename, cache against that - we don't cache in-memory
+        # file objects.
+        if isinstance(self.filename_or_io, six.string_types):
             return self.filename_or_io
 
 
@@ -385,9 +384,10 @@ def get_file_sha1(filename_or_io):
     '''
 
     file_data = get_file_io(filename_or_io)
+    cache_key = file_data.cache_key
 
-    if file_data.cache_key in FILE_SHAS:
-        return FILE_SHAS[file_data.cache_key]
+    if cache_key and cache_key in FILE_SHAS:
+        return FILE_SHAS[cache_key]
 
     with file_data as file_io:
         hasher = sha1()
@@ -401,7 +401,10 @@ def get_file_sha1(filename_or_io):
             buff = file_io.read(BLOCKSIZE)
 
     digest = hasher.hexdigest()
-    FILE_SHAS[file_data.cache_key] = digest
+
+    if cache_key:
+        FILE_SHAS[cache_key] = digest
+
     return digest
Only cache calculated SHA1s for on-disk files (nothing in-memory).
py
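The caching rule in this record — key on filenames only, skip in-memory file objects — can be sketched standalone. A minimal illustration with hypothetical names, not pyinfra's actual API:

import hashlib

_sha_cache = {}  # hypothetical module-level cache, keyed by filename

def file_sha1(filename_or_io):
    # Plain string filenames are stable cache keys; in-memory file
    # objects are not cached (their id() can be reused after GC).
    cache_key = filename_or_io if isinstance(filename_or_io, str) else None

    if cache_key and cache_key in _sha_cache:
        return _sha_cache[cache_key]

    hasher = hashlib.sha1()
    if cache_key:
        with open(cache_key, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                hasher.update(chunk)
    else:
        data = filename_or_io.read()
        if isinstance(data, str):
            data = data.encode('utf-8')
        hasher.update(data)

    digest = hasher.hexdigest()
    if cache_key:
        _sha_cache[cache_key] = digest
    return digest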
diff --git a/allauth/account/forms.py b/allauth/account/forms.py
index <HASH>..<HASH> 100644
--- a/allauth/account/forms.py
+++ b/allauth/account/forms.py
@@ -32,7 +32,7 @@ class PasswordField(forms.CharField):
                                        app_settings.PASSWORD_INPUT_RENDER_VALUE)
         kwargs['widget'] = forms.PasswordInput(render_value=render_value,
                                                attrs={'placeholder':
-                                                      _('Password')})
+                                                      _(kwargs.get("label"))})
         super(PasswordField, self).__init__(*args, **kwargs)
Fix placeholder of "Password (again)" fields. Both fields, "Password" and "Password (again)", passed the same placeholder ("Password") to forms.PasswordInput, even though the label changed correctly. This commit fixes that by also using the password field's label as its placeholder.
py
diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py
index <HASH>..<HASH> 100644
--- a/salt/utils/schedule.py
+++ b/salt/utils/schedule.py
@@ -443,6 +443,9 @@ class Schedule(object):
             config_dir,
             os.path.dirname(self.opts.get('default_include',
                                           salt.config.DEFAULT_MINION_OPTS['default_include'])))
+        if salt.utils.is_proxy():
+            # each proxy will have a separate _schedule.conf file
+            minion_d_dir = os.path.join(minion_d_dir, self.opts['proxyid'])
         if not os.path.isdir(minion_d_dir):
             os.makedirs(minion_d_dir)
Save _schedule.conf under the <minion ID> dir so each proxy can have its own scheduled functions, independently.
py
diff --git a/pythran/spec.py b/pythran/spec.py
index <HASH>..<HASH> 100644
--- a/pythran/spec.py
+++ b/pythran/spec.py
@@ -107,8 +107,9 @@ class SpecParser:
             p[0] = eval(p[1])
 
     def p_error(self, p):
-        err = SyntaxError("Invalid Pythran spec near '" + str(p.value) + "'")
-        err.lineno = p.lineno
+        p_val = p.value if p else ''
+        err = SyntaxError("Invalid Pythran spec near '" + str(p_val) + "'")
+        err.lineno = self.lexer.lineno
         if self.input_file:
             err.filename = self.input_file
         raise err
Fix error when no parentheses in export statement. The previous, verbose code crashed when, for example, someone forgot the parentheses: def test(): ... In that case the p token passed to p_error is None, so accessing p.value crashes.
py
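For context, PLY's yacc calls p_error(None) when input ends unexpectedly, which is exactly the case this record guards against. A minimal self-contained sketch of the same guard (the lineno fallback value here is hypothetical, standing in for self.lexer.lineno):

def p_error(p, lexer_lineno=3):
    # lexer_lineno stands in for the lexer's own line counter, which
    # stays valid even when yacc hands us p=None at unexpected EOF.
    p_val = p.value if p else ''
    err = SyntaxError("Invalid spec near '%s'" % p_val)
    err.lineno = lexer_lineno  # p.lineno would raise when p is None
    return err

err = p_error(None)
assert err.args[0] == "Invalid spec near ''"
assert err.lineno == 3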
diff --git a/beaver/config.py b/beaver/config.py
index <HASH>..<HASH> 100644
--- a/beaver/config.py
+++ b/beaver/config.py
@@ -376,6 +376,8 @@ class FileConfig():
                 self._logger.debug('Skipping glob due to no files found: %s' % filename)
                 continue
 
+            config = self._gen_config(config)
+
             for globbed_file in globs:
                 files[os.path.realpath(globbed_file)] = config
ensure all sections have the proper values on start
py
diff --git a/webapi_tests/certapp/certapp.py b/webapi_tests/certapp/certapp.py
index <HASH>..<HASH> 100644
--- a/webapi_tests/certapp/certapp.py
+++ b/webapi_tests/certapp/certapp.py
@@ -62,7 +62,7 @@ class CertAppMixin(object):
         self.assertTrue("certtest" in self.marionette.get_url())
 
         # Request that screen never dims or switch off.  Acquired wake locks
-        # are implicitly released # when the window object is closed or
+        # are implicitly released when the window object is closed or
         # destroyed.
         self.marionette.execute_script("""
             var wakeLock = window.navigator.requestWakeLock("screen");
fixup! Bug <I> - Turn on screen, unlock screen, and disable dimming
py
diff --git a/piwik/indico_piwik/plugin.py b/piwik/indico_piwik/plugin.py
index <HASH>..<HASH> 100644
--- a/piwik/indico_piwik/plugin.py
+++ b/piwik/indico_piwik/plugin.py
@@ -94,8 +94,8 @@ class PiwikPlugin(IndicoPlugin):
         self.register_js_bundle('jqtree_js', 'js/lib/jqTree/tree.jquery.js')
         self.register_css_bundle('jqtree_css', 'js/lib/jqTree/jqtree.css')
 
-    def track_download(self, attachment, user, **kwargs):
-        if not self.settings.get('enabled_for_downloads'):
+    def track_download(self, attachment, from_preview, **kwargs):
+        if from_preview or not self.settings.get('enabled_for_downloads'):
             return
         if attachment.type == AttachmentType.link:
             resource_url = attachment.link_url
Piwik: Do not track attachment downloads from preview. We already tracked it when opening the preview.
py
diff --git a/wfdb/io/record.py b/wfdb/io/record.py
index <HASH>..<HASH> 100644
--- a/wfdb/io/record.py
+++ b/wfdb/io/record.py
@@ -2738,7 +2738,7 @@ def rdrecord(record_name, sampfrom=0, sampto=None, channels=None,
 
     # Read the header fields
     if (pn_dir is not None) and ('.' not in pn_dir):
-        dir_list = pn_dir.split(os.sep)
+        dir_list = pn_dir.split('/')
         pn_dir = posixpath.join(dir_list[0], get_version(dir_list[0]),
                                 *dir_list[1:])
 
     if record_name.endswith('.edf'):
Fixes download on Windows (#<I>). Fixes URL path error generated on Windows machines while trying to read and download certain content. Fixes #<I>.
py
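The root cause is easy to demonstrate: pn_dir values are URL-style and '/'-separated, while os.sep is '\\' on Windows, so splitting on os.sep there leaves the string unsplit. A small illustration (the directory value and 'v1' version are made-up examples; get_version() is what really supplies the version):

import posixpath

pn_dir = 'mitdb/1.0.0'  # PhysioNet paths are always '/'-separated

# On Windows os.sep == '\\', so pn_dir.split(os.sep) would return
# ['mitdb/1.0.0'] as a single element and break the join below.
dir_list = pn_dir.split('/')
assert dir_list == ['mitdb', '1.0.0']
assert posixpath.join(dir_list[0], 'v1', *dir_list[1:]) == 'mitdb/v1/1.0.0'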
diff --git a/py/selenium/webdriver/remote/webdriver.py b/py/selenium/webdriver/remote/webdriver.py
index <HASH>..<HASH> 100644
--- a/py/selenium/webdriver/remote/webdriver.py
+++ b/py/selenium/webdriver/remote/webdriver.py
@@ -45,9 +45,10 @@ class WebDriver(object):
          command_executor - Either a command.CommandExecutor object
              or a string that specifies the URL of a remote server to send commands to.
          desired_capabilities - Dictionary holding predefined values for starting
-             a browser
-         browser_profile: A browser profile directory as a Base64-encoded
-             zip file.  Only used if Firefox is requested.
+             a browser
+         browser_profile:
+             A selenium.webdriver.firefox.firefox_profile.FirefoxProfile
+             object.  Only used if Firefox is requested.
         """
         if desired_capabilities is None:
             raise WebDriverException("Desired Capabilities can't be None")
@@ -88,8 +89,9 @@ class WebDriver(object):
          version: Which browser version to request.
          platform: Which platform to request the browser on.
          javascript_enabled: Whether the new session should support JavaScript.
-         browser_profile: A browser profile directory as a Base64-encoded
-             zip file.  Only used if Firefox is requested.
+         browser_profile:
+             A selenium.webdriver.firefox.firefox_profile.FirefoxProfile
+             object.  Only used if Firefox is requested.
         """
         if browser_profile:
             desired_capabilities['firefox_profile'] = browser_profile.encoded
JasonLeyba: Updating documentation about the expected type of browser_profile. r<I>
py
diff --git a/extras/appengine/sqlformat/legacy.py b/extras/appengine/sqlformat/legacy.py
index <HASH>..<HASH> 100644
--- a/extras/appengine/sqlformat/legacy.py
+++ b/extras/appengine/sqlformat/legacy.py
@@ -105,7 +105,18 @@ def _get_examples():
 def _get_sql(data, files=None):
     sql = None
     if files is not None and 'datafile' in files:
-        sql = files['datafile'].read().decode('utf-8')
+        raw = files['datafile'].read()
+        try:
+            sql = raw.decode('utf-8')
+        except UnicodeDecodeError, err:
+            logging.error(err)
+            logging.debug(repr(raw))
+            sql = (u'-- UnicodeDecodeError: %s\n'
+                   u'-- Please make sure to upload UTF-8 encoded data for now.\n'
+                   u'-- If you want to help improving this part of the application\n'
+                   u'-- please file a bug with some demo data at:\n'
+                   u'-- http://code.google.com/p/python-sqlparse/issues/entry\n'
+                   u'-- Thanks!\n' % err)
     if not sql:
         sql = data.get('data')
     return sql or ''
Silence UnicodeDecodeErrors, but try to collect some demo data.
py
diff --git a/rux/pdf.py b/rux/pdf.py
index <HASH>..<HASH> 100644
--- a/rux/pdf.py
+++ b/rux/pdf.py
@@ -38,13 +38,7 @@ class PDFGenerator(object):
 
     def __init__(self):
         self.commands = ['wkhtmltopdf',
-                         '-',
-                         # '--quiet',  # Be less verbose
-                         '--page-size',  # Set paper size to: A4
-                         'A4',
-                         '--outline',
-                         '--outline-depth',  # Set the depth of the outline
-                         '2',]
+                         '-',]
         self.config = config.default
         self.blog = blog
         self.author = author
pdf command: add OS X support
py
diff --git a/insights/client/client.py b/insights/client/client.py
index <HASH>..<HASH> 100644
--- a/insights/client/client.py
+++ b/insights/client/client.py
@@ -363,6 +363,7 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None):
     upload = pconn.upload_archive(tar_file, content_type, collection_duration)
 
     if upload.status_code in (200, 202):
+        write_to_disk(constants.lastupload_file)
         msg_name = determine_hostname(config.display_name)
         logger.info("Successfully uploaded report for %s.", msg_name)
         if config.register:
client: write lastupload during platform upload (#<I>)
py
diff --git a/salt/config/__init__.py b/salt/config/__init__.py
index <HASH>..<HASH> 100644
--- a/salt/config/__init__.py
+++ b/salt/config/__init__.py
@@ -2218,7 +2218,6 @@ def include_config(include, orig_path, verbose, exit_on_config_errors=False):
     main config file.
     '''
     # Protect against empty option
-
     if not include:
         return {}
 
@@ -3835,10 +3834,10 @@ def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_con
                                    defaults['default_include'])
     include = overrides.get('include', [])
 
-    overrides.update(include_config(default_include, path, verbose=False),
-                     exit_on_config_errors=exit_on_config_errors)
-    overrides.update(include_config(include, path, verbose=True),
-                     exit_on_config_errors=exit_on_config_errors)
+    overrides.update(include_config(default_include, path, verbose=False,
+                                    exit_on_config_errors=exit_on_config_errors))
+    overrides.update(include_config(include, path, verbose=True,
+                                    exit_on_config_errors=exit_on_config_errors))
     opts = apply_master_config(overrides, defaults)
     _validate_ssh_minion_opts(opts)
     _validate_opts(opts)
Fix incorrect invocation of salt.config.include_config. This was causing exit_on_config_errors to not be passed through, and adding an `exit_on_config_errors` key to the master config.
py
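The pitfall fixed here deserves a spelled-out example, since dict.update() silently accepts keyword arguments as new keys. A minimal reproduction with stand-in names, not salt's real function:

def include_config(include, exit_on_config_errors=False):
    # Stand-in for salt's include_config; just echoes what it received.
    return {'include': include, 'strict': exit_on_config_errors}

overrides = {}
# Buggy form: the kwarg binds to dict.update, not include_config, so it
# becomes a literal config key and the flag is silently dropped.
overrides.update(include_config('a'), exit_on_config_errors=True)
assert overrides['exit_on_config_errors'] is True  # bogus key appears
assert overrides['strict'] is False                # flag never passed

overrides = {}
# Fixed form: the kwarg goes inside the call.
overrides.update(include_config('a', exit_on_config_errors=True))
assert overrides['strict'] is True
assert 'exit_on_config_errors' not in overrides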
diff --git a/pyvista/plotting/qt_plotting.py b/pyvista/plotting/qt_plotting.py
index <HASH>..<HASH> 100644
--- a/pyvista/plotting/qt_plotting.py
+++ b/pyvista/plotting/qt_plotting.py
@@ -688,6 +688,7 @@ class BackgroundPlotter(QtInteractor):
             self.render_timer.timeout.connect(self.render)
             self.app_window.signal_close.connect(self.render_timer.stop)
             self.render_timer.start(twait)
+        self._first_time = False
 
     def _close_callback(self):
         """Make sure a screenhsot is acquired before closing."""
Set the first_time variable to enable render (#<I>)
py
diff --git a/skiski/ski.py b/skiski/ski.py
index <HASH>..<HASH> 100644
--- a/skiski/ski.py
+++ b/skiski/ski.py
@@ -1,6 +1,12 @@
 from helper import Typename
 
 
+class VirtualCurry:
+
+    def __b__(self, x):
+        return self.dot(x)
+
+
 class I(metaclass=Typename("I")):
     """
     the identity operator
reflect __b__ to dot
py
diff --git a/py/testdir_single_jvm/test_GBM_many_cols_enum.py b/py/testdir_single_jvm/test_GBM_many_cols_enum.py
index <HASH>..<HASH> 100644
--- a/py/testdir_single_jvm/test_GBM_many_cols_enum.py
+++ b/py/testdir_single_jvm/test_GBM_many_cols_enum.py
@@ -136,7 +136,7 @@ class Basic(unittest.TestCase):
 
         # GBM(train iterate)****************************************
         h2o.beta_features = True
-        ntrees = 100
+        ntrees = 10
         for max_depth in [5,10,20,40]:
             params = {
                 'learn_rate': .2,
Drop from <I> trees to <I> so the test finishes.
py
diff --git a/cmd2.py b/cmd2.py
index <HASH>..<HASH> 100755
--- a/cmd2.py
+++ b/cmd2.py
@@ -319,7 +319,8 @@ def get_paste_buffer():
     """
     pb_str = pyperclip.paste()
 
-    if six.PY2:
+    # If value returned from the clipboard is unicode and this is Python 2, convert to a "normal" Python 2 string first
+    if six.PY2 and not isinstance(pb_str, str):
         import unicodedata
         pb_str = unicodedata.normalize('NFKD', pb_str).encode('ascii', 'ignore')
Minor attempt at ruggedization of clipboard stuff in some weird cases on Python 2
py
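The NFKD-normalize-then-ASCII-encode idiom used above strips accents rather than failing on them; the same two calls behave identically on Python 3 strings, as this small illustration (invented input) shows:

import unicodedata

s = u'cafe\u0301'  # 'café' written with a combining acute accent
ascii_str = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
assert ascii_str == b'cafe'  # the accent is dropped rather than fatal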
diff --git a/beanstalkc.py b/beanstalkc.py
index <HASH>..<HASH> 100644
--- a/beanstalkc.py
+++ b/beanstalkc.py
@@ -185,7 +185,7 @@ class Connection(object):
                                  ['NOT_FOUND'])
 
 
-class Job:
+class Job(object):
     def __init__(self, conn, jid, body, reserved=True):
         self.conn = conn
         self.jid = jid
Minor nit: make Job inherit from object
py
diff --git a/bakery/management/commands/build.py b/bakery/management/commands/build.py
index <HASH>..<HASH> 100644
--- a/bakery/management/commands/build.py
+++ b/bakery/management/commands/build.py
@@ -221,14 +221,16 @@ Will use settings.BUILD_DIR by default."
 
             # If it is one we want to gzip...
             else:
-                # ... let the world know...
-                logger.debug("Gzipping %s" % filename)
+                # ... work out the file path ...
+                f_name = os.path.join(dest_path, filename)
+
+                # ... let the world know ...
+                logger.debug("Gzipping %s" % f_name)
                 if self.verbosity > 1:
-                    self.stdout.write("Gzipping %s" % filename)
+                    self.stdout.write("Gzipping %s" % f_name)
 
                 # ... create the new file in the build directory ...
                 f_in = open(og_file, 'rb')
-                f_name = os.path.join(dest_path, filename)
 
                 # ... copy the file to gzip compressed output ...
                 if float(sys.version[:3]) >= 2.7:
More logging for #<I>
py
diff --git a/ruuvitag_sensor/decoder.py b/ruuvitag_sensor/decoder.py
index <HASH>..<HASH> 100644
--- a/ruuvitag_sensor/decoder.py
+++ b/ruuvitag_sensor/decoder.py
@@ -17,10 +17,16 @@ def get_decoder(data_type):
         object: Data decoder
     """
     if data_type == 2:
+        log.warning("DATA TYPE 2 IS OBSOLETE. UPDATE YOUR TAG")
+        # https://github.com/ruuvi/ruuvi-sensor-protocols/blob/master/dataformat_04.md
        return UrlDecoder()
     if data_type == 4:
+        log.warning("DATA TYPE 4 IS OBSOLETE. UPDATE YOUR TAG")
+        # https://github.com/ruuvi/ruuvi-sensor-protocols/blob/master/dataformat_04.md
        return UrlDecoder()
     if data_type == 3:
+        log.warning("DATA TYPE 3 IS DEPRECATED - UPDATE YOUR TAG")
+        # https://github.com/ruuvi/ruuvi-sensor-protocols/blob/master/dataformat_03.md
         return Df3Decoder()
     return Df5Decoder()
Add deprecation warnings for outdated protocols (#<I>). As of <I> days ago, Ruuvi marked older protocols as obsolete or deprecated.
py
diff --git a/oceansdb/cars.py b/oceansdb/cars.py
index <HASH>..<HASH> 100644
--- a/oceansdb/cars.py
+++ b/oceansdb/cars.py
@@ -155,6 +155,16 @@ class CARS_var_nc(object):
         self.load_dims(dims=['lat', 'lon', 'depth'])
         self.set_keys()
 
+    def __getitem__(self, item):
+        """
+        !!!ATENTION!!! Need to improve this.
+        cars_data() should be modified to be used when loading ncs with source, thus avoiding the requirement on this getitem but running transparent.
+        """
+        if item == 'mn':
+            return cars_data(self.ncs[0])
+        else:
+            return self.ncs[0].variables[item]
+
     def keys(self):
         return self.KEYS
Temporary solution to getitem for CARS.
py
diff --git a/openquake/risk/job/general.py b/openquake/risk/job/general.py
index <HASH>..<HASH> 100644
--- a/openquake/risk/job/general.py
+++ b/openquake/risk/job/general.py
@@ -218,8 +218,10 @@ class RiskJobMixin(mixins.Mixin):
         Generates the tuples (point, asset) for all assets known to this job
         that are contained in grid.
 
-        point is a point of the grid
-        asset is a dict representing an asset
+        :returns: tuples (point, asset) where:
+            * point is a :py:class:`openquake.shapes.Point` on the grid
+
+            * asset is a :py:class:`dict` representing an asset
         """
 
         for point in grid:
Commented grid_assets Former-commit-id: <I>da<I>fcec3e<I>cbc<I>fbc6f<I>e<I>f<I>f<I>
py
diff --git a/pocket.py b/pocket.py
index <HASH>..<HASH> 100644
--- a/pocket.py
+++ b/pocket.py
@@ -1,4 +1,5 @@
 import requests
+import json
 
 from functools import wraps
 
@@ -79,7 +80,7 @@ def bulk_wrapper(fn):
             'actions': [query],
         }
         payload.update(self._payload)
-        return self._make_request(url, payload)
+        return self._make_request(url, json.dumps(payload), headers={'content-type': 'application/json'})
 
     return wrapped
 
@@ -119,11 +120,13 @@ class Pocket(object):
     @classmethod
     def _make_request(cls, url, payload, headers=None):
         print payload
+        print url
         r = requests.post(url, data=payload, headers=headers)
 
         if r.status_code > 399:
             error_msg = cls.statuses.get(r.status_code)
             extra_info = r.headers.get('X-Error')
+            print r.headers
             raise EXCEPTIONS.get(r.status_code, PocketException)(
                 '%s. %s' % (error_msg, extra_info)
             )
@@ -267,7 +270,7 @@ class Pocket(object):
         payload.update(self._payload)
 
         self._bulk_query = []
-        return self._make_request(url, payload)
+        return self._make_request(url, json.dumps(payload), headers={'content-type': 'application/json'})
 
     @classmethod
     def get_request_token(
Added headers to bulk request. For some reason, calls to /v3/send needed an application/json content-type header, else they would raise <I>.
py
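For reference, the shape of such a request with the requests library; the endpoint comes from the message above, the payload is invented, and newer requests versions can also do the serialization themselves via the json= parameter:

import json
import requests

payload = {'actions': [{'action': 'add', 'url': 'http://example.com'}]}

# What the patch does: serialize by hand and set the content type.
r = requests.post('https://getpocket.com/v3/send',
                  data=json.dumps(payload),
                  headers={'content-type': 'application/json'})

# Equivalent shorthand (requests >= 2.4.2): it serializes the dict and
# sets the application/json header itself.
r = requests.post('https://getpocket.com/v3/send', json=payload)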
diff --git a/blockstore/lib/operations/preorder.py b/blockstore/lib/operations/preorder.py
index <HASH>..<HASH> 100644
--- a/blockstore/lib/operations/preorder.py
+++ b/blockstore/lib/operations/preorder.py
@@ -185,7 +185,8 @@ def parse(bin_payload):
 
    return {
       'opcode': 'NAME_PREORDER',
      'preorder_name_hash': name_hash,
-      'consensus_hash': consensus_hash
+      'consensus_hash': consensus_hash,
+      'quantity': 1
    }
Add implicit quantity field to preorder (= 1 for single preorder)
py
diff --git a/src/urh/dev/gr/SpectrumThread.py b/src/urh/dev/gr/SpectrumThread.py
index <HASH>..<HASH> 100644
--- a/src/urh/dev/gr/SpectrumThread.py
+++ b/src/urh/dev/gr/SpectrumThread.py
@@ -30,6 +30,9 @@ class SpectrumThread(AbstractBaseThread):
         while not self.isInterruptionRequested():
             try:
                 rcvd += recv(32768)  # Receive Buffer = 32768 Byte
+            except zmq.error.Again:
+                # timeout
+                continue
             except (zmq.error.ContextTerminated, ConnectionResetError):
                 self.stop("Stopped receiving, because connection was reset")
                 return
fix crash for spectrum analyzer with GR backend (#<I>)
py
diff --git a/airflow/models/trigger.py b/airflow/models/trigger.py
index <HASH>..<HASH> 100644
--- a/airflow/models/trigger.py
+++ b/airflow/models/trigger.py
@@ -24,6 +24,7 @@ from airflow.models.base import Base
 from airflow.models.taskinstance import TaskInstance
 from airflow.triggers.base import BaseTrigger
 from airflow.utils import timezone
+from airflow.utils.retries import run_with_db_retries
 from airflow.utils.session import provide_session
 from airflow.utils.sqlalchemy import ExtendedJSON, UtcDateTime
 from airflow.utils.state import State
@@ -88,9 +89,11 @@ class Trigger(Base):
         (triggers have a one-to-many relationship to both)
         """
         # Update all task instances with trigger IDs that are not DEFERRED to remove them
-        session.query(TaskInstance).filter(
-            TaskInstance.state != State.DEFERRED, TaskInstance.trigger_id.isnot(None)
-        ).update({TaskInstance.trigger_id: None})
+        for attempt in run_with_db_retries():
+            with attempt:
+                session.query(TaskInstance).filter(
+                    TaskInstance.state != State.DEFERRED, TaskInstance.trigger_id.isnot(None)
+                ).update({TaskInstance.trigger_id: None})
         # Get all triggers that have no task instances depending on them...
         ids = [
             trigger_id
Handle occasional deadlocks in trigger with retries (#<I>) Fixes: #<I>
py
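Airflow's run_with_db_retries is built on tenacity's Retrying iterator; a rough stand-in for the pattern (not Airflow's actual helper, and retrying on bare Exception is a simplification) looks like:

import tenacity

def run_with_db_retries(max_attempts=3):
    # Iterating a Retrying object yields context-manager "attempts";
    # if the with-body raises, the loop re-runs it up to the stop limit.
    return tenacity.Retrying(
        stop=tenacity.stop_after_attempt(max_attempts),
        reraise=True,
    )

for attempt in run_with_db_retries():
    with attempt:
        pass  # the deadlock-prone UPDATE would go here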
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -35,8 +35,8 @@ setup(
         # 'Development Status :: 1 - Planning',
         # 'Development Status :: 2 - Pre-Alpha',
         # 'Development Status :: 3 - Alpha',
-        'Development Status :: 4 - Beta',
-        # 'Development Status :: 5 - Production/Stable',
+        # 'Development Status :: 4 - Beta',
+        'Development Status :: 5 - Production/Stable',
         # 'Development Status :: 6 - Mature',
         # 'Development Status :: 7 - Inactive',
         'Operating System :: OS Independent',
Upgrade development status to Production/Stable
py
diff --git a/djangoratings/models.py b/djangoratings/models.py
index <HASH>..<HASH> 100644
--- a/djangoratings/models.py
+++ b/djangoratings/models.py
@@ -13,6 +13,12 @@ class Vote(models.Model):
     class Meta:
         unique_together = (('content_type', 'object_id', 'key', 'user', 'ip_address'))
 
+    def partial_ip_address(self):
+        ip = self.ip_address.split('.')
+        ip[-1] = 'xxx'
+        return '.'.join(ip)
+    partial_ip_address = property(partial_ip_address)
+
 class Score(models.Model):
     content_type = models.ForeignKey(ContentType)
     object_id = models.PositiveIntegerField()
Added partial_ip_address property to Vote model
py
diff --git a/ipyrad/core/assembly.py b/ipyrad/core/assembly.py
index <HASH>..<HASH> 100644
--- a/ipyrad/core/assembly.py
+++ b/ipyrad/core/assembly.py
@@ -1024,7 +1024,24 @@ class Assembly(object):
         else:
             ## index the reference sequence
             ## Allow force to reindex the reference sequence
-            index_reference_sequence(self, force)
+            ## send to run on the cluster.
+            lbview = ipyclient.load_balanced_view()
+            async = lbview.apply(index_reference_sequence, *(self, force))
+
+            ## print a progress bar for the indexing
+            start = time.time()
+            while 1:
+                elapsed = datetime.timedelta(seconds=int(time.time()-start))
+                printstr = " {} | {} | s3 |".format("indexing reference", elapsed)
+                finished = int(async.ready())
+                progressbar(1, finished, printstr, spacer=self._spacer)
+                if finished:
+                    print("")
+                    break
+                time.sleep(0.9)
+            ## error check
+            if not async.successful():
+                raise IPyradWarningExit(async.result())
 
         ## Get sample objects from list of strings
         samples = _get_samples(self, samples)
added a progress bar tracker for reference indexing
py
diff --git a/tests/unit/utils/test_ssdp.py b/tests/unit/utils/test_ssdp.py
index <HASH>..<HASH> 100644
--- a/tests/unit/utils/test_ssdp.py
+++ b/tests/unit/utils/test_ssdp.py
@@ -13,6 +13,7 @@ from tests.support.mock import (
 
 from salt.utils import ssdp
 import datetime
+from salt.ext.six.moves import zip
 
 try:
     import pytest
Lintfix: W<I> (Python3 incompatibility)
py
diff --git a/src/you_get/extractors/universal.py b/src/you_get/extractors/universal.py
index <HASH>..<HASH> 100644
--- a/src/you_get/extractors/universal.py
+++ b/src/you_get/extractors/universal.py
@@ -67,6 +67,13 @@ def universal_download(url, output_dir='.', merge=True, info_only=False, **kwarg
             urls += re.findall(r'href="(https?://[^"]+\.png)"', page)
             urls += re.findall(r'href="(https?://[^"]+\.gif)"', page)
 
+            # MPEG-DASH MPD
+            mpd_urls = re.findall(r'src="(https?://[^"]+\.mpd)"', page)
+            for mpd_url in mpd_urls:
+                cont = get_content(mpd_url)
+                base_url = r1(r'<BaseURL>(.*)</BaseURL>', cont)
+                urls += [ r1(r'(.*/)[^/]*', mpd_url) + base_url ]
+
             # have some candy!
             candies = []
             i = 1
[universal] quick & dirty support for MPD files (with BaseURL)
py
diff --git a/mapillary_tools/commands/video_process_and_upload.py b/mapillary_tools/commands/video_process_and_upload.py
index <HASH>..<HASH> 100644
--- a/mapillary_tools/commands/video_process_and_upload.py
+++ b/mapillary_tools/commands/video_process_and_upload.py
@@ -147,6 +147,8 @@ class Command:
         vars_args = vars(args)
         if "geotag_source" in vars_args and vars_args["geotag_source"] == 'blackvue_videos' and ("device_make" not in vars_args or ("device_make" in vars_args and not vars_args["device_make"])):
             vars_args["device_make"] = "Blackvue"
+        if "device_make" in vars_args and vars_args["device_make"] == 'Blackvue':
+            vars_args["duplicate_angle"] = "360"
 
         sample_video(**({k: v for k, v in vars_args.iteritems()
                          if k in inspect.getargspec(sample_video).args}))
fix: add last blackvue fix to video_process_and_upload_command
py
diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py
index <HASH>..<HASH> 100644
--- a/sentry_sdk/tracing.py
+++ b/sentry_sdk/tracing.py
@@ -272,6 +272,8 @@ class Span(object):
         elif 400 <= http_status < 500:
             if http_status == 403:
                 self.set_status("permission_denied")
+            elif http_status == 404:
+                self.set_status("not_found")
             elif http_status == 429:
                 self.set_status("resource_exhausted")
             elif http_status == 413:
fix(tracing): Handle <I>
py
diff --git a/salt/states/boto_secgroup.py b/salt/states/boto_secgroup.py
index <HASH>..<HASH> 100644
--- a/salt/states/boto_secgroup.py
+++ b/salt/states/boto_secgroup.py
@@ -376,7 +376,7 @@ def _rules_present(
         _source_group_name = rule.get('source_group_name', None)
         if _source_group_name:
             _group_id = __salt__['boto_secgroup.get_group_id'](
-                _source_group_name, vpc_id, vpc_id, region, key, keyid, profile
+                _source_group_name, vpc_id, vpc_name, region, key, keyid, profile
             )
             if not _group_id:
                 msg = ('source_group_name {0} does not map to a valid'
One last bug to squash. Seriously. It's the last one. Ever! - fixed param vpc_id being passed where vpc_name was intended.
py
diff --git a/tests/test_tester/test_core.py b/tests/test_tester/test_core.py
index <HASH>..<HASH> 100644
--- a/tests/test_tester/test_core.py
+++ b/tests/test_tester/test_core.py
@@ -931,4 +931,6 @@ def test_peek_bitwise(target, simulator, capsys):
                            disp_type="realtime")
 
     out, _ = capsys.readouterr()
-    assert out.splitlines()[-2] == "_*****_*"
+    # vcs prints extra lines at end
+    offset = 8 if simulator == "vcs" else 2
+    assert out.splitlines()[-offset] == "_*****_*"
Add logic for vcs extra print lines
py
diff --git a/src/pyrobase/fmt.py b/src/pyrobase/fmt.py
index <HASH>..<HASH> 100644
--- a/src/pyrobase/fmt.py
+++ b/src/pyrobase/fmt.py
@@ -133,6 +133,9 @@ def to_unicode(text):
     try:
         # Try UTF-8 first
         return text.decode("UTF-8")
+    except AttributeError:  # non-text types have no "decode()"
+        # Try to return a text representation
+        return (unicode if PY2 else str)(text)
     except UnicodeError:
         try:
             # Then Windows Latin-1
to_unicode: try to convert non-text objects to their text representation
py
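The added AttributeError branch makes the conversion total over non-text inputs; a simplified Python 3 stand-in for the function shows the effect:

def to_unicode_sketch(value):
    # Simplified stand-in for pyrobase.fmt.to_unicode (Python 3 only).
    try:
        return value.decode("UTF-8")
    except AttributeError:  # ints, floats, None, ... have no .decode()
        return str(value)

assert to_unicode_sketch(b"abc") == "abc"
assert to_unicode_sketch(42) == "42"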
diff --git a/kibitzr/bash.py b/kibitzr/bash.py
index <HASH>..<HASH> 100644
--- a/kibitzr/bash.py
+++ b/kibitzr/bash.py
@@ -82,6 +82,13 @@ class WindowsExecutor(BashExecutor):
     EXECUTABLE = "cmd.exe"
     ARGS = ["/Q", "/C"]
 
+    @classmethod
+    def run_scipt(cls, name, stdin):
+        return BashExecutor.run_scipt(
+            name,
+            stdin.decode("utf-8"),
+        )
+
 
 def ensure_text(text):
     if not isinstance(text, six.text_type):
Fix pbs encoding for Windows and Python 3
py
diff --git a/aldryn_boilerplates/__init__.py b/aldryn_boilerplates/__init__.py
index <HASH>..<HASH> 100644
--- a/aldryn_boilerplates/__init__.py
+++ b/aldryn_boilerplates/__init__.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals, absolute_import
 __version__ = '0.3'
 __author__ = 'Stefan Foulis'
 __license__ = 'BSD'
-__copyright__ = "Copyright 2014, Divio Aldryn Ltd"
+__copyright__ = "Copyright 2015, Divio Aldryn Ltd"
 __maintainer__ = 'Stefan Foulis'
 __email__ = '[email protected]'
 __url__ = 'https://github.com/aldryn/aldryn-boilerplates/'
update copyright year to <I>
py
diff --git a/bin/noninteractive-alignment-panel.py b/bin/noninteractive-alignment-panel.py
index <HASH>..<HASH> 100755
--- a/bin/noninteractive-alignment-panel.py
+++ b/bin/noninteractive-alignment-panel.py
@@ -224,7 +224,7 @@ if __name__ == '__main__':
         print('\n'.join(titlesAlignments.sortTitles('maxScore')))
         sys.exit(0)
 
-    alignmentPanel(titlesAlignments, sortOn=args.sortOn, interactive=True,
+    alignmentPanel(titlesAlignments, sortOn=args.sortOn, interactive=False,
                    outputDir=args.outputDir,
                    idList=parseColors(args.color) if args.color else None,
                    equalizeXAxes=args.equalizeXAxes, xRange=args.xRange,
Set interactive to False in call from noninteractive-alignment-panel.py
py
diff --git a/protos/scope/javagen/extension.py b/protos/scope/javagen/extension.py
index <HASH>..<HASH> 100644
--- a/protos/scope/javagen/extension.py
+++ b/protos/scope/javagen/extension.py
@@ -13,6 +13,7 @@ from hob.template import TextGenerator
 from hob.utils import _
 import hob
 import os
+import re
 import shutil
 import sys
 
@@ -135,7 +136,12 @@ def applyOptions(package, config):
             updateOptions(config.get(service.name + '.options', element.__name__))
 
     for (item, value) in options.get('Package').iteritems():
-        package.options[item] = OptionValue(None, raw='"' + value + '"')
+        # hob doesn't encapsulate FooBar with quotes, forcing this behaviour.
+        # But it should not encapsulate enums, such as SPEED.
+        if re.match(r'^[A-Z]+$', value):
+            package.options[item] = OptionValue(value)
+        else:
+            package.options[item] = OptionValue(None, raw='"' + value + '"')
 
     if options.get('package_name'):
         package.name = options.get('package_name')
Workaround for hob not encapsulating camelized words
py
diff --git a/respite/serializers/base.py b/respite/serializers/base.py
index <HASH>..<HASH> 100644
--- a/respite/serializers/base.py
+++ b/respite/serializers/base.py
@@ -166,7 +166,7 @@ class Serializer(object):
         if isinstance(anything, dict):
             return serialize_dictionary(anything)
 
-        if isinstance(anything, list):
+        if isinstance(anything, (list, set)):
             return serialize_list(anything)
 
         if isinstance(anything, django.db.models.query.DateQuerySet):
Add support for set-serialization
py
diff --git a/py/h2o.py b/py/h2o.py
index <HASH>..<HASH> 100644
--- a/py/h2o.py
+++ b/py/h2o.py
@@ -1756,6 +1756,8 @@ class H2O(object):
         'source': data_key,
         # this is ignore??
         'cols': None,
+        'ignored_cols': None,
+        'validation': None,
         'response': None,
         'activation': None,
         'hidden': None,
still fails. does NeuralNet.json exist?
py
diff --git a/tests/test_extensions/test_redis.py b/tests/test_extensions/test_redis.py
index <HASH>..<HASH> 100644
--- a/tests/test_extensions/test_redis.py
+++ b/tests/test_extensions/test_redis.py
@@ -2,7 +2,9 @@
 from __future__ import unicode_literals, division, print_function, absolute_import
 
 from nose.tools import eq_
+from nose.plugins.skip import SkipTest
 from marrow.wsgi.objects.request import LocalRequest
+from redis.exceptions import ConnectionError
 from redis import StrictRedis
 
 from web.core.application import Application
@@ -12,9 +14,14 @@ def insert_data_controller(context):
     context.redis.set('bar', 'baz')
 
 
-class TestMongoDBExtension(object):
+class TestRedisExtension(object):
     def setup(self):
         self.connection = StrictRedis(db='testdb')
+        try:
+            self.connection.ping()
+        except ConnectionError:
+            raise SkipTest('No Redis server available')
+
         self.config = {
             'extensions': {
                 'redis': {
Fixed test class name and made the test skippable if no Redis server is present.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -16,4 +16,15 @@ setup(name='chwrapper',
       zip_safe=False,
       install_requires=[
          'requests==2.9.1',
+      ],
+      classifiers=[
+          "Operating System :: OS Independent",
+          "Programming Language :: Python",
+          "Programming Language :: Python :: 2",
+          "Programming Language :: Python :: 2.6",
+          "Programming Language :: Python :: 2.7",
+          "Programming Language :: Python :: 3",
+          "Programming Language :: Python :: 3.3",
+          "Programming Language :: Python :: 3.4",
+          "Programming Language :: Python :: 3.5",
      ])
Add classifiers to setup.py
py
diff --git a/mama_cas/response.py b/mama_cas/response.py
index <HASH>..<HASH> 100644
--- a/mama_cas/response.py
+++ b/mama_cas/response.py
@@ -90,7 +90,7 @@ class ValidationResponse(XmlResponseBase):
             auth_failure.set('code', error.code)
             auth_failure.text = error.msg
 
-        return etree.tostring(service_response)
+        return etree.tostring(service_response, encoding='UTF-8', method='xml')
 
     def get_attribute_elements(self, attributes):
         """
@@ -186,4 +186,4 @@ class ProxyResponse(XmlResponseBase):
             proxy_failure.set('code', error.code)
             proxy_failure.text = error.msg
 
-        return etree.tostring(service_response)
+        return etree.tostring(service_response, encoding='UTF-8', method='xml')
Output CAS <I> XML responses as UTF-8
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ setup(
     author = 'Jonathan Friedman, Eugene Yurtsev',
     author_email = '[email protected]',
     url = 'https://gorelab.bitbucket.org/flowcytometrytools',
-    download_url = 'https://bitbucket.org/gorelab/flowcytometrytools/get/v{0}.zip'.format(version),
+    download_url = 'https://github.com/eyurtsev/FlowCytometryTools/archive/v{0}.zip'.format(version),
     keywords = ['flow cytometry', 'data analysis', 'cytometry', 'single cell'],
     license='MIT',
     #dependency_links = ['https://bitbucket.org/gorelab/goreutilities/get/v{0}.zip#egg=GoreUtilities-{0}'.format(gore_utilities_version)],
updating package download location to use github
py
diff --git a/neurondm/neurondm/models/apinat_npo.py b/neurondm/neurondm/models/apinat_npo.py
index <HASH>..<HASH> 100644
--- a/neurondm/neurondm/models/apinat_npo.py
+++ b/neurondm/neurondm/models/apinat_npo.py
@@ -81,10 +81,20 @@ def main():
                 log.error(f'bad data for {c} {s} {p} {o}')
                 raise e
 
+    problems = ('8a', '8v', 'sstom-6', 'keast-2', 'sdcol-k', 'sdcol-l')
+    def eff(n):
+        return bool([x for x in problems
+                     if x in n.id_ and '20' not in n.id_])
+
     config = Config('apinat-simple-sheet')
+    sigh = []
     nrns = []
     for id, phenos in dd.items():
         n = NeuronApinatSimple(*phenos, id_=id)
+        if eff(n):
+            n._sigh()  # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX FIXME figure out why this is not getting called internally
+            sigh.append(n)
+
         nrns.append(n)
 
     config.write()
apinat npo hardcode fix for _sigh failures. Amusingly, I named the variable sigh before I knew that _sigh not being called was the issue.
py
diff --git a/src/moneyed/classes.py b/src/moneyed/classes.py
index <HASH>..<HASH> 100644
--- a/src/moneyed/classes.py
+++ b/src/moneyed/classes.py
@@ -336,7 +336,7 @@ TJS = add_currency('TJS', '972', 'Somoni', ['TAJIKISTAN'])
 TMM = add_currency('TMM', '795', 'Manat', ['TURKMENISTAN'])
 TND = add_currency('TND', '788', 'Tunisian Dinar', ['TUNISIA'])
 TOP = add_currency('TOP', '776', 'Paanga', ['TONGA'])
-TRY = add_currency('TRY', '949', 'New Turkish Lira', ['TURKEY'])
+TRL = add_currency('TRL', '949', 'Turkish Lira', ['TURKEY'])
 TTD = add_currency('TTD', '780', 'Trinidad and Tobago Dollar', ['TRINIDAD AND TOBAGO'])
 TVD = add_currency('TVD', 'Nil', 'Tuvalu dollar', ['TUVALU'])
 TWD = add_currency('TWD', '901', 'New Taiwan Dollar', ['TAIWAN'])
TRY (New Lira) turned back into TRL (Lira) in <I> December <I>. New Lira was a temporary currency.
py
diff --git a/thumbor/engines/pil.py b/thumbor/engines/pil.py
index <HASH>..<HASH> 100644
--- a/thumbor/engines/pil.py
+++ b/thumbor/engines/pil.py
@@ -169,6 +169,9 @@ class Engine(BaseEngine):
             else:
                 options['qtables'] = qtables_config
 
+        if ext == '.png':
+            options['optimize'] = True
+
         if options['quality'] is None:
             options['quality'] = self.context.config.QUALITY
 
@@ -185,7 +188,6 @@ class Engine(BaseEngine):
         try:
             if ext == '.webp':
                 if self.image.mode not in ['RGB', 'RGBA']:
-                    mode = None
                     if self.image.mode == 'P':
                         mode = 'RGBA'
                     else:
Optimize png images as well
py
diff --git a/test/testserver.py b/test/testserver.py
index <HASH>..<HASH> 100755
--- a/test/testserver.py
+++ b/test/testserver.py
@@ -42,9 +42,6 @@ def start(port, debug):
         port=8765,
         debug=False,
         error_reporter=test_crash_reporter,
-        jwt_secret=os.environ.get("KLUE_JWT_SECRET"),
-        jwt_audience=os.environ.get("KLUE_JWT_AUDIENCE"),
-        jwt_issuer=os.environ.get("KLUE_JWT_ISSUER"),
     )
 
     api.load_apis('.', include_crash_api=True)
     api.start(serve="crash")
And remove jwt arguments from testserver as well
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,7 @@ A drop in replacement for Python's datetime module which cares deeply about tim
 
 if sys.version[:3] < '3.0':
     data['install_requires'].append('pytz >= 2007g')
-    data['install_requires'].append('python-dateutil >= 1.4, < 2.0')
+    data['install_requires'].append('python-dateutil >= 1.4')
 else:
     data['install_requires'].append('pytz >= 2011g')
     data['install_requires'].append('python-dateutil >= 2.0')
Drop requirement for python-dateutil<<I>
py
diff --git a/scot/ooapi.py b/scot/ooapi.py
index <HASH>..<HASH> 100644
--- a/scot/ooapi.py
+++ b/scot/ooapi.py
@@ -306,6 +306,8 @@ class Workspace:
         self.var_model_ = None
         self.var_cov_ = None
         self.connectivity_ = None
+        self.mixmaps_ = []
+        self.unmixmaps_ = []
 
     def fit_var(self):
         """ Fit a var model to the source activations.
Fixed source removal to invalidate topo maps
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -51,7 +51,7 @@ master_doc = "index"
 
 # General information about the project.
 project = "Datasette"
-copyright = "2017-2021, Simon Willison"
+copyright = "2017-2022, Simon Willison"
 author = "Simon Willison"
 
 # Disable -- turning into –
Updated copyright years in documentation footer
py
diff --git a/LiSE/LiSE/alchemy.py b/LiSE/LiSE/alchemy.py
index <HASH>..<HASH> 100644
--- a/LiSE/LiSE/alchemy.py
+++ b/LiSE/LiSE/alchemy.py
@@ -425,15 +425,6 @@ def indices_for_table_dict(table):
             table['senses'].c.sense
         ),
         Index(
-            'travel_reqs_idx',
-            table['travel_reqs'].c.character
-        ),
-        Index(
-            'things_idx',
-            table['things'].c.character,
-            table['things'].c.thing
-        ),
-        Index(
             'avatars_idx',
             table['avatars'].c.character_graph,
             table['avatars'].c.avatar_graph,
Clean up alchemy, get rid of dead db code. There were a bunch of queries to fetch the *current, effective* state of game entities. These are probably useful for testing, and once helped me clarify my thinking on the data model. But they haven't been used in years and have no reason to exist in production. I might bring them back in unit tests later.
py
diff --git a/tests/test_integrations_py2.py b/tests/test_integrations_py2.py
index <HASH>..<HASH> 100644
--- a/tests/test_integrations_py2.py
+++ b/tests/test_integrations_py2.py
@@ -4,6 +4,7 @@ except ImportError:
     from io import StringIO
 from datetime import timedelta
 import pytest
+import sys
 
 import asyncio
 from tornado import gen
 from tornado import ioloop
Ooops. Forgot import.
py
diff --git a/zipline/finance/performance.py b/zipline/finance/performance.py
index <HASH>..<HASH> 100644
--- a/zipline/finance/performance.py
+++ b/zipline/finance/performance.py
@@ -508,7 +508,7 @@ class PerformancePeriod(object):
         return int(base * round(float(x) / base))
 
     def calculate_positions_value(self):
-        return np.vdot(self._position_amounts, self._position_last_sale_prices)
+        return np.dot(self._position_amounts, self._position_last_sale_prices)
 
     def update_last_sale(self, event):
         is_trade = event.type == zp.DATASOURCE_TYPE.TRADE
Uses numpy.dot instead of numpy.vdot to calculate positions value. Since the position amount and price ndarrays are one dimensional and use real numbers, we do not need the overhead of the extra case handling provided by numpy.vdot, which comes at a cost of performance.
py
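The two NumPy calls agree for the 1-D real arrays used here; vdot only adds complex conjugation (a no-op for reals) and flattening overhead. A quick check with invented position data:

import numpy as np

amounts = np.array([10.0, -5.0, 2.0])    # invented position sizes
prices = np.array([101.5, 47.0, 250.0])  # invented last-sale prices

assert np.isclose(np.dot(amounts, prices), np.vdot(amounts, prices))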
diff --git a/osmnx/footprints.py b/osmnx/footprints.py
index <HASH>..<HASH> 100644
--- a/osmnx/footprints.py
+++ b/osmnx/footprints.py
@@ -1,6 +1,7 @@
 """Download and plot footprints from OpenStreetMap."""
 
 import geopandas as gpd
+import numpy as np
 from shapely.geometry import LineString
 from shapely.geometry import MultiPolygon
 from shapely.geometry import Polygon
@@ -114,6 +115,10 @@ def _create_footprints_gdf(
 
     # Convert footprints dictionary to a GeoDataFrame
     gdf = gpd.GeoDataFrame.from_dict(footprints, orient="index")
+    if "geometry" not in gdf.columns:
+        # if there is no geometry column, create a null column
+        gdf["geometry"] = np.nan
+    gdf.set_geometry("geometry")
     gdf.crs = settings.default_crs
 
     # filter the gdf to only include valid Polygons/MultiPolygons
     if retain_invalid is False
ensure footprints gdf has a geometry col before assigning crs
py
diff --git a/pages/placeholders.py b/pages/placeholders.py
index <HASH>..<HASH> 100644
--- a/pages/placeholders.py
+++ b/pages/placeholders.py
@@ -1,5 +1,4 @@
 """Placeholder module, that's where the smart things happen."""
-
 from pages.widgets_registry import get_widget
 from pages import settings
 from pages.models import Content
@@ -18,7 +17,7 @@ from django.utils.safestring import mark_safe
 from django.utils.text import unescape_string_literal
 from django.template.loader import render_to_string
 from django.template import RequestContext
-
+from django.core.files.uploadedfile import UploadedFile
 import logging
 import os
 import time
@@ -282,7 +281,7 @@ class ImagePlaceholderNode(PlaceholderNode):
         filename = ''
         if change and data:
             # the image URL is posted if not changed
-            if type(data) is str:
+            if not isinstance(data, UploadedFile):
                 return
             filename = get_filename(page, self, data)
@@ -321,7 +320,7 @@ class FilePlaceholderNode(PlaceholderNode):
         filename = ''
         if change and data:
             # the image URL is posted if not changed
-            if type(data) is str:
+            if not isinstance(data, UploadedFile):
                 return
             filename = get_filename(page, self, data)
test the data for what type we are looking for
py
diff --git a/core/src/rabird/core/string.py b/core/src/rabird/core/string.py
index <HASH>..<HASH> 100644
--- a/core/src/rabird/core/string.py
+++ b/core/src/rabird/core/string.py
@@ -4,6 +4,12 @@
 '''
 
 def cstring_encode(text):
+    '''
+    Encode the input text as a c style string .
+
+    Convert something like "\" to "\\", new line symbol to "\n",
+    carriage return symbol to "\r", etc.
+    '''
     result = []
     convert_table = {'\\':'\\\\', '\n':'\\n', '\r':'\\r', '\t':'\\t',
                      '"':'\\"', "'":"\\'"}
Added comments about cstring_encode()
py
diff --git a/pysat/instruments/icon_ivm.py b/pysat/instruments/icon_ivm.py
index <HASH>..<HASH> 100644
--- a/pysat/instruments/icon_ivm.py
+++ b/pysat/instruments/icon_ivm.py
@@ -61,6 +61,7 @@ sat_ids = {'a': [''],
 _test_dates = {'a': {'': dt.datetime(2020, 1, 1)},
                'b': {'': dt.datetime(2020, 1, 1)}}  # IVM-B not yet engaged
 _test_download_travis = {'a': {kk: False for kk in tags.keys()}}
+_test_download = {'b': {kk: False for kk in tags.keys()}}
 _password_req = {'b': {kk: True for kk in tags.keys()}}
 
 aname = 'ICON_L2-7_IVM-A_{year:04d}-{month:02d}-{day:02d}_v02r002.NC'
BUG: ivm-b needs test_download=False
py
diff --git a/squad/ci/backend/lava.py b/squad/ci/backend/lava.py
index <HASH>..<HASH> 100644
--- a/squad/ci/backend/lava.py
+++ b/squad/ci/backend/lava.py
@@ -69,6 +69,9 @@ class Backend(BaseBackend):
         self.context = zmq.Context()
         self.socket = self.context.socket(zmq.SUB)
         self.socket.setsockopt_string(zmq.SUBSCRIBE, "")
+        self.socket.setsockopt(zmq.HEARTBEAT_IVL, 1000)  # 1 s
+        self.socket.setsockopt(zmq.HEARTBEAT_TIMEOUT, 10000)  # 10 s
+
         self.socket.connect(listener_url)
 
         self.log_debug("connected to %s" % listener_url)
ci/lava: set heartbeat options. They seem to provide a good improvement in the quality of the connection with the LAVA server ZMQ endpoint. Without these, whenever there was a period of a few minutes without new messages, we would stop receiving messages forever.
py
diff --git a/benchexec/tools/two_ls.py b/benchexec/tools/two_ls.py
index <HASH>..<HASH> 100644
--- a/benchexec/tools/two_ls.py
+++ b/benchexec/tools/two_ls.py
@@ -50,7 +50,7 @@ class Tool(benchexec.tools.template.BaseTool2):
         elif returncode == 0:
             status = result.RESULT_TRUE_PROP
         elif returncode == 10:
-            if len(run.output) > 0:
+            if run.output:
                 result_str = run.output[-1].strip()
                 if result_str == "FALSE(valid-memtrack)":
                     status = result.RESULT_FALSE_MEMTRACK
updated two_ls: replaced length check on run.output
py
diff --git a/src/qinfer/tests/test_test.py b/src/qinfer/tests/test_test.py
index <HASH>..<HASH> 100644
--- a/src/qinfer/tests/test_test.py
+++ b/src/qinfer/tests/test_test.py
@@ -54,8 +54,6 @@ class TestTest(DerandomizedTestCase):
     def test_assert_warns_nowarn(self):
         with assert_warns(RuntimeWarning):
             pass
-
-class TestTestModel(DerandomizedTestCase):
 
     def test_test_model_runs(self):
         model = MockModel()
changed class of test_test_model_runs
py
diff --git a/ravel.py b/ravel.py
index <HASH>..<HASH> 100644
--- a/ravel.py
+++ b/ravel.py
@@ -300,17 +300,17 @@ class Bus :
 
 #end Bus
 
-def SessionBus() :
+def session_bus() :
     "returns a Bus object for the current D-Bus session bus."
     return \
         Bus(dbus.Connection.bus_get(DBUS.BUS_SESSION, private = False))
-#end SessionBus
+#end session_bus
 
-def SystemBus() :
+def system_bus() :
     "returns a Bus object for the D-Bus system bus."
     return \
         Bus(dbus.Connection.bus_get(DBUS.BUS_SYSTEM, private = False))
-#end SystemBus
+#end system_bus
 
 #+
 # Client-side proxies for server-side objects
use more usual function names for obtaining system/session Bus objects
py
diff --git a/jackal/core.py b/jackal/core.py
index <HASH>..<HASH> 100644
--- a/jackal/core.py
+++ b/jackal/core.py
@@ -65,7 +65,8 @@ class Host(DocType):
             'scripts_results': Text(multi=True),
             'protocol': Keyword(),
             'id': Keyword(),
-            'reason': Keyword()
+            'reason': Keyword(),
+            'service': Keyword()
         }
     )
 
@@ -189,7 +190,7 @@ class Core(object):
     def search_hosts(self):
         """
         This function will perform a query on the elasticsearch instance with the given command line arguments.
-        Currently tag, up and port arguments are implemented. Search is not yet implemented.
+        Currently tag, up, port and search arguments are implemented.
         """
         hosts = []
         search = Host.search()
@@ -205,8 +206,8 @@ class Core(object):
             for port in self.arguments.ports.split(','):
                 search = search.filter("match", services__port=port)
         if self.arguments.search:
-            # TODO implement
-            pass
+            for search_argument in self.arguments.search.split(','):
+                search = search.query("multi_match", query=search_argument, fields=['tags', 'os', 'hostname', 'services.banner', 'services.script_results'])
 
         if self.arguments.number:
Implemented search function for hosts.
py
diff --git a/projex/xbuild/builder.py b/projex/xbuild/builder.py
index <HASH>..<HASH> 100644
--- a/projex/xbuild/builder.py
+++ b/projex/xbuild/builder.py
@@ -130,7 +130,7 @@ class Builder(object):
             'logo': projex.resources.find('img/logo.ico'),
             'header_image': projex.resources.find('img/installer.bmp'),
             'finish_image': projex.resources.find('img/installer-side.bmp'),
-            'choose_directoy': True
+            'choose_dir': True
         }
 
         for k, v in opts.items():
@@ -658,7 +658,7 @@ class Builder(object):
         opts['uninstall_plugins'] = '\n'.join(uninstall_plugins)
         opts['pre_section_plugins'] = '\n'.join(pre_section_plugins)
        opts['post_section_plugins'] = '\n'.join(post_section_plugins)
-        opts['choose_directory'] = templ.NSISCHOOSEDIRECTORY if opts['choose_directory'] else ''
+        opts['choose_directory'] = templ.NSISCHOOSEDIRECTORY if opts['choose_dir'] else ''
 
         req_license = self._installerOptions.pop('require_license_approval', False)
         if req_license:
switched option to choose_dir -- _directory is utilized to map pathing settings
py
diff --git a/django_tenants/postgresql_backend/base.py b/django_tenants/postgresql_backend/base.py
index <HASH>..<HASH> 100644
--- a/django_tenants/postgresql_backend/base.py
+++ b/django_tenants/postgresql_backend/base.py
@@ -21,7 +21,7 @@ original_backend = import_module(ORIGINAL_BACKEND + '.base')
 EXTRA_SEARCH_PATHS = getattr(settings, 'PG_EXTRA_SEARCH_PATHS', [])
 
 # from the postgresql doc
-SQL_IDENTIFIER_RE = re.compile(r'^[_a-zA-Z][_a-zA-Z0-9]{,62}$')
+SQL_IDENTIFIER_RE = re.compile(r'^[_a-zA-Z0-9]{1,63}$')
 SQL_SCHEMA_NAME_RESERVED_RE = re.compile(r'^pg_', re.IGNORECASE)
allow using only numbers to name a schema
py
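The behavioral difference between the two patterns is small but decisive for numeric schema names; a quick check:

import re

# Old pattern: must start with a letter or underscore, 63 chars max.
OLD = re.compile(r'^[_a-zA-Z][_a-zA-Z0-9]{,62}$')
# New pattern: letters, digits, and underscores anywhere, 1-63 chars.
NEW = re.compile(r'^[_a-zA-Z0-9]{1,63}$')

assert OLD.match('12345') is None  # digits-only name was rejected
assert NEW.match('12345')          # now accepted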
diff --git a/puzzle/plugins/sql/__init__.py b/puzzle/plugins/sql/__init__.py
index <HASH>..<HASH> 100644
--- a/puzzle/plugins/sql/__init__.py
+++ b/puzzle/plugins/sql/__init__.py
@@ -1,2 +1,4 @@
 # -*- coding: utf-8 -*-
+from .case_mixin import CaseMixin
+from .variant_mixin import VariantMixin
 from .store import Store
Added CaseMixin and VariantMixin to __init__ in sql adapter
py
diff --git a/pymake/suite.py b/pymake/suite.py
index <HASH>..<HASH> 100644
--- a/pymake/suite.py
+++ b/pymake/suite.py
@@ -45,9 +45,13 @@ class PymakeSuite(object):
         :return: A valid PymakeSolution instance if succesful; None otherwise.
         """
         s = PymakeSolution(**kwargs)
+        s.Name = config.get(section, 'name', fallback=s.Name)
         s.FileName = os.path.normpath(config.get(section, 'filename', fallback=s.FileName))
-        s.VSVersion = config.getfloat(section, 'visual_studio_version', fallback=11)
+        s.VSVersion = config.getfloat(section, 'visual_studio_version', fallback=s.VSVersion)
+        if not s.VSVersion:
+            raise ValueError('Solution section [%s] requires a value for Visual Studio Version (visual_studio_version)' % section)
+
         project_sections = config.getlist(section, 'projects', fallback=[])
         for project_section in project_sections:
             project = self._getproject(config, project_section, VSVersion=s.VSVersion)
@@ -94,7 +98,8 @@ class PymakeSuite(object):
         p.VirtualEnvironments = [ve for n in virtual_environments
                                  for ve in self._getvirtualenvironment(config, n, VSVersion=p.VSVersion) ]
         root_path = config.get(section, 'root_path', fallback="")
-        p.insert_files(root_path)
+        p.insert_files(root_path)
+
         return p
Added small validation for a Visual Studio version value.
py
diff --git a/spinoff/actor/comm.py b/spinoff/actor/comm.py
index <HASH>..<HASH> 100644
--- a/spinoff/actor/comm.py
+++ b/spinoff/actor/comm.py
@@ -77,6 +77,7 @@ class Comm(BaseActor):
 
     @classmethod
     def get_for_thread(self):
+        assert self._current
         return self._current
 
     def __init__(self, host, process=1, sock=None):
@@ -98,12 +99,19 @@ class Comm(BaseActor):
             self.spawn(ZmqDealer(endpoint=('bind', _make_addr('*:%d' % port)),
                                  identity=self.identity))
 
-    def __enter__(self):
+    def install(self):
+        assert not Comm._current
         Comm._current = self
+
+    def uninstall(self):
+        Comm._current = None
+
+    def __enter__(self):
+        self.install()
         return self
 
     def __exit__(self, *args):
-        Comm._current = None
+        self.uninstall()
 
     def get_addr(self, actor):
         if actor in self._registry:
Added Comm.install and Comm.uninstall + sanity check in Comm.get_for_thread
py
diff --git a/logentry_admin/admin.py b/logentry_admin/admin.py
index <HASH>..<HASH> 100644
--- a/logentry_admin/admin.py
+++ b/logentry_admin/admin.py
@@ -26,7 +26,6 @@ class LogEntryAdmin(admin.ModelAdmin):
         'user',
         'content_type',
         'object_link',
-        'action_flag',
         'action_description',
         'change_message',
     ]
We don’t need both action flag and description. Drop from list page the one nobody can read.
py
diff --git a/sos/plugins/openvswitch.py b/sos/plugins/openvswitch.py
index <HASH>..<HASH> 100644
--- a/sos/plugins/openvswitch.py
+++ b/sos/plugins/openvswitch.py
@@ -158,7 +158,7 @@ class OpenVSwitch(Plugin):
 
 class RedHatOpenVSwitch(OpenVSwitch, RedHatPlugin):
 
-    packages = ('openvswitch', 'openvswitch-dpdk')
+    packages = ('openvswitch', 'openvswitch2.*', 'openvswitch-dpdk')
 
 
 class DebianOpenVSwitch(OpenVSwitch, DebianPlugin, UbuntuPlugin):
[openvswitch] catch all openvswitch2.* packages. Since the release of openvswitch <I>, the Red Hat Fast Datapath channel maintains multiple versions of openvswitch. Update the list of packages using a wildcard to catch all openvswitch2.* packages. Resolves: #<I>
py
diff --git a/src/dolo/misc/calculus.py b/src/dolo/misc/calculus.py
index <HASH>..<HASH> 100644
--- a/src/dolo/misc/calculus.py
+++ b/src/dolo/misc/calculus.py
@@ -51,7 +51,14 @@ def solve_triangular_system(sdict,return_order=False,unknown_type=sympy.Symbol):
     else:
         res = copy.copy(sdict)
         for s in oks:
-            res[s] = lambda_sub(res[s],res)
+            try:
+                res[s] = lambda_sub(res[s],res)
+            except Exception as e:
+                print('Error evaluating: '+ str(res[s]))
+                print('with :')
+                print(res)
+                raise(e)
+
         return [res,oks]
 
 def simple_triangular_solve(sdict, l=0):
(Slightly) better error messages for non triangular systems.
py
diff --git a/test/test_signature.py b/test/test_signature.py
index <HASH>..<HASH> 100644
--- a/test/test_signature.py
+++ b/test/test_signature.py
@@ -508,7 +508,7 @@ run:
         script = SoS_Script('''
 [process: provides='a.txt']
 run:
-    echo 'a.txt' > a.txt
+    echo a.txt > a.txt
 
 [default]
 depends: 'a.txt'
@@ -522,12 +522,12 @@ run:
         for f in ['a.txt', 'a.txt.bak']:
             self.assertTrue(FileTarget(f).exists())
             with open(f) as ifile:
-                self.assertEqual(ifile.read(), 'a.txt\n')
+                self.assertEqual(ifile.read().strip(), 'a.txt')
         # now let us change how a.txt should be generated
         script = SoS_Script('''
 [process: provides='a.txt']
 run:
-    echo 'aa.txt' > a.txt
+    echo aa.txt > a.txt
 
 [default]
 depends: 'a.txt'
@@ -541,7 +541,7 @@ run:
         for f in ['a.txt', 'a.txt.bak']:
             self.assertTrue(FileTarget(f).exists())
             with open(f) as ifile:
-                self.assertEqual(ifile.read(), 'aa.txt\n')
+                self.assertEqual(ifile.read().strip(), 'aa.txt')
 
     def testSignatureWithVars(self):
Tweak tests for windows/AppVeyor
py
diff --git a/vtkplotter/shapes.py b/vtkplotter/shapes.py
index <HASH>..<HASH> 100644
--- a/vtkplotter/shapes.py
+++ b/vtkplotter/shapes.py
@@ -1277,7 +1277,7 @@ class Arc(Mesh):
     ):
         if len(point1) == 2:
             point1 = (point1[0], point1[1], 0)
-        if len(point2) == 2:
+        if point2 is not None and len(point2):
             point2 = (point2[0], point2[1], 0)
         ar = vtk.vtkArcSource()
         if point2 is not None:
Check if point2 is not None
py
diff --git a/src/toil/__init__.py b/src/toil/__init__.py
index <HASH>..<HASH> 100644
--- a/src/toil/__init__.py
+++ b/src/toil/__init__.py
@@ -48,7 +48,10 @@ def toilPackageDirPath():
 
 
 def inVirtualEnv():
-    return hasattr(sys, 'real_prefix')
+    """
+    Returns whether we are inside a virtualenv or Conda virtual environment.
+    """
+    return hasattr(sys, 'real_prefix') or 'CONDA_DEFAULT_ENV' in os.environ
 
 
 def resolveEntryPoint(entryPoint):
Add recognition of conda virtual envs, resolves #<I>. When running toil in a Conda virtual environment instead of virtualenv, _toil_worker could not be found without explicitly setting the PATH environment variable. This commit adds recognition of Conda virtual environments and fixes this issue.
py
diff --git a/vcfpy/reader.py b/vcfpy/reader.py
index <HASH>..<HASH> 100644
--- a/vcfpy/reader.py
+++ b/vcfpy/reader.py
@@ -91,7 +91,7 @@ class Reader:
         if self.tabix_file and not self.tabix_file.closed:
             self.tabix_file.close()
         # open tabix file if not yet open
-        if not self.tabix_file:
+        if not self.tabix_file or self.tabix_file.closed:
             self.tabix_file = pysam.TabixFile(
                 filename=self.path, index=self.tabix_path)
         # jump to the next position
Fixing jumping a second time in Reader
py
diff --git a/insights/__init__.py b/insights/__init__.py
index <HASH>..<HASH> 100644
--- a/insights/__init__.py
+++ b/insights/__init__.py
@@ -15,6 +15,7 @@ from .core.hydration import create_context
 from .core.plugins import combiner, fact, metadata, parser, rule  # noqa: F401
 from .core.plugins import datasource, condition, incident  # noqa: F401
 from .core.plugins import make_response, make_metadata, make_fingerprint  # noqa: F401
+from .core.plugins import make_pass, make_fail  # noqa: F401
 from .core.filters import add_filter, apply_filters, get_filters  # noqa: F401
 from .formats import get_formatter
 from .parsers import get_active_lines  # noqa: F401
Add make_pass, make_fail to insights top level package.
py
diff --git a/neurom/io/neurolucida.py b/neurom/io/neurolucida.py
index <HASH>..<HASH> 100644
--- a/neurom/io/neurolucida.py
+++ b/neurom/io/neurolucida.py
@@ -88,7 +88,7 @@ def _get_tokens(morph_fd):
 
     Note: this also strips newlines and comments
     '''
-    for line in morph_fd.readlines():
+    for line in morph_fd:
         line = line.rstrip()  # remove \r\n
         line = line.split(';', 1)[0]  # strip comments
         squash_token = []  # quoted strings get squashed into one token
@@ -263,7 +263,7 @@ def read(morph_file, data_wrapper=DataWrapper):
             warnings.warn(msg)
             L.warning(msg)
 
-    with open(morph_file) as morph_fd:
+    with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
         sections = _parse_sections(morph_fd)
     raw_data = _sections_to_raw_data(sections)
     return data_wrapper(raw_data, 'NL-ASCII')
Encoding error handling in NeuroLucida reader. Use errors='replace' when opening the file. This causes unknown bytes to be replaced by '?' instead of crashing the reader.
py
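What errors='replace' buys: undecodable bytes become the Unicode replacement character instead of raising. A minimal illustration with an invented byte sequence:

# A stray Latin-1 micro sign inside an otherwise ASCII file:
data = b'CellBody \xb5m'
text = data.decode('utf-8', errors='replace')
assert text == 'CellBody \ufffdm'  # U+FFFD replaces the bad byte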
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@ class Tox(TestCommand):
 
 setup(
     name="django-easy-select2",
-    version="1.4.0",
+    version="1.5.0",
     packages=find_packages(),
     author="asyncee",
     description="Django select2 theme for select input widgets.",
Version bumped to <I>
py
diff --git a/psamm/metabolicmodel.py b/psamm/metabolicmodel.py
index <HASH>..<HASH> 100644
--- a/psamm/metabolicmodel.py
+++ b/psamm/metabolicmodel.py
@@ -341,7 +341,7 @@ class MetabolicModel(MetabolicDatabase):
         return model
 
     @classmethod
-    def load_model(cls, database, reaction_iter, medium=None, limits=None,
+    def load_model(cls, database, reaction_iter=None, medium=None, limits=None,
                    v_max=None):
         """Get model from reaction name iterator
 
@@ -353,6 +353,8 @@ class MetabolicModel(MetabolicDatabase):
             model_args['v_max'] = v_max
 
         model = cls(database, **model_args)
+        if reaction_iter is None:
+            reaction_iter = iter(database.reactions)
         for reaction_id in reaction_iter:
             model.add_reaction(reaction_id)
metabolicmodel: Add all reactions from database by default. If a None reaction iterable is given to load_model(), all reactions from the database are added. This avoids having to construct the iterator for this common case.
py
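The reaction_iter=None default above follows a common API pattern: treat None as "use everything from the backing store". A stripped-down sketch with made-up Database/Model stand-ins rather than psamm's real classes:

class Database:
    def __init__(self, reactions):
        self.reactions = set(reactions)

class Model:
    def __init__(self, database):
        self.database = database
        self.reactions = set()

    @classmethod
    def load_model(cls, database, reaction_iter=None):
        model = cls(database)
        # None means "all reactions", sparing callers the boilerplate
        # of iter(database.reactions) in the common case.
        if reaction_iter is None:
            reaction_iter = iter(database.reactions)
        for reaction_id in reaction_iter:
            model.reactions.add(reaction_id)
        return model

db = Database(['rxn_1', 'rxn_2', 'rxn_3'])
assert Model.load_model(db).reactions == db.reactions          # default: everything
assert Model.load_model(db, ['rxn_1']).reactions == {'rxn_1'}  # explicit subset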
diff --git a/thinc/shims/tensorflow.py b/thinc/shims/tensorflow.py index <HASH>..<HASH> 100644 --- a/thinc/shims/tensorflow.py +++ b/thinc/shims/tensorflow.py @@ -200,23 +200,6 @@ class TensorFlowShim(Shim): else: yield - def _update_tensorflow_averages(self, sgd, *, init_steps=1): - if getattr(sgd, "averages", None) is None: - return - # Collect parameters if we don't have them - layers = [l.weights for l in self._model.layers] - layers = itertools.chain(*layers) - for layer in layers: - key = f"tensorflow_{self.id}_{layer.name}" - sgd.nr_update[key] += 1 - xp_param = tensorflow2xp(layer) - ops = get_array_ops(xp_param) - if key in sgd.averages: - ops.update_averages(sgd.averages[key], xp_param, sgd.nr_update[key]) - else: - sgd.averages[key] = xp_param.copy() - sgd.nr_update[key] = init_steps - def _clone_model(self): """similar to tf.keras.models.clone_model() But the tf.keras.models.clone_model changes the names of tf.Variables.
drop unused averages code in tf shim
py
diff --git a/adafruit_ads1x15/ads1x15.py b/adafruit_ads1x15/ads1x15.py index <HASH>..<HASH> 100644 --- a/adafruit_ads1x15/ads1x15.py +++ b/adafruit_ads1x15/ads1x15.py @@ -57,7 +57,7 @@ _ADS1X15_CONFIG_GAIN = { class ADS1x15(object): """Base functionality for ADS1x15 analog to digital converters.""" - def __init__(self, i2c, gain=1, data_rate=None, mode=_ADS1X15_CONFIG_MODE_SINGLE, + def __init__(self, i2c, gain=1, data_rate=None, mode=_ADS1X15_CONFIG_MODE_SINGLE, address=_ADS1X15_DEFAULT_ADDRESS): #pylint: disable=too-many-arguments self.buf = bytearray(3)
you're the one that made me break it into two lines!
py
diff --git a/yamcs-client/yamcs/tmtc/model.py b/yamcs-client/yamcs/tmtc/model.py index <HASH>..<HASH> 100644 --- a/yamcs-client/yamcs/tmtc/model.py +++ b/yamcs-client/yamcs/tmtc/model.py @@ -202,7 +202,7 @@ class IssuedCommand(object): :type: :class:`~datetime.datetime` """ if self._proto.HasField('generationTime'): - return parse_isostring(self._proto.generationTime) + return self._proto.generationTime.ToDatetime() return None @property
Fixed the datetime conversion, broken since the field was changed to Timestamp
py
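The conversion that fix relies on: the field is now a protobuf Timestamp message rather than an ISO string, and the well-known type converts to datetime directly. A small sketch, assuming the protobuf package is available:

from datetime import datetime
from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp()
ts.FromDatetime(datetime(2020, 1, 2, 3, 4, 5))  # wire form: seconds + nanos

# parse_isostring(ts) fails because ts is a message, not an ISO-8601 string;
# the well-known type already knows how to convert itself.
dt = ts.ToDatetime()
assert dt == datetime(2020, 1, 2, 3, 4, 5)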
diff --git a/astrocats/catalog/entry.py b/astrocats/catalog/entry.py index <HASH>..<HASH> 100644 --- a/astrocats/catalog/entry.py +++ b/astrocats/catalog/entry.py @@ -451,7 +451,8 @@ class Entry(OrderedDict): # duplicate is found, that means the previous `exclude` array # should be saved to the new object, and the old deleted if new_spectrum.is_duplicate_of(item): - new_spectrum[SPECTRUM.EXCLUDE] = item[SPECTRUM.EXCLUDE] + if SPECTRUM.EXCLUDE in item: + new_spectrum[SPECTRUM.EXCLUDE] = item[SPECTRUM.EXCLUDE] del self[spec_key][si] break
BUG: only copy exclude if it exists
py
diff --git a/common/test_polygon.py b/common/test_polygon.py index <HASH>..<HASH> 100644 --- a/common/test_polygon.py +++ b/common/test_polygon.py @@ -2010,18 +2010,25 @@ class Test_Polygon(unittest.TestCase): A = numpy.array(A, dtype='f') M, N = A.shape + print print M, N + print A # Axis longitudes = numpy.linspace(100, 110, N, endpoint=False) latitudes = numpy.linspace(0, -6, M, endpoint=False) - print longitudes - print latitudes + print 'Lon', longitudes + print 'Lat', latitudes + P = grid2points(A, longitudes, latitudes) - assert len(P) == M * N print P + print A.flat[:] + assert P.shape[0] == M * N + assert P.shape[1] == 3 + + assert numpy.allclose(P[:, 2], A.flat) return # [ 3. nan nan nan 50.9573822 ]
Revisited test_grid2points (not done at all)
py
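What the revised assertions pin down: grid2points should flatten an M-by-N raster into M*N rows of (longitude, latitude, value), with column 2 equal to A.flat. A hypothetical numpy implementation that would satisfy them — a sketch, not the project's actual function:

import numpy

def grid2points(A, longitudes, latitudes):
    # Pair every cell with its axis coordinates in row-major order,
    # so column 2 equals A.flat and the result has shape (M*N, 3).
    lon, lat = numpy.meshgrid(longitudes, latitudes)
    return numpy.column_stack([lon.ravel(), lat.ravel(), A.ravel()])

M, N = 3, 4
A = numpy.arange(M * N, dtype='f').reshape(M, N)
longitudes = numpy.linspace(100, 110, N, endpoint=False)
latitudes = numpy.linspace(0, -6, M, endpoint=False)
P = grid2points(A, longitudes, latitudes)
assert P.shape == (M * N, 3)
assert numpy.allclose(P[:, 2], A.flat)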
diff --git a/meshio/helpers.py b/meshio/helpers.py index <HASH>..<HASH> 100644 --- a/meshio/helpers.py +++ b/meshio/helpers.py @@ -95,7 +95,7 @@ def read(filename, file_format=None): if not file_format: # deduce file format from extension - extension = '.' + filename.split(os.extsep, 1)[-1] + extension = '.' + os.path.basename(filename).split(os.extsep, 1)[-1] file_format = _extension_to_filetype[extension] format_to_reader = { @@ -153,8 +153,7 @@ def write(filename, if not file_format: # deduce file format from extension - # _, extension = os.path.splitext(filename) - extension = '.' + filename.split(os.extsep, 1)[-1] + extension = '.' + os.path.basename(filename).split(os.extsep, 1)[-1] file_format = _extension_to_filetype[extension] # check cells for sanity
took basename of filename in case the path is nontrivial #<I>
py
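Why taking the basename matters in that change: splitting the raw path on the first os.extsep mistakes a dotted directory name for the start of the extension, while splitting only the basename still preserves compound suffixes such as .tar.gz. A quick illustration with placeholder paths:

import os

def deduce_extension(filename):
    # Basename first: otherwise a dotted directory such as 'v1.2/'
    # is mistaken for the start of the extension.
    return '.' + os.path.basename(filename).split(os.extsep, 1)[-1]

assert deduce_extension('v1.2/mesh.vtu') == '.vtu'
assert deduce_extension('data/mesh.tar.gz') == '.tar.gz'  # compound suffix kept
# The buggy variant, for contrast:
assert '.' + 'v1.2/mesh.vtu'.split(os.extsep, 1)[-1] == '.2/mesh.vtu'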
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) setup( name='dependenpy', - version='1.1.0', + version='1.1.1', packages=['dependenpy'], license='MPL 2.0', @@ -19,7 +19,8 @@ setup( # download_url = 'https://github.com/Pawamoy/dependenpy/tarball/1.0.4', keywords="dependency matrix dsm", - description="A Python module that builds a Dependency Matrix for your project.", + description="A Python module that builds " + "a Dependency Matrix for your project.", classifiers=[ # "Development Status :: 5 - Production/Stable", 'Development Status :: 4 - Beta',
Updated setup for version <I>
py
diff --git a/mtp_common/user_admin/forms.py b/mtp_common/user_admin/forms.py index <HASH>..<HASH> 100644 --- a/mtp_common/user_admin/forms.py +++ b/mtp_common/user_admin/forms.py @@ -131,6 +131,10 @@ class SignUpForm(ApiForm): self.error_conditions = {} @property + def api_session(self): + return api_client.get_unauthenticated_session() + + @property def payload(self): payload = { key: self.cleaned_data[key]
Ensure user sign-up form uses unauthenticated API connection … also used for moving prison (and region later on)
py
diff --git a/tenant_schemas/models.py b/tenant_schemas/models.py index <HASH>..<HASH> 100644 --- a/tenant_schemas/models.py +++ b/tenant_schemas/models.py @@ -50,11 +50,14 @@ class TenantMixin(models.Model): cursor.execute('CREATE SCHEMA %s' % self.schema_name) if sync_schema: - call_command('sync_schemas', schema_name=self.schema_name, - interactive=False) # don't ask to create an admin user + call_command('sync_schemas', + schema_name=self.schema_name, + interactive=False, # don't ask to create an admin user + migrate_all=True, # migrate all apps directly to last version + ) - # make sure you have SOUTH_TESTS_MIGRATE = false + # fake all migrations if 'south' in settings.INSTALLED_APPS and not django_is_in_test_mode(): - call_command('migrate_schemas', schema_name=self.schema_name) + call_command('migrate_schemas', fake=True, schema_name=self.schema_name) return True \ No newline at end of file
When creating a tenant, all models get synced via syncdb and then the migrations are faked (instead of calling syncdb and then migrate).
py
diff --git a/spyder/widgets/variableexplorer/collectionseditor.py b/spyder/widgets/variableexplorer/collectionseditor.py index <HASH>..<HASH> 100644 --- a/spyder/widgets/variableexplorer/collectionseditor.py +++ b/spyder/widgets/variableexplorer/collectionseditor.py @@ -1305,7 +1305,10 @@ class CollectionsEditor(QDialog): else: # unknown object import copy - self.data_copy = copy.deepcopy(data) + try: + self.data_copy = copy.deepcopy(data) + except NotImplementedError: + self.data_copy = copy.copy(data) datalen = len(get_object_attrs(data)) self.widget = CollectionsEditorWidget(self, self.data_copy, title=title, readonly=readonly, remote=remote)
Handle error when using deepcopy in the collections editor.
py
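The fallback above in isolation: objects can signal that they refuse deep copying by raising NotImplementedError from __deepcopy__, and a shallow copy is the hedged second choice. A toy class stands in for the "unknown object":

import copy

class NoDeepCopy:
    """Stand-in for an object whose __deepcopy__ refuses to cooperate."""
    def __init__(self, payload):
        self.payload = payload
    def __deepcopy__(self, memo):
        raise NotImplementedError("deep copy not supported")

data = NoDeepCopy([1, 2, 3])
try:
    data_copy = copy.deepcopy(data)   # preferred: fully independent copy
except NotImplementedError:
    data_copy = copy.copy(data)       # fallback: shallow copy shares innards

assert data_copy is not data
assert data_copy.payload is data.payload  # the price of the shallow fallback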
diff --git a/server.py b/server.py index <HASH>..<HASH> 100644 --- a/server.py +++ b/server.py @@ -143,7 +143,7 @@ def rs_del(rs_id): except StandardError as e: print repr(e) return send_result(400) - return send_result(200, result) + return send_result(204, result) @route('/rs/<rs_id>/members', method='POST')
Use <I> status code for a successful replica delete response
py
diff --git a/dark/utils.py b/dark/utils.py index <HASH>..<HASH> 100644 --- a/dark/utils.py +++ b/dark/utils.py @@ -809,8 +809,10 @@ def alignmentGraph(recordFilenameOrHits, hitId, fastaFilename, db='nt', # Color each query by its bases. xScale = 3 yScale = 2 - baseImage = BaseImage(maxX - minX, maxEIncludingRandoms - minE, - xScale, yScale) + baseImage = BaseImage( + maxX - minX, + maxEIncludingRandoms - minE + (1 if rankEValues else 0), + xScale, yScale) for item in items: hsp = item['hsp'] e = item['e'] - minE
Fixed off-by-one error when coloring bases and ranking evalues.
py
diff --git a/safe/common/qgis_interface.py b/safe/common/qgis_interface.py index <HASH>..<HASH> 100644 --- a/safe/common/qgis_interface.py +++ b/safe/common/qgis_interface.py @@ -142,10 +142,14 @@ class QgisInterface(QObject): #LOGGER.debug('Layer Count Before: %s' % len(self.canvas.layers())) current_layers = self.canvas.layers() final_layers = [] + # We need to keep the record of the registered layers on our canvas! + registered_layers = [] for layer in current_layers: final_layers.append(QgsMapCanvasLayer(layer)) + registered_layers.append(layer.id()) for layer in layers: - final_layers.append(QgsMapCanvasLayer(layer)) + if layer.id() not in registered_layers: + final_layers.append(QgsMapCanvasLayer(layer)) self.canvas.setLayerSet(final_layers) #LOGGER.debug('Layer Count After: %s' % len(self.canvas.layers()))
Keep record of registered layers in our testing CANVAS.
py
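Stripped of the QGIS types, that fix is a merge that deduplicates by a stable identifier before extending the canvas layer set. A generic sketch with a hypothetical Layer class, not the qgis API:

class Layer:
    def __init__(self, layer_id):
        self._id = layer_id
    def id(self):
        return self._id

def merge_layers(current_layers, new_layers):
    # Record what is already on the canvas so re-adding a layer
    # cannot create a duplicate entry in the final layer set.
    registered = {layer.id() for layer in current_layers}
    final = list(current_layers)
    for layer in new_layers:
        if layer.id() not in registered:
            final.append(layer)
            registered.add(layer.id())
    return final

a, b, c = Layer('a'), Layer('b'), Layer('c')
merged = merge_layers([a, b], [b, c])
assert [layer.id() for layer in merged] == ['a', 'b', 'c']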
diff --git a/openid/store/sqlstore.py b/openid/store/sqlstore.py index <HASH>..<HASH> 100644 --- a/openid/store/sqlstore.py +++ b/openid/store/sqlstore.py @@ -432,7 +432,12 @@ class MySQLStore(SQLStore): clean_nonce_sql = 'DELETE FROM %(nonces)s WHERE timestamp < %%s;' def blobDecode(self, blob): - return blob.tostring() + if type(blob) is str: + # Versions of MySQLdb >= 1.2.2 + return blob + else: + # Versions of MySQLdb prior to 1.2.2 (as far as we can tell) + return blob.tostring() class PostgreSQLStore(SQLStore): """
[project @ Fix blob encoding for newer versions of MySQLdb]
py
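The shim generalized: accept whichever representation the driver returns and normalize it. LegacyBlob below is a hypothetical stand-in for the pre-1.2.2 MySQLdb wrapper object, used only so the sketch is self-contained:

class LegacyBlob:
    """Mimics the old MySQLdb blob wrapper (hypothetical stand-in)."""
    def __init__(self, data):
        self._data = data
    def tostring(self):
        return self._data

def blob_decode(blob):
    # Newer drivers hand back the payload directly; older ones wrap it
    # in an object that must be unwrapped with tostring().
    if isinstance(blob, str):
        return blob
    return blob.tostring()

assert blob_decode('raw-payload') == 'raw-payload'
assert blob_decode(LegacyBlob('raw-payload')) == 'raw-payload'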
diff --git a/py/testdir_single_jvm/test_exec2_append_cols.py b/py/testdir_single_jvm/test_exec2_append_cols.py index <HASH>..<HASH> 100644 --- a/py/testdir_single_jvm/test_exec2_append_cols.py +++ b/py/testdir_single_jvm/test_exec2_append_cols.py @@ -54,7 +54,7 @@ class Basic(unittest.TestCase): msg="actual col name: %s expected col name %s" % (actual, expected)) # make it fail with this one (skip) - execExpr = 's.hex[,%s] = r.hex[,%s]' % (101, 1), + execExpr = 's.hex[,%s] = r.hex[,%s]' % (2, 1), h2e.exec_expr(h2o.nodes[0], execExpr, resultKey='s.hex', timeoutSecs=10) inspect = h2o_cmd.runInspect(key='s.hex')
Test tried to select a non-existent column
py
diff --git a/arctic/store/_ndarray_store.py b/arctic/store/_ndarray_store.py index <HASH>..<HASH> 100644 --- a/arctic/store/_ndarray_store.py +++ b/arctic/store/_ndarray_store.py @@ -409,6 +409,9 @@ class NdarrayStore(object): version['type'] = self.TYPE version[FW_POINTERS_CONFIG_KEY] = ARCTIC_FORWARD_POINTERS_CFG.name + # Create an empty entry to prevent cases where this field is accessed without being there. (#710) + if version[FW_POINTERS_CONFIG_KEY] != FwPointersCfg.DISABLED.name: + version[FW_POINTERS_REFS_KEY] = list() if str(dtype) != previous_version['dtype'] or \ _fw_pointers_convert_append_to_write(previous_version): @@ -611,6 +614,9 @@ class NdarrayStore(object): version['up_to'] = len(item) version['sha'] = self.checksum(item) version[FW_POINTERS_CONFIG_KEY] = ARCTIC_FORWARD_POINTERS_CFG.name + # Create an empty entry to prevent cases where this field is accessed without being there. (#710) + if version[FW_POINTERS_CONFIG_KEY] != FwPointersCfg.DISABLED.name: + version[FW_POINTERS_REFS_KEY] = list() if previous_version: if 'sha' in previous_version \
Issue <I>: Initialize SEGMENT_SHA in versions for writes and appends This helps prevent issues where this key is accessed in versions without having been initialized. This is only done when the mode is either HYBRID or ENABLED, as this field should not be used in other cases anyway.
py
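The defensive-initialization pattern on its own: seed the key with an empty list at write time so later readers can assume it exists instead of hitting KeyError. The key's string value below is a placeholder, not arctic's real constant:

FW_POINTERS_REFS_KEY = 'fw_refs'  # placeholder string value

def new_version(fw_pointers_enabled):
    version = {'type': 'ndarray'}
    if fw_pointers_enabled:
        # Seed an empty list now so later code that extends or reads the
        # refs can assume the key exists instead of raising KeyError.
        version[FW_POINTERS_REFS_KEY] = []
    return version

v = new_version(fw_pointers_enabled=True)
v[FW_POINTERS_REFS_KEY].extend(['segment-sha-1', 'segment-sha-2'])
assert v[FW_POINTERS_REFS_KEY] == ['segment-sha-1', 'segment-sha-2']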
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py index <HASH>..<HASH> 100644 --- a/tools/interop_matrix/client_matrix.py +++ b/tools/interop_matrix/client_matrix.py @@ -103,6 +103,9 @@ LANG_RELEASE_MATRIX = { { 'v1.8.1': None }, + { + 'v1.9.1': None + }, ], 'java': [ {
Add go release versions to client_matrix.py
py
diff --git a/discord/invite.py b/discord/invite.py index <HASH>..<HASH> 100644 --- a/discord/invite.py +++ b/discord/invite.py @@ -109,6 +109,12 @@ class Invite(Hashable): guild = Object(id=guild_id) channel = Object(id=channel_id) guild.name = data['guild']['name'] + + guild.splash = data['guild']['splash'] + guild.splash_url = '' + if guild.splash: + guild.splash_url = 'https://cdn.discordapp.com/splashes/{0.id}/{0.splash}.jpg?size=2048'.format(guild) + channel.name = data['channel']['name'] data['guild'] = guild
Add support for splash(_url) to invites
py
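The guard-then-format pattern from that change in minimal form: only build the CDN URL when the asset hash is present, otherwise keep the value falsy. guild here is a plain namespace, not discord.py's model:

from types import SimpleNamespace

def splash_url(guild):
    # An absent splash hash means the guild has no splash image;
    # return '' rather than formatting a URL around None.
    if not guild.splash:
        return ''
    return ('https://cdn.discordapp.com/splashes/'
            '{0.id}/{0.splash}.jpg?size=2048'.format(guild))

with_splash = SimpleNamespace(id=123, splash='abc123')
without = SimpleNamespace(id=456, splash=None)
assert splash_url(with_splash).endswith('/123/abc123.jpg?size=2048')
assert splash_url(without) == ''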