diff
stringlengths
139
3.65k
message
stringlengths
8
627
diff_languages
stringclasses
1 value
diff --git a/yandextank/core/util.py b/yandextank/core/util.py index <HASH>..<HASH> 100644 --- a/yandextank/core/util.py +++ b/yandextank/core/util.py @@ -4,6 +4,8 @@ Common utilities import threading as th import httplib import logging +from paramiko import \ + SSHClient, AutoAddPolicy, AuthenticationException, SSHException logger = logging.getLogger(__name__)
securedshell class moved to ...core.util, imports
py
diff --git a/cs/__init__.py b/cs/__init__.py index <HASH>..<HASH> 100644 --- a/cs/__init__.py +++ b/cs/__init__.py @@ -102,7 +102,7 @@ def main(): try: response = json.loads(response.text) - except json.decoder.JSONDecodeError: + except ValueError: sys.stderr.write(response.text) sys.stderr.write("\n") sys.exit(1)
cli: JSONDecodeError is Python3+
py
diff --git a/GradientFeatureAuditor.py b/GradientFeatureAuditor.py index <HASH>..<HASH> 100644 --- a/GradientFeatureAuditor.py +++ b/GradientFeatureAuditor.py @@ -9,7 +9,7 @@ import time import os import json -ENABLE_MULTIPROCESSING = True +ENABLE_MULTIPROCESSING = False class GradientFeatureAuditor(object): def __init__(self, model, headers, train_set, test_set, repair_steps=10, @@ -78,9 +78,8 @@ class GradientFeatureAuditor(object): worker_params.append( (feature_to_repair, repair_level, output_file) ) repair_level += repair_increase_per_step - # Start a new worker process for each repair level. if ENABLE_MULTIPROCESSING: - pool = Pool(processes=cpu_count()-1 or 1) + pool = Pool(processes=cpu_count()/2 or 1) conf_table_tuples = pool.map(self, worker_params) else: conf_table_tuples = [self(params) for params in worker_params]
Changed GFA multiprocessing again.
py
diff --git a/pyrap_images/trunk/pyrap/images/image.py b/pyrap_images/trunk/pyrap/images/image.py index <HASH>..<HASH> 100644 --- a/pyrap_images/trunk/pyrap/images/image.py +++ b/pyrap_images/trunk/pyrap/images/image.py @@ -111,3 +111,6 @@ class image(Image): outshape, coordsys.dict(), interpolation, decimate, replicate, refchange, forceregrid)) + + def subimage(self, blc=(), trc=(), inc=(), dropdegenerate=False): + return image(Image.subimage(self, blc, trc, inc, dropdegenerate))
added copy constructor use to subimage
py
diff --git a/photons/__init__.py b/photons/__init__.py index <HASH>..<HASH> 100644 --- a/photons/__init__.py +++ b/photons/__init__.py @@ -1,5 +1 @@ -from .lightprotocol import * -from .lightclient import * from .lights import * -from .matrix import * -from .lightserver import *
do not auto import client and server-related modules
py
diff --git a/polysquarelinter/linter.py b/polysquarelinter/linter.py index <HASH>..<HASH> 100644 --- a/polysquarelinter/linter.py +++ b/polysquarelinter/linter.py @@ -647,7 +647,7 @@ def _apply_replacement(error, found_file, file_lines): # Only fix one error at a time found_file.seek(0) - found_file.write(concatenated_fixed_lines) + found_file.write(concatenated_fixed_lines.encode("utf-8")) found_file.truncate()
linter: Also write as bytes
py
diff --git a/aiodns/__init__.py b/aiodns/__init__.py index <HASH>..<HASH> 100644 --- a/aiodns/__init__.py +++ b/aiodns/__init__.py @@ -34,7 +34,7 @@ class DNSResolver(object): kwargs.pop('sock_state_cb', None) self._channel = pycares.Channel(sock_state_cb=self._sock_state_cb, **kwargs) if nameservers: - self._channel.servers = nameservers + self.nameservers = nameservers self._fds = set() self._timer = None
Use nameservers getter/setter
py
diff --git a/src/shinken-arbiter.py b/src/shinken-arbiter.py index <HASH>..<HASH> 100755 --- a/src/shinken-arbiter.py +++ b/src/shinken-arbiter.py @@ -451,12 +451,14 @@ class Arbiter(Daemon): self.dispatcher.dispatch() #Now create the external commander - e = ExternalCommand(self.conf, 'dispatcher') + if os.name != 'nt': + e = ExternalCommand(self.conf, 'dispatcher') #Scheduler need to know about external command to activate it #if necessery - self.load_external_command(e) - + self.load_external_command(e) + else: + self.fifo = None print "Run baby, run..." timeout = 1.0 @@ -590,7 +592,7 @@ if __name__ == "__main__": #p = Shinken(conf) import cProfile - #p.main() + p.main() command = """p.main()""" - cProfile.runctx( command, globals(), locals(), filename="/tmp/Arbiter.profile" ) + #cProfile.runctx( command, globals(), locals(), filename="/tmp/Arbiter.profile" )
Do not open fifo on windows...
py
diff --git a/src/you_get/extractors/nanagogo.py b/src/you_get/extractors/nanagogo.py index <HASH>..<HASH> 100644 --- a/src/you_get/extractors/nanagogo.py +++ b/src/you_get/extractors/nanagogo.py @@ -35,6 +35,7 @@ def nanagogo_download(url, output_dir='.', merge=True, info_only=False, **kwargs 'size': size}) size = sum([i['size'] for i in items]) + if size == 0: return # do not fail the whole process print_info(site_info, title, ext, size) if not info_only: for i in items:
[<I>] do not fail the whole process
py
diff --git a/bokeh/__init__.py b/bokeh/__init__.py index <HASH>..<HASH> 100644 --- a/bokeh/__init__.py +++ b/bokeh/__init__.py @@ -124,21 +124,13 @@ from .serverconfig import Server, Cloud def _print_versions(): """Returns all the versions of software that Bokeh relies on.""" import platform as pt - import sys message = """ - %s Bokeh version: %s - %s - Python version: %s-%s-%s-%s - Python exec bin path: %s - %s + Python version: %s-%s Platform: %s - %s - %s - """ % ("*" * 76, __version__, "-" * 76, pt.python_version(), - pt.python_implementation(), pt.python_build(), pt.python_compiler(), - sys.executable, "-" * 76, pt.platform(), pt.uname()[3], "*" * 76) + """ % (__version__, pt.python_version(), + pt.python_implementation(), pt.platform()) return(message) def print_versions():
Made print_version more clean and simple.
py
diff --git a/doc/conf.py b/doc/conf.py index <HASH>..<HASH> 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -85,7 +85,7 @@ todo_include_todos = False # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the
Switch docs theme from alabaster to classic
py
diff --git a/shap/utils/transformers.py b/shap/utils/transformers.py index <HASH>..<HASH> 100644 --- a/shap/utils/transformers.py +++ b/shap/utils/transformers.py @@ -30,6 +30,14 @@ MODELS_FOR_CAUSAL_LM = [ "transformers.ProphetNetForCausalLM", ] +SENTENCEPIECE_TOKENIZERS = [ + "transformers.MarianTokenizer", + "transformers.T5Tokenizer", + "transformers.XLNetTokenizer", + "transformers.AlbertTokenizer" +] + + def parse_prefix_suffix_for_tokenizer(tokenizer): null_tokens = tokenizer.encode("") keep_prefix, keep_suffix, prefix_strlen, suffix_strlen = None, None, None, None
Added tokenizer class constants belonging to sentence piece tokenizers
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ setup( maintainer_email='[email protected]', description=("Computes hazard, risk and socio-economic impact of " "earthquakes."), - license="GNU AGPL v3", + license="AGPL3", keywords="earthquake seismic hazard risk", url=url, long_description=README,
setup.py: license should be 'AGPL3' only
py
diff --git a/txtwitter/tests/test_fake_twitter.py b/txtwitter/tests/test_fake_twitter.py index <HASH>..<HASH> 100644 --- a/txtwitter/tests/test_fake_twitter.py +++ b/txtwitter/tests/test_fake_twitter.py @@ -93,6 +93,8 @@ class TestFakeStream(TestCase): stream.add_message_type('ham', lambda data: data.get('eggs') == 'spam') self.assertTrue(stream.accepts('foo', {'bar': 'baz'})) self.assertTrue(stream.accepts('ham', {'eggs': 'spam'})) + self.assertFalse(stream.accepts('ham', {'bar': 'baz'})) + self.assertFalse(stream.accepts('foo', {'eggs': 'spam'})) def test_accepts_data_mismatch(self): stream = self._FakeStream()
Assert that messages matching a different message type's predicate are rejected in FakeStream.accepts's test for multiple message types
py
diff --git a/dallinger/deployment.py b/dallinger/deployment.py index <HASH>..<HASH> 100644 --- a/dallinger/deployment.py +++ b/dallinger/deployment.py @@ -310,11 +310,11 @@ def deploy_sandbox_shared_setup(log, verbose=True, app=None, exp_config=None): log("Waiting for Redis...") ready = False while not ready: - r = redis.from_url(heroku_app.redis_url) try: + r = redis.from_url(heroku_app.redis_url) r.set("foo", "bar") ready = True - except redis.exceptions.ConnectionError: + except (ValueError, redis.exceptions.ConnectionError): time.sleep(2) log("Saving the URL of the postgres database...")
Wait for redis URL on heroku before connecting
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup(name='thefuck', url='https://github.com/nvbn/thefuck', license='MIT', packages=find_packages(exclude=['ez_setup', 'examples', - 'tests', 'release']), + 'tests', 'tests.*', 'release']), include_package_data=True, zip_safe=False, install_requires=install_requires,
Exclude recursively tests packages
py
diff --git a/subliminal/services/podnapisi.py b/subliminal/services/podnapisi.py index <HASH>..<HASH> 100644 --- a/subliminal/services/podnapisi.py +++ b/subliminal/services/podnapisi.py @@ -103,7 +103,7 @@ class Podnapisi(ServiceBase): if results['status'] != 200: raise DownloadFailedError() subtitle.link = 'http://www.podnapisi.net/static/podnapisi/' + results['names'][0]['filename'] - self.download_file(subtitle.link, subtitle.path) + self.download_zip_file(subtitle.link, subtitle.path) return subtitle
Fix download method in Podnapisi
py
diff --git a/salt/minion.py b/salt/minion.py index <HASH>..<HASH> 100644 --- a/salt/minion.py +++ b/salt/minion.py @@ -431,7 +431,7 @@ class FileClient(object): 'cmd': '_file_hash'} payload['load'] = self.auth.crypticle.dumps(load) self.socket.send_pyobj(payload) - return self.auth.crypticle.loads(socket.recv_pyobj()) + return self.auth.crypticle.loads(self.socket.recv_pyobj()) def get_state(self, sls, env): '''
bah! I missed a self
py
diff --git a/docs/source/conf.py b/docs/source/conf.py index <HASH>..<HASH> 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -42,7 +42,7 @@ templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] +# exclude_patterns = [] intersphinx_mapping = { "fontTools": ("https://fonttools.readthedocs.io/en/latest/", None), @@ -59,4 +59,4 @@ html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +# html_static_path = ["_static"]
Disable html_static_path until we have static content
py
diff --git a/redis/client.py b/redis/client.py index <HASH>..<HASH> 100644 --- a/redis/client.py +++ b/redis/client.py @@ -19,8 +19,6 @@ from redis.exceptions import ( SYM_EMPTY = b('') -"THIS IS A FAILURE ON PURPOSE TO TEST PEP8 INTEGRATION WITH TRAVIS-CI BLAH BALH BALH BALH" - def list_or_args(keys, args): # returns a single list combining keys and args
remove offending pep8 line, travis ci should pass now
py
diff --git a/py/misc/_dist.py b/py/misc/_dist.py index <HASH>..<HASH> 100644 --- a/py/misc/_dist.py +++ b/py/misc/_dist.py @@ -138,7 +138,7 @@ def setup(pkg, **kw): print "precompiling greenlet module" try: x = py.magic.greenlet() - except ImportError: + except (RuntimeError, ImportError): print "could not precompile greenlet module, skipping" params = Params(pkg)
[svn r<I>] being yet more ignorant about build problems of c-extensions --HG-- branch : trunk
py
diff --git a/tests/unit/states/openstack_config_test.py b/tests/unit/states/openstack_config_test.py index <HASH>..<HASH> 100644 --- a/tests/unit/states/openstack_config_test.py +++ b/tests/unit/states/openstack_config_test.py @@ -22,7 +22,7 @@ ensure_in_syspath('../../') from salt.states import openstack_config openstack_config.__salt__ = {} -openstack_config.__opts__ = {} +openstack_config.__opts__ = {'test': False} @skipIf(NO_MOCK, NO_MOCK_REASON)
Mock test key in __opts__ dict
py
diff --git a/autobahn_autoreconnect/__init__.py b/autobahn_autoreconnect/__init__.py index <HASH>..<HASH> 100644 --- a/autobahn_autoreconnect/__init__.py +++ b/autobahn_autoreconnect/__init__.py @@ -172,7 +172,7 @@ class ApplicationRunner(object): txaio.use_asyncio() txaio.config.loop = self._loop - asyncio.async(self._connect(), loop=self._loop) + asyncio.run_coroutine_threadsafe(self._connect(), loop=self._loop) try: self._loop.add_signal_handler(signal.SIGTERM, self.stop) @@ -221,7 +221,7 @@ class ApplicationRunner(object): print('Connection lost') if not self._closing: print('Reconnecting') - asyncio.async(self._connect(), loop=self._loop) + asyncio.run_coroutine_threadsafe(self._connect(), loop=self._loop) def stop(self, *args): self._loop.stop()
fix syntax error: async is a Python keyword; the way to make asyncio submit a coroutine to a loop is run_coroutine_threadsafe()
py
diff --git a/tests/test_orbits.py b/tests/test_orbits.py index <HASH>..<HASH> 100644 --- a/tests/test_orbits.py +++ b/tests/test_orbits.py @@ -5,11 +5,15 @@ import astropy.units as u import astropy.coordinates as apycoords import pytest from galpy import potential +import astropy +_APY3= astropy.__version__ > '3' # Test that initializing an Orbit (not an Orbits) with an array of SkyCoords # processes the input correctly into the Orbit._orb.vxvv attribute; # The Orbits class depends on this to process arrays SkyCoords itself quickly def test_orbit_initialization_SkyCoordarray(): + # Only run this for astropy>3 + if not _APY3: return None from galpy.orbit import Orbit numpy.random.seed(1) nrand= 30 @@ -115,6 +119,8 @@ def test_initialization_vxvv(): return None def test_initialization_SkyCoord(): + # Only run this for astropy>3 + if not _APY3: return None from galpy.orbit import Orbit, Orbits numpy.random.seed(1) nrand= 30
Only run SkyCoord setup tests for Orbits for Python 3, because Python 2 doesn't support the SkyCoord functionality that we need
py
diff --git a/AegeanTools/running_percentile.py b/AegeanTools/running_percentile.py index <HASH>..<HASH> 100644 --- a/AegeanTools/running_percentile.py +++ b/AegeanTools/running_percentile.py @@ -2,7 +2,7 @@ import bisect import math import numpy as np -from blist import * +from blist import blist class RunningPercentiles(): @@ -27,7 +27,6 @@ class RunningPercentiles(): self.percentiles = plist return - #@profile def add(self, dlist): """ Add a list of elements to our collection, keeping it in order. @@ -53,7 +52,6 @@ class RunningPercentiles(): self.slist.sort() return - #@profile def sub(self, dlist): """ Remove a list of elements from our collection.
removed import *, replaced with specific imports
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,7 @@ from distutils import log from setuptools import setup, Command from distutils.command.build import build from setuptools.command.sdist import sdist +from distutils.errors import LinkError from cy_build import CyExtension as Extension, cy_build_ext as build_ext try: @@ -249,9 +250,13 @@ class extra_build(build): for sym in run_nm_defined_symbols(build_ext_obj.get_ext_fullpath(ext.name)): symbols.setdefault(sym, []).append(ext.name.lstrip('pysam.')) + errors = 0 for (sym, objs) in symbols.items(): if (len(objs) > 1): log.error("conflicting symbol (%s): %s", " ".join(objs), sym) + errors += 1 + + if errors > 0: raise LinkError("symbols defined in multiple extensions") def run(self): build.run(self)
Symbol conflicts should result in a build error As all Cython shared objects are loaded into the Python runtime simultaneously, these conflicts are really akin to a link error within one extension. As all pre-existing conflicts have now been resolved, we can make this check imply build failure.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,16 @@ #!/usr/bin/env python -from setuptools import setup import os +import sys +from setuptools import setup +try: + from setuptools import find_namespace_packages +except ImportError: + # the user has a downlevel version of setuptools. + print('Error: dbt requires setuptools v40.1.0 or higher.') + print('Please upgrade setuptools with "pip install --upgrade setuptools" ' + 'and try again') + sys.exit(1) this_directory = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(this_directory, 'README.md')) as f:
add setuptools version check for overall setup.py
py
diff --git a/manticore/core/cpu/arm.py b/manticore/core/cpu/arm.py index <HASH>..<HASH> 100644 --- a/manticore/core/cpu/arm.py +++ b/manticore/core/cpu/arm.py @@ -626,9 +626,9 @@ class Armv7Cpu(Cpu): @instruction def PUSH(cpu, *regs): - high_to_low_regs = regs[::-1] + high_to_low_regs = [r.read() for r in regs[::-1]] for reg in high_to_low_regs: - cpu.stack_push(reg.read()) + cpu.stack_push(reg) @instruction
Fix arm PUSH (#<I>) If sp is in the list, it gets pushed incorrectly because it gets updated by the stack_pushes for registers that occur before it
py
diff --git a/nanoget/nanoget.py b/nanoget/nanoget.py index <HASH>..<HASH> 100644 --- a/nanoget/nanoget.py +++ b/nanoget/nanoget.py @@ -74,7 +74,7 @@ def get_input(source, files, threads=4, readtype="1D", dfs=[out for out in executor.map(extration_function, files)], names=names or files, method=combine) - if pd.isna(datadf["readIDs"]).any(): # If any readIDs were dropped before combining df + if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any(): datadf.drop("readIDs", inplace=True) datadf = calculate_start_time(datadf) logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
gotta first check if readIDs are part of the df :)
py
diff --git a/dingo/core/__init__.py b/dingo/core/__init__.py index <HASH>..<HASH> 100644 --- a/dingo/core/__init__.py +++ b/dingo/core/__init__.py @@ -196,18 +196,12 @@ class NetworkDingo: id_db=id, geo_data=wkt_loads(row['geom'])) - # TODO: assign "real" peak_load value to lv_station when available lv_station = LVStationDingo( id_db=id, grid=lv_grid, lv_load_area=lv_load_area, geo_data=wkt_loads(lv_stations.loc[id, 'geom']), - #peak_load=int(lv_load_area.peak_load_sum * lv_grid_district.geo_data.area / lv_load_area.geo_area.area)) - peak_load=int(lv_load_area.peak_load_sum * transform(projection, - lv_grid_district.geo_data).area / - transform(projection, lv_load_area.geo_area).area)) - # TODO: current state: use area share (based on total area of LA) - #peak_load=lv_load_area.peak_load_sum / len(lv_grid_districts)) + peak_load=lv_grid_district.peak_load_sum) # Choice of typified lv model grid depends on population within lv # grid district. If no population is given, lv grid is omitted and
assign peak load of LVGD to LV station
py
diff --git a/safe/definitions/reports/infographic.py b/safe/definitions/reports/infographic.py index <HASH>..<HASH> 100644 --- a/safe/definitions/reports/infographic.py +++ b/safe/definitions/reports/infographic.py @@ -76,7 +76,7 @@ map_overview_header = { population_chart_header = { 'id': 'population-chart-header', 'description': tr(''), - 'string_format': tr('Estimated number of people affected by hazard level') + 'string_format': tr('Estimated number of people exposed by hazard level') } people_section_header = {
change infographic text, affected -> exposed
py
diff --git a/papyrus/renderers.py b/papyrus/renderers.py index <HASH>..<HASH> 100644 --- a/papyrus/renderers.py +++ b/papyrus/renderers.py @@ -43,7 +43,7 @@ class GeoJSON(object): The GeoJSON renderer supports `JSONP <http://en.wikipedia.org/wiki/JSONP>`_: - If there is a parameter in the request's HTTP query string that matches - the ``josnp_param_name`` of the registered JSONP renderer (by default, + the ``jsonp_param_name`` of the registered JSONP renderer (by default, ``callback``), the renderer will return a JSONP response. - If there is no callback parameter in the request's query string, the
Edited papyrus/renderers.py via GitHub
py
diff --git a/openpnm/models/misc/statistical_distributions.py b/openpnm/models/misc/statistical_distributions.py index <HASH>..<HASH> 100644 --- a/openpnm/models/misc/statistical_distributions.py +++ b/openpnm/models/misc/statistical_distributions.py @@ -133,9 +133,10 @@ def normal(target, seeds, scale, loc): be used to find suitable values of 'scale' and 'loc'. >>> import scipy + >>> import numpy >>> func = scipy.stats.norm(scale=.0001, loc=0.001) >>> import matplotlib.pyplot as plt - >>> fig = plt.hist(func.ppf(q=scipy.rand(10000)), bins=50) + >>> fig = plt.hist(func.ppf(q=numpy.rand(10000)), bins=50) """ import scipy.stats as spts
changed example to numpy, as sp.rand is deprecated [ci skip]
py
diff --git a/maildir-deduplicate.py b/maildir-deduplicate.py index <HASH>..<HASH> 100755 --- a/maildir-deduplicate.py +++ b/maildir-deduplicate.py @@ -27,8 +27,6 @@ You can give a list of mail headers to ignore when comparing mails between each others. I used this script to clean up a messed maildir folder after I move several mails from a Lotus Notes database. - Last update: 2010 jun 08 - Tested on MacOS X 10.6 with python 2.6.2. """
Don't track last update time of scripts: let this task to Git.
py
diff --git a/timeside/core/processor.py b/timeside/core/processor.py index <HASH>..<HASH> 100644 --- a/timeside/core/processor.py +++ b/timeside/core/processor.py @@ -76,6 +76,9 @@ class MetaProcessor(MetaComponent): return new_class + def __repr__(self): + return self.id() + class Processor(Component, HasParam, metaclass=MetaProcessor):
[core] add __repr__ method to MetaProcessor
py
diff --git a/gtkmvco/gtkmvc/support/metaclasses.py b/gtkmvco/gtkmvc/support/metaclasses.py index <HASH>..<HASH> 100644 --- a/gtkmvco/gtkmvc/support/metaclasses.py +++ b/gtkmvco/gtkmvc/support/metaclasses.py @@ -98,7 +98,9 @@ class PropertyMeta (type): # processes now all names in __observables__ for prop in type(cls).__get_observables_array__(cls): - type(cls).__create_prop_accessors__(cls, prop, _dict.get(prop, None)) + val = _dict.get(prop, None) + if val is not None: type(cls).__create_prop_accessors__(cls, prop, val) + elif type(getattr(cls, prop)) != property: type(cls).__create_prop_accessors__(cls, prop, val) obs.add(prop) pass
BUG FIX Fixed a bug in metaclass that made impossible to inherit observable properties and keep their default values. [RC]
py
diff --git a/src/nlpia/web.py b/src/nlpia/web.py index <HASH>..<HASH> 100644 --- a/src/nlpia/web.py +++ b/src/nlpia/web.py @@ -218,7 +218,7 @@ def get_response_confirmation_token(response): return None -def save_response_content(response, filename=filename, destination=os.path.curdir, chunksize=32768): +def save_response_content(response, filename='data.csv', destination=os.path.curdir, chunksize=32768): """ For streaming response from requests, download the content one CHUNK at a time """ chunksize = chunksize or 32768 if os.path.sep in filename:
web.py needs cleanup. first google download subfun
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -37,6 +37,7 @@ setup( 'deform', 'fanstatic', 'js.jquery', + 'js.jquery-form', 'js.jquery_maskedinput', 'js.jquery_maskmoney', 'js.jquery_timepicker_addon',
Added missing dependency js.deform imports from js.jquery_form but did not declare its dependency.
py
diff --git a/helper-scripts/wsgi-loader.py b/helper-scripts/wsgi-loader.py index <HASH>..<HASH> 100644 --- a/helper-scripts/wsgi-loader.py +++ b/helper-scripts/wsgi-loader.py @@ -191,7 +191,7 @@ class RequestHandler: env['wsgi.version'] = (1, 0) env['wsgi.multithread'] = False env['wsgi.multiprocess'] = True - env['wsgi.run_once'] = True + env['wsgi.run_once'] = False if env.get('HTTPS','off') in ('on', '1', 'true', 'yes'): env['wsgi.url_scheme'] = 'https' else:
set run_once to False in the wsgi loader As suggested by @snaury in #<I>. This should improve wsgi app performance as this setting is used to indicate to wsgi apps to skip caching.
py
diff --git a/spyder/plugins/editor/lsp/transport/common/consumer.py b/spyder/plugins/editor/lsp/transport/common/consumer.py index <HASH>..<HASH> 100644 --- a/spyder/plugins/editor/lsp/transport/common/consumer.py +++ b/spyder/plugins/editor/lsp/transport/common/consumer.py @@ -124,6 +124,8 @@ class IncomingMessageThread(Thread): def parse_headers(self, headers): logger.debug(headers) headers = headers.split(b'\r\n') + headers = [header.split(b'\n')[-1] if b'\n' in header else header + for header in headers] header_dict = dict([x.split(b': ') for x in headers]) return header_dict
Prevent crashes due to header malformation
py
diff --git a/tests/test_mirror.py b/tests/test_mirror.py index <HASH>..<HASH> 100644 --- a/tests/test_mirror.py +++ b/tests/test_mirror.py @@ -98,6 +98,8 @@ def test_R_unchanged_ipynb(nb_file): compare(r, r_ref) [email protected](sys.version_info < (3, 6), + reason="unordered dict result in changes in chunk options") @pytest.mark.parametrize('nb_file', list_julia_notebooks('.ipynb')) def test_julia_unchanged_ipynb(nb_file): julia_file = mirror_file(nb_file).replace('.py', '.jl') @@ -109,6 +111,8 @@ def test_julia_unchanged_ipynb(nb_file): compare(julia, julia_ref) [email protected](sys.version_info < (3, 6), + reason="unordered dict result in changes in chunk options") @pytest.mark.parametrize('script_file', list_all_notebooks('.jl')) def test_julia_unchanged(script_file): with open(script_file, encoding='utf-8') as fp:
Dict order required for testing #<I>
py
diff --git a/faker/utils/distribution.py b/faker/utils/distribution.py index <HASH>..<HASH> 100644 --- a/faker/utils/distribution.py +++ b/faker/utils/distribution.py @@ -20,7 +20,7 @@ def choice_distribution(a, p): cdf = list(cumsum(p)) normal = cdf[-1] - cdf2 = [i / normal for i in cdf] + cdf2 = [float(i) / float(normal) for i in cdf] uniform_sample = random_sample() idx = bisect.bisect_right(cdf2, uniform_sample)
Ensure choice_distribution always uses floats Fixes a bug in versions of Python in which `integer / integer` returns another integer instead of a float. Closes joke2k/faker#<I>
py
diff --git a/odl/solvers/functional/functional.py b/odl/solvers/functional/functional.py index <HASH>..<HASH> 100644 --- a/odl/solvers/functional/functional.py +++ b/odl/solvers/functional/functional.py @@ -1379,13 +1379,13 @@ class BregmanDistance(Functional): >>> l2_squared = odl.solvers.L2NormSquared(space) >>> point = space.one() >>> subgrad = l2_squared.gradient(point) - >>> Bregman_dist = odl.solvers.BregmanDistance( + >>> bregman_dist = odl.solvers.BregmanDistance( ... l2_squared, point, subgrad) This is gives squared L2 distance to the given point, ||x - 1||^2: >>> expected_functional = l2_squared.translated(point) - >>> Bregman_dist(space.zero()) == expected_functional(space.zero()) + >>> bregman_dist(space.zero()) == expected_functional(space.zero()) True """ if not isinstance(functional, Functional):
TST: minor update to BregmanDistance doc-test
py
diff --git a/plexapi/client.py b/plexapi/client.py index <HASH>..<HASH> 100644 --- a/plexapi/client.py +++ b/plexapi/client.py @@ -173,8 +173,10 @@ class PlexClient(object): def timeline(self): return self.sendCommand('timeline/poll', **{'wait':1, 'commandID':4}) - def isPlayingMedia(self): + def isPlayingMedia(self, includePaused=False): for mediatype in self.timeline(): if mediatype.get('state') == 'playing': return True + if includePaused and mediatype.get('state') == 'paused': + return True return False
Option to include paused content when requesting is media is playing.
py
diff --git a/tableprint/metadata.py b/tableprint/metadata.py index <HASH>..<HASH> 100644 --- a/tableprint/metadata.py +++ b/tableprint/metadata.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Version info -__version__ = '0.8.0' +__version__ = '0.7.1' __license__ = 'MIT' # Project description(s)
Updating version to <I>
py
diff --git a/btfxwss/classes.py b/btfxwss/classes.py index <HASH>..<HASH> 100644 --- a/btfxwss/classes.py +++ b/btfxwss/classes.py @@ -407,7 +407,10 @@ class BtfxWss: "initiating restart") self.cmd_q.put('restart') - self._check_heartbeats(ts) + try: + self._check_heartbeats(ts) + except (WebSocketConnectionClosedException, ConnectionResetError): + self.cmd_q.put('restart') self._processor_lock.release() else: time.sleep(0.5)
caught connection reset error in process thread, previously overlooked
py
diff --git a/treetime/treeanc.py b/treetime/treeanc.py index <HASH>..<HASH> 100644 --- a/treetime/treeanc.py +++ b/treetime/treeanc.py @@ -66,6 +66,13 @@ class TreeAnc(object): def gtr(self): return self._gtr + @gtr.setter + def gtr(self, value): + if not isinstance(value, GTR): + raise TypeError(" GTR instance expected") + self._gtr = value + + def set_gtr(self, in_gtr, **kwargs): """ Create new GTR model, if needed, and set the model as the attribute of the
Added setter for GTR, which takes only GTR type instance
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ setup( description="Simple command runner", license="GPLv2", keywords="run command bash shell", - url="http://python.falesnik.net/Run", + url="https://github.com/mfalesni/Run", packages=["Run"], long_description_read=read("README.md"), classifiers=[
Modified url to github
py
diff --git a/ReText/webenginepreview.py b/ReText/webenginepreview.py index <HASH>..<HASH> 100644 --- a/ReText/webenginepreview.py +++ b/ReText/webenginepreview.py @@ -54,6 +54,8 @@ class ReTextWebEnginePage(QWebEnginePage): print("level=%r message=%r lineNumber=%r sourceId=%r" % (level, message, lineNumber, sourceId)) def acceptNavigationRequest(self, url, type, isMainFrame): + if url.scheme() == "data": + return True if url.isLocalFile(): localFile = url.toLocalFile() if localFile == self.tab.fileName:
webenginepreview: Handle data: URLs internally Otherwise preview does not work because it tried to delegate data:text/html;... URL to an external application.
py
diff --git a/angr/procedures/java_jni/GetArrayElements.py b/angr/procedures/java_jni/GetArrayElements.py index <HASH>..<HASH> 100644 --- a/angr/procedures/java_jni/GetArrayElements.py +++ b/angr/procedures/java_jni/GetArrayElements.py @@ -9,6 +9,8 @@ class GetArrayElements(JNISimProcedure): array_ref = self.state.jni_references.lookup(array) values = self.load_java_array(self.state, array_ref) memory_addr = self.store_in_native_memory(values, array_ref.type) + if self.state.solver.eval(ptr_isCopy != 0): + self.store_in_native_memory(data=self.JNI_TRUE, data_type='boolean', addr=ptr_isCopy) return memory_addr def load_java_array(self, array_ref, start_idx=None, end_idx=None):
Fix case if isCopy is null
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ import os from setuptools import setup, find_packages -version = '1.0.0a5' +version = '1.0.0' def read_file(name): return open(os.path.join(os.path.dirname(__file__),
bumped version to a stable number (<I>) because (as of buildout version <I>) only final releases of buildout itself and extensions are installed.
py
diff --git a/scanpy/tests/test_docs.py b/scanpy/tests/test_docs.py index <HASH>..<HASH> 100644 --- a/scanpy/tests/test_docs.py +++ b/scanpy/tests/test_docs.py @@ -21,16 +21,19 @@ def test_function_headers(f): assert f.__doc__ is not None, f"{name} has no docstring" lines = getattr(f, "__orig_doc__", f.__doc__).split("\n") assert lines[0], f"{name} needs a single-line summary" - broken = [i for i, l in enumerate(lines) if l and not l.startswith(" ")] + broken = [i for i, l in enumerate(lines) if l.strip() and not l.startswith(" ")] if any(broken): msg = f'''\ -Header of function `{name}`’s docstring should start with one-line description: +Header of function `{name}`’s docstring should start with one-line description +and be consistently indented like this: ␣␣␣␣"""\\ ␣␣␣␣My one-line␣description. ␣␣␣␣… ␣␣␣␣""" + +The displayed line is under-indented. ''' filename = inspect.getsourcefile(f) _, lineno = inspect.getsourcelines(f)
Ignore blank lines in docstring test (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ import io from setuptools import setup, find_packages NAME = "AnyQt" -VERSION = "0.0.2" +VERSION = "0.0.3.dev0" AUTHOR = "Aleš Erjavec" AUTHOR_EMAIL = "[email protected]" URL = "https://github.com/ales-erjavec/anyqt"
Bump dev version to <I>.dev0
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -80,7 +80,7 @@ setuptools.setup( # Location where the package may be downloaded: download_url='https://pypi.org/project/harvesters/', # A list of required Python modules: - install_requires=['genicam2<1', 'numpy'], + install_requires=['genicam>=1', 'numpy'], # license='Apache Software License V2.0', # A detailed description of the package:
Resolve issue #<I>
py
diff --git a/PyFunceble/config.py b/PyFunceble/config.py index <HASH>..<HASH> 100644 --- a/PyFunceble/config.py +++ b/PyFunceble/config.py @@ -61,7 +61,6 @@ License: # pylint: enable=line-too-long import PyFunceble from PyFunceble.helpers import Dict, Directory, Download, File -from PyFunceble.iana import IANA from PyFunceble.publicsuffix import PublicSuffix @@ -186,7 +185,7 @@ Install and load the default configuration at the mentioned location? [y/n] " PublicSuffix().load() # We load the IANA database. - IANA().load() + PyFunceble.IANA().load() @classmethod def _set_path_to_configs(cls, path_to_config):
Deletion of the import of implicitly already imported classes
py
diff --git a/python_modules/dagster/dagster_tests/utils_tests/test_temp_file.py b/python_modules/dagster/dagster_tests/utils_tests/test_temp_file.py index <HASH>..<HASH> 100644 --- a/python_modules/dagster/dagster_tests/utils_tests/test_temp_file.py +++ b/python_modules/dagster/dagster_tests/utils_tests/test_temp_file.py @@ -1,9 +1,12 @@ -import resource +import pytest from dagster.utils.temp_file import get_temp_file_name, get_temp_file_names [email protected]('"win" in sys.platform', reason="resource module not available in windows") def test_get_temp_file_name_leak_file_descriptors(): + import resource + resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100)) for _ in range(100): with get_temp_file_name() as _:
fix windows tests (#<I>)
py
diff --git a/queryset_sequence/__init__.py b/queryset_sequence/__init__.py index <HASH>..<HASH> 100644 --- a/queryset_sequence/__init__.py +++ b/queryset_sequence/__init__.py @@ -41,6 +41,16 @@ def cumsum(seq): yield s +# Bridge the Django >= 1.11 Iterable object back to the Query object being an +# iterator. +class SequenceIterable(object): + def __init__(self, queryset, *args, **kwargs): + self.queryset = queryset + + def __iter__(self): + return iter(self.queryset.query) + + class QuerySequence(six.with_metaclass(PartialInheritanceMeta, Query)): """ A Query that handles multiple QuerySets. @@ -468,6 +478,10 @@ class QuerySetSequence(six.with_metaclass(PartialInheritanceMeta, QuerySet)): super(QuerySetSequence, self).__init__(**kwargs) + # Override the iterator that will be used. (Currently used only in + # Django >= 1.11.) + self._iterable_class = SequenceIterable + def iterator(self): # Create a clone so that each call re-evaluates the QuerySets. return self.query.clone()
Fix iterating in Django >= <I>.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ except ImportError: config = { 'name': 'hwrt', - 'version': '0.1.209', + 'version': '0.1.211', 'author': 'Martin Thoma', 'author_email': '[email protected]', 'maintainer': 'Martin Thoma', @@ -19,8 +19,9 @@ config = { 'url': 'https://github.com/MartinThoma/hwrt', 'license': 'MIT', 'description': 'Handwriting Recognition Tools', - 'long_description': """A tookit for handwriting recognition. It was - developed as part of the bachelors thesis of Martin Thoma.""", + 'long_description': ("A tookit for handwriting recognition. It was " + "developed as part of the bachelors thesis of " + "Martin Thoma."), 'install_requires': [ "argparse", "theano",
setup.py: Uploaded latest setup.py to prevent more questions on SO
py
diff --git a/vmo/utils/nuxmv/model.py b/vmo/utils/nuxmv/model.py index <HASH>..<HASH> 100644 --- a/vmo/utils/nuxmv/model.py +++ b/vmo/utils/nuxmv/model.py @@ -245,7 +245,8 @@ def print_pitches(oracle, nuxmv_state_name='s'): """Print chromagram oracle""" -def print_oracle(oracle, nuxmv_state_name='s', init_state=None): +def print_oracle(oracle, include_rsfx=False, init_state=None, + nuxmv_state_name='s'): """Return a bytearray describing `oracle`, with oracle states and pitches. Assumes the oracle has been created with a chromagram as feature. @@ -287,7 +288,7 @@ def print_oracle(oracle, nuxmv_state_name='s', init_state=None): if init_state is None: init_state = oracle.initial_state - adj_lists = van.graph_adjacency_lists(oracle) + adj_lists = van.graph_adjacency_lists(oracle, include_rsfx) base_model = print_module(adj_lists, nuxmv_state_name=nuxmv_state_name, init_state=init_state)
Reflected addition of include_rsfx option in analysis.
py
diff --git a/tests/test_pystmark.py b/tests/test_pystmark.py index <HASH>..<HASH> 100644 --- a/tests/test_pystmark.py +++ b/tests/test_pystmark.py @@ -363,7 +363,7 @@ class MessageTest(SenderTestBase): cc='dog,cat', bcc='foo,bar', subject='dogs', track_opens=True, headers=[dict(Name='Food', Value='7')], attachments=[], sender='admin', tag='tag', - template_id='template_id', template_model='template_model') + template_id='template_id', template_alias='template_alias', template_model='template_model') self.assertEqual(sorted(msg), sorted(Message._fields)) self.assertNotRaises(TypeError, Message.load_message, msg) self.assertNotRaises(MessageError, Message.load_message, msg,
Fixes test that compares fields to include template_alias
py
diff --git a/visidata/vdobj.py b/visidata/vdobj.py index <HASH>..<HASH> 100644 --- a/visidata/vdobj.py +++ b/visidata/vdobj.py @@ -17,6 +17,9 @@ def asyncthread(func): 'Function decorator, to make calls to `func()` spawn a separate thread if available.' @wraps(func) def _execAsync(*args, **kwargs): + if args and isinstance(args[0], visidata.BaseSheet): #1136: allow cancel of async methods on Sheet + if 'sheet' not in kwargs: + kwargs['sheet'] = args[0] return visidata.vd.execAsync(func, *args, **kwargs) return _execAsync
[async] allow cancel of async methods on Sheet #<I>
py
diff --git a/craftai/pandas/client.py b/craftai/pandas/client.py index <HASH>..<HASH> 100644 --- a/craftai/pandas/client.py +++ b/craftai/pandas/client.py @@ -45,6 +45,14 @@ class Client(VanillaClient): index=pd.to_datetime([operation["timestamp"] for operation in operations_list], unit="s") ) + def get_state_history(self, agent_id, start=None, end=None): + state_history = super(Client, self).get_state_history(agent_id, start, end) + + return pd.DataFrame( + [state["sample"] for state in state_history], + index=pd.to_datetime([state["timestamp"] for state in state_history], unit="s") + ) + @staticmethod def decide_from_contexts_df(tree, contexts_df): return Interpreter.decide_from_contexts_df(tree, contexts_df)
add get state history for panda client
py
diff --git a/photon.py b/photon.py index <HASH>..<HASH> 100644 --- a/photon.py +++ b/photon.py @@ -291,7 +291,7 @@ def is_link(url): conclusion = False # whether the the url should be crawled or not if url not in processed: # if the url hasn't been crawled already - if ('.png' or '.jpg' or '.jpeg' or '.js' or '.css' or '.pdf' or '.ico' or '.bmp' or '.svg' or '.json' or '.xml') in url: + if '.png' in url or '.jpg' in url or '.jpeg' in url or '.js' in url or '.css' in url or '.pdf' in url or '.ico' in url or '.bmp' in url or '.svg' in url or '.json' in url or '.xml' in url: files.add(url) else: return True # url can be crawled @@ -452,7 +452,7 @@ if not only_urls: intel.add(x) for url in external: - if ('github.com' or 'facebook.com' or 'instagram.com' or 'youtube.com') in url: + if 'github.com' in url or 'facebook.com' in url or 'instagram.com' in url or 'youtube.com' in url: intel.add(url) now = time.time() # records the time at which crawling stopped
Resolves #<I>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,8 @@ from setuptools import setup +from maltego_trx.maltego import VERSION setup(name='maltego-trx', - version='1.3', + version=VERSION, description='Python library used to develop Maltego transforms', url='https://github.com/paterva/maltego-trx/', author='Paterva Staff',
Import version number from main maltego script
py
diff --git a/foyer/forcefield.py b/foyer/forcefield.py index <HASH>..<HASH> 100755 --- a/foyer/forcefield.py +++ b/foyer/forcefield.py @@ -434,7 +434,9 @@ class Forcefield(app.ForceField): #kwargs['switchDistance'] = None #system = self.createSystem(topology, *args, **kwargs) # Option 2: Explicitly specify switchDistance - system = self.createSystem(topology, switchDistance=None, *args, **kwargs) + #system = self.createSystem(topology, switchDistance=None, *args, **kwargs) + # Option 3: Default kwarg in createSystem + system = self.createSystem(topology, *args, **kwargs) _separate_urey_bradleys(system, topology) structure = pmd.openmm.load_topology(topology=topology, system=system) @@ -563,6 +565,7 @@ class Forcefield(app.ForceField): def createSystem(self, topology, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0 * u.nanometer, constraints=None, rigidWater=True, removeCMMotion=True, hydrogenMass=None, + switchDistance=None, **args): """Construct an OpenMM System representing a Topology with this force field. @@ -597,7 +600,7 @@ class Forcefield(app.ForceField): system the newly created System """ - + args['switchDistance'] = None # Overwrite previous _SystemData object self._SystemData = app.ForceField._SystemData()
Make a default kwarg in foyer.FF.createSystem() for switchDistance that gets included in the args that get passed to force.createForce(). I like this option more
py
diff --git a/airflow/models.py b/airflow/models.py index <HASH>..<HASH> 100755 --- a/airflow/models.py +++ b/airflow/models.py @@ -3149,7 +3149,7 @@ class DAG(BaseDag, LoggingMixin): d['pickle_len'] = len(pickled) d['pickling_duration'] = "{}".format(datetime.now() - dttm) except Exception as e: - logging.exception(e) + logging.debug(e) d['is_picklable'] = False d['stacktrace'] = traceback.format_exc() return d
[AIRFLOW-<I>] Set pickle_info log to debug pickle_info tries to pickle. If it catches an exception it is assumed that the DAG is not pickable and continues. Therefore, it should log to debug instead and not provide a full stacktrace. Closes #<I> from bolkedebruin/AIRFLOW-<I>
py
diff --git a/tests/unit/models/physics/DiffusiveConductanceTest.py b/tests/unit/models/physics/DiffusiveConductanceTest.py index <HASH>..<HASH> 100644 --- a/tests/unit/models/physics/DiffusiveConductanceTest.py +++ b/tests/unit/models/physics/DiffusiveConductanceTest.py @@ -26,9 +26,9 @@ class DiffusiveConductanceTest: assert sp.allclose(a=self.phys['throat.conductance1'][0], b=0.00084552) - self.phys.models.add(propname='throat.conductance2', - model=mod, - calc_pore_len=True) + self.phys.add_model(propname='throat.conductance2', + model=mod, + calc_pore_len=True) self.phys.regenerate_models() assert sp.allclose(a=self.phys['throat.conductance2'][0], b=0.00084552)
DiffusiveConductanceTests working
py
diff --git a/vex/run.py b/vex/run.py index <HASH>..<HASH> 100644 --- a/vex/run.py +++ b/vex/run.py @@ -3,6 +3,7 @@ import os import platform import subprocess +import distutils.spawn from vex import exceptions @@ -69,6 +70,10 @@ def run(command, env, cwd): assert command if cwd: assert os.path.exists(cwd) + if platform.system() == "Windows": + exe = distutils.spawn.find_executable(command[0], path=env['PATH']) + if exe: + command[0] = exe try: process = subprocess.Popen(command, env=env, cwd=cwd) process.wait()
attempted fix of windows totally disregarding PATH in Popen
py
diff --git a/Lib/glyphsLib/builder/glyph.py b/Lib/glyphsLib/builder/glyph.py index <HASH>..<HASH> 100644 --- a/Lib/glyphsLib/builder/glyph.py +++ b/Lib/glyphsLib/builder/glyph.py @@ -261,6 +261,8 @@ def to_glyphs_glyph(self, ufo_glyph, ufo_layer, master): # noqa: C901 def to_ufo_glyph_height_and_vertical_origin(self, ufo_glyph, layer): + # implentation based on: + # https://github.com/googlefonts/glyphsLib/issues/557#issuecomment-667074856 if not self.is_vertical: return
Add comment mentioning where implementation came from
py
diff --git a/shoebot/data/bezier.py b/shoebot/data/bezier.py index <HASH>..<HASH> 100644 --- a/shoebot/data/bezier.py +++ b/shoebot/data/bezier.py @@ -163,7 +163,6 @@ class BezierPath(Grob): if self._center: return self._center - # get the center point (x1,y1,x2,y2) = self._get_bounds() x = (x1 + x2) / 2 @@ -518,6 +517,7 @@ class BezierPath(Grob): def __len__(self): return len(self._elements) + bounds = property(_get_bounds) contours = property(_get_contours) length = property(_get_length)
Make bounds a property, as nodebox-gl does
py
diff --git a/examples/step-by-step/sbs_motion_commander.py b/examples/step-by-step/sbs_motion_commander.py index <HASH>..<HASH> 100644 --- a/examples/step-by-step/sbs_motion_commander.py +++ b/examples/step-by-step/sbs_motion_commander.py @@ -86,7 +86,6 @@ def move_linear_simple(scf): def take_off_simple(scf): with MotionCommander(scf, default_height=DEFAULT_HEIGHT) as mc: time.sleep(3) - mc.stop() def log_pos_callback(timestamp, data, logconf):
update the example to fit the tutorial in the docs
py
diff --git a/py/selenium/webdriver/blackberry/webdriver.py b/py/selenium/webdriver/blackberry/webdriver.py index <HASH>..<HASH> 100644 --- a/py/selenium/webdriver/blackberry/webdriver.py +++ b/py/selenium/webdriver/blackberry/webdriver.py @@ -52,6 +52,11 @@ class WebDriver(RemoteWebDriver): """ def __init__(self, device_password, bb_tools_dir=None, hostip='169.254.0.1', port=1338, desired_capabilities={}): + import warnings + warnings.warn('BlackBerry Driver is no longer supported and will be ' + 'removed in future versions', + DeprecationWarning, stacklevel=2) + remote_addr = 'http://{}:{}'.format(hostip, port) filename = 'blackberry-deploy'
[py] Deprecate Blackberry Driver support
py
diff --git a/tools/voltdb-install.py b/tools/voltdb-install.py index <HASH>..<HASH> 100755 --- a/tools/voltdb-install.py +++ b/tools/voltdb-install.py @@ -292,7 +292,7 @@ Provides: %(provides)s Conflicts: %(conflicts)s Requires: libgcc >= 4.1.2, libstdc++ >= 4.1.2, python >= 2.6 Requires: java >= 1:1.7.0 -Requires: java7-devel >= 1:1.7.0 +Requires: java-devel >= 1:1.7.0 Summary: VoltDB is a blazingly fast in memory (IMDB) NewSQL database system. Prefix: %(prefix)s
for rpm fix dependency for java-devel jdk
py
diff --git a/bitex/api/rest.py b/bitex/api/rest.py index <HASH>..<HASH> 100644 --- a/bitex/api/rest.py +++ b/bitex/api/rest.py @@ -14,8 +14,6 @@ try: import pyjwt as jwt jwt = True except ImportError: - logging.getLogger().error("Could not find PYJWT! Please download from " - "https://github.com/jpadilla/pyjwt/ or via pip!") jwt = False # Import Homebrew
made import jwt check quiet, only raises when attempted to load Quoine api and pyjwt is not installed
py
diff --git a/uni_form/helpers.py b/uni_form/helpers.py index <HASH>..<HASH> 100644 --- a/uni_form/helpers.py +++ b/uni_form/helpers.py @@ -3,6 +3,9 @@ elements, and UI elements to forms generated via the uni_form template tag. """ +import logging +import sys + from django.core.urlresolvers import reverse, NoReverseMatch from django.forms.forms import BoundField from django.template.loader import render_to_string
Missed importing the logging and sys libraries
py
diff --git a/discord/client.py b/discord/client.py index <HASH>..<HASH> 100644 --- a/discord/client.py +++ b/discord/client.py @@ -389,6 +389,12 @@ class Client: """|coro| Logs out of Discord and closes all connections. + + .. note:: + + This is just an alias to :meth:`close`. If you want + to do extraneous cleanup when subclassing, it is suggested + to override :meth:`close` instead. """ await self.close()
Add note about overriding Client.close vs Client.logout
py
diff --git a/usb/legacy.py b/usb/legacy.py index <HASH>..<HASH> 100644 --- a/usb/legacy.py +++ b/usb/legacy.py @@ -303,7 +303,11 @@ class Device(object): self.deviceClass = dev.bDeviceClass self.deviceSubClass = dev.bDeviceSubClass self.deviceProtocol = dev.bDeviceProtocol - self.deviceVersion = dev.bcdDevice + self.deviceVersion = str((dev.bcdDevice >> 12) & 0xf) + \ + str((dev.bcdDevice >> 8) & 0xf) + \ + '.' + \ + str((dev.bcdDevice >> 4) & 0xf) + \ + str(dev.bcdDevice & 0xf) self.devnum = None self.filename = '' self.iManufacturer = dev.iManufacturer @@ -312,7 +316,11 @@ class Device(object): self.idProduct = dev.idProduct self.idVendor = dev.idVendor self.maxPacketSize = dev.bMaxPacketSize0 - self.usbVersion = dev.bcdUSB + self.usbVersion = str((dev.bcdUSB >> 12) & 0xf) + \ + str((dev.bcdUSB >> 8) & 0xf) + \ + '.' + \ + str((dev.bcdUSB >> 4) & 0xf) + \ + str(dev.bcdUSB & 0xf) self.configurations = [Configuration(c) for c in dev] self.dev = dev
Return deviceVersion and usbVersion as strings PyUSB <I> sets deviceVerion and usbVersion attributes as strings. So do it for the legacy module.
py
diff --git a/pysle/isletool.py b/pysle/isletool.py index <HASH>..<HASH> 100644 --- a/pysle/isletool.py +++ b/pysle/isletool.py @@ -302,7 +302,6 @@ def _parsePronunciation(pronunciationStr): ''' retList = [] for syllableTxt in pronunciationStr.split("#"): - syllableTxt = syllableTxt.strip() if syllableTxt == "": continue syllableList = [x.split() for x in syllableTxt.split(' . ')]
BUGFIX: Wasn't properly detecting syllable boundaries However, the only syllable boundaries that were being detected were wrong--syllable boundaries that also appear at word boundaries. Only four appeared in the ISLEdict
py
diff --git a/astrodbkit/astrodb.py b/astrodbkit/astrodb.py index <HASH>..<HASH> 100755 --- a/astrodbkit/astrodb.py +++ b/astrodbkit/astrodb.py @@ -1285,7 +1285,7 @@ def convert_spectrum(File): try: # Get the data spectrum, header = pf.getdata(File, cache=True, header=True) - + # Check the key type KEY_TYPE = ['CTYPE1'] setType = set(KEY_TYPE).intersection(set(header.keys())) @@ -1299,12 +1299,14 @@ def convert_spectrum(File): spectrum = __get_spec(spectrum, header, File) # Generate wl axis when needed - if not spectrum[0]: spectrum[0] = __create_waxis(header, len(spectrum[1]), File) + if not isinstance(spectrum[0],np.ndarray): + spectrum[0] = __create_waxis(header, len(spectrum[1]), File) # If no wl axis generated, then clear out all retrieved data for object - if not spectrum[0]: spectrum = None + if not isinstance(spectrum[0],np.ndarray): + spectrum = None - except: + except IOError: # Check if the FITS file is just Numpy arrays try: spectrum, header = pf.getdata(File, cache=True, header=True)
Fixed __get_spec() method so that wavelength arrays are properly generated from FITS files with one axis.
py
diff --git a/ryu/controller/ofp_handler.py b/ryu/controller/ofp_handler.py index <HASH>..<HASH> 100644 --- a/ryu/controller/ofp_handler.py +++ b/ryu/controller/ofp_handler.py @@ -206,14 +206,6 @@ class OFPHandler(ryu.base.app_manager.RyuApp): else: datapath.ports = {} - ofproto = datapath.ofproto - ofproto_parser = datapath.ofproto_parser - set_config = ofproto_parser.OFPSetConfig( - datapath, ofproto.OFPC_FRAG_NORMAL, - 128 # TODO:XXX - ) - datapath.send_msg(set_config) - if datapath.ofproto.OFP_VERSION < 0x04: self.logger.debug('move onto main mode') ev.msg.datapath.set_state(MAIN_DISPATCHER)
ofp_handler: Remove sending SET_CONFIG message Because Ryu always sends SET_CONFIG message when a datapath connecting, the switch configurations of user applications will be overwritten by the default values of Ryu when the datapath re-connecting. This patch removes sending SET_CONFIG message from ofp_handler and avoids this problem.
py
diff --git a/leaflet_storage/base_models.py b/leaflet_storage/base_models.py index <HASH>..<HASH> 100644 --- a/leaflet_storage/base_models.py +++ b/leaflet_storage/base_models.py @@ -369,7 +369,7 @@ class BaseFeature(NamedModel): def to_geojson(self): # transitional method - properties = self.options + properties = {'_storage_options': self.options} properties.update({ 'name': self.name, 'description': self.description,
Old options have to go in _storage_options key (cf yohanboniface/Leaflet.Storage#<I>)
py
diff --git a/isaExplorer/tests/test_exploreISA.py b/isaExplorer/tests/test_exploreISA.py index <HASH>..<HASH> 100644 --- a/isaExplorer/tests/test_exploreISA.py +++ b/isaExplorer/tests/test_exploreISA.py @@ -22,8 +22,7 @@ class TestAppendStudytoISA(TestCase): def test_AppendStudytoISA(self): pathToISATABFile = os.path.join(os.path.dirname(__file__), './test_data/MTBLS1/') study = isaExplorer.isaExplorer.getISAStudy(1,pathToISATABFile) - study_appended = isaExplorer.isaExplorer.appendStudytoISA(study,pathToISATABFile) - self.assertIsNone(study_appended) + self.assertIsNone(isaExplorer.isaExplorer.appendStudytoISA(study,pathToISATABFile)) class TestDropStudyFromISA(TestCase): def test_dropStudyFromISA(self):
fixed unittests .. now all pass
py
diff --git a/pybrightcove/video.py b/pybrightcove/video.py index <HASH>..<HASH> 100644 --- a/pybrightcove/video.py +++ b/pybrightcove/video.py @@ -370,8 +370,11 @@ class Video(object): self.shortDescription = data.get('shortDescription', None) self.longDescription = data.get('longDescription', None) self._FLVURL = data.get('FLVURL', None) - self._videoFullLength = Rendition( - data.get('videoFullLength', None)) + full_length_data = data.get('videoFullLength', None) + if full_length_data: + self._videoFullLength = Rendition(full_length_data) + else: + self._videoFullLength = [] self._creationDate = _convert_tstamp( data.get('creationDate', None)) self._publishedDate = _convert_tstamp(
fixed bug in how full length video Rendiitons were getting set
py
diff --git a/src/jottalib/JFS.py b/src/jottalib/JFS.py index <HASH>..<HASH> 100644 --- a/src/jottalib/JFS.py +++ b/src/jottalib/JFS.py @@ -158,7 +158,7 @@ class JFSFolder(object): def up(self, fileobj_or_path, filename=None): 'Upload a file to current folder and return the new JFSFile' if not isinstance(fileobj_or_path, file): - filename = os.path.basename(fileobj_or_path).decode(sys.getfilesystemencoding()) + filename = os.path.basename(fileobj_or_path) fileobj_or_path = open(fileobj_or_path, 'rb') logging.debug('.up %s -> %s %s', repr(fileobj_or_path), repr(self.path), repr(filename)) r = self.jfs.up(os.path.join(self.path, filename), fileobj_or_path)
dont mess about with file system encoding
py
diff --git a/sdk/eventhub/azure-eventhubs/tests/livetest/synctests/test_eventprocessor.py b/sdk/eventhub/azure-eventhubs/tests/livetest/synctests/test_eventprocessor.py index <HASH>..<HASH> 100644 --- a/sdk/eventhub/azure-eventhubs/tests/livetest/synctests/test_eventprocessor.py +++ b/sdk/eventhub/azure-eventhubs/tests/livetest/synctests/test_eventprocessor.py @@ -87,6 +87,7 @@ def test_loadbalancer_balance(): time.sleep(10) ep2_after_ep1_stopped = len(event_processor2._consumers) event_processor2.stop() + thread2.join() assert ep1_after_start == 2 assert ep2_after_start == 1 @@ -203,6 +204,7 @@ def test_partition_processor(): ep_partitions = len(event_processor._consumers) event_processor.stop() time.sleep(2) + thread.join() assert ep_partitions == 2 assert assert_map["initialize"] == "called" assert event_map['0'] > 1 and event_map['1'] > 1 @@ -454,6 +456,7 @@ def test_partition_processor_process_update_checkpoint_error(): thread.start() time.sleep(2) event_processor.stop() + thread.join() assert isinstance(assert_map["error"], ValueError)
Join thread after it's done (#<I>)
py
diff --git a/src/collectors/ntpd/ntpd.py b/src/collectors/ntpd/ntpd.py index <HASH>..<HASH> 100644 --- a/src/collectors/ntpd/ntpd.py +++ b/src/collectors/ntpd/ntpd.py @@ -76,6 +76,7 @@ class NtpdCollector(diamond.collector.Collector): data['poll'] = {'val': parts[5], 'precision': 0} data['reach'] = {'val': parts[6], 'precision': 0} data['delay'] = {'val': parts[7], 'precision': 6} + data['offset'] = {'val': parts[7], 'precision': 0} data['jitter'] = {'val': parts[9], 'precision': 6} def convert_to_second(when_ntpd_ouput):
Add offset data to ntpd.py Its really easy to gather offset data to send on, by just grabbing in the ntpq query
py
diff --git a/masonite/drivers/QueueAmqpDriver.py b/masonite/drivers/QueueAmqpDriver.py index <HASH>..<HASH> 100644 --- a/masonite/drivers/QueueAmqpDriver.py +++ b/masonite/drivers/QueueAmqpDriver.py @@ -117,7 +117,10 @@ class QueueAmqpDriver(BaseQueueDriver, QueueContract, HasColoredCommands): except AttributeError: obj(*args) - self.success('[\u2713] Job Successfully Processed') + try: + self.success('[\u2713] Job Successfully Processed') + except UnicodeEncodeError: + self.success('[Y] Job Successfully Processed') except Exception as e: self.danger('Job Failed: {}'.format(str(e)))
fixed issue where unicode fix was removed
py
diff --git a/luigi/__init__.py b/luigi/__init__.py index <HASH>..<HASH> 100644 --- a/luigi/__init__.py +++ b/luigi/__init__.py @@ -2,6 +2,7 @@ import task, file, scheduler, parameter, interface, target Task = task.Task ExternalTask = task.ExternalTask +Target = target.Target File = file.File LocalTarget = File # Can't decide what we should call it...
bugfixes related to the restructuring
py
diff --git a/statbank/request.py b/statbank/request.py index <HASH>..<HASH> 100644 --- a/statbank/request.py +++ b/statbank/request.py @@ -31,10 +31,14 @@ class Request: try: # parse error body as json and use message property as error message parsed = self._parsejson(error) - raise RequestError(parsed['message']) from None + exc = RequestError(parsed['message']) + exc.__cause__ = None + raise exc except ValueError: # when error body is not valid json, error might be caused by server - raise StatbankError() from None + exc = StatbankError() + exc.__cause__ = None + raise exc @property def json(self):
Fix raising of exceptions with no context as of PEP <I>
py
diff --git a/fabric/connection.py b/fabric/connection.py index <HASH>..<HASH> 100644 --- a/fabric/connection.py +++ b/fabric/connection.py @@ -217,13 +217,13 @@ class Connection(object): return self.client.open_sftp() def get(self, *args, **kwargs): - """ - Get a remote file to the local filesystem or file-like object. + """ + Get a remote file to the local filesystem or file-like object. - Simply a wrapper for `.Transfer.get`. Please see its documentation for - all details. - """ - return Transfer(self).get(*args, **kwargs) + Simply a wrapper for `.Transfer.get`. Please see its documentation for + all details. + """ + return Transfer(self).get(*args, **kwargs) class Group(list):
Not sure how this got mis-indented
py
diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index <HASH>..<HASH> 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -10,7 +10,7 @@ from _src.independent_outputs import IndependentOutputs, Hierarchical from _src.coregionalize import Coregionalize from _src.ssrbf import SSRBF # TODO: ZD: did you remove this? from _src.ODE_UY import ODE_UY -from _src.ODE_UYC import ODE_UYC +#from _src.ODE_UYC import ODE_UYC ADD THIS FILE TO THE REPO!! from _src.ODE_st import ODE_st # TODO: put this in an init file somewhere #I'm commenting this out because the files were not added. JH. Remember to add the files before commiting
removed import of non-added file (Mu)
py
diff --git a/tests/serialize/test_io.py b/tests/serialize/test_io.py index <HASH>..<HASH> 100644 --- a/tests/serialize/test_io.py +++ b/tests/serialize/test_io.py @@ -22,3 +22,19 @@ def test_read_write(EN): assert r1.string == doc1.string assert r2.string == doc2.string + + [email protected] +def test_left_right(EN): + orig = EN(u'This is a simple test. With a couple of sentences.') + result = Doc(orig.vocab).from_bytes(orig.to_bytes()) + + for word in result: + assert word.head.i == orig[word.i].head.i + if word.head is not word: + assert word.i in [w.i for w in word.head.children] + for child in word.lefts: + assert child.head.i == word.i + for child in word.rights: + assert child.head.i == word.i +
* Add test to check parse is being deserialized properly
py
diff --git a/src/hunter/util.py b/src/hunter/util.py index <HASH>..<HASH> 100644 --- a/src/hunter/util.py +++ b/src/hunter/util.py @@ -199,9 +199,7 @@ def safe_repr(obj, maxdepth=5): # (we don't trust subclasses to do the right thing in __repr__) return repr(obj) elif isinstance(obj, types.MethodType): - self = getattr(obj, 'im_self', None) - if self is None: - self = getattr(obj, '__self__', None) + self = obj.__self__ name = getattr(obj, '__qualname__', None) if name is None: name = obj.__name__
Dooh ... __self__ is available since python <I>.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ def read(fname): return io.open(file_path, encoding='utf-8').read() -version = '0.4.1.dev0' +version = '0.5.0' setuptools.setup(
Preparing release <I>
py
diff --git a/axiom/scheduler.py b/axiom/scheduler.py index <HASH>..<HASH> 100644 --- a/axiom/scheduler.py +++ b/axiom/scheduler.py @@ -122,7 +122,6 @@ class SchedulerMixin: workUnitsPerformed = 0 errors = 0 while workBeingDone and workUnitsPerformed < MAX_WORK_PER_TICK: - workUnitsPerformed += 1 try: workBeingDone = self.store.transact(self._oneTick, now) except _WackyControlFlow, wcf: @@ -130,6 +129,8 @@ class SchedulerMixin: log.err(wcf.failureObject) errors += 1 workBeingDone = True + if workBeingDone: + workUnitsPerformed += 1 x = list(self.store.query(TimedEvent, sort=TimedEvent.time.ascending, limit=1)) if x: self._transientSchedule(x[0].time, now)
Correct value for workUnitsPerformed in the scheduler Author: Ed Rahn Reviewer: glyph Fixes #<I>
py
diff --git a/airflow/models.py b/airflow/models.py index <HASH>..<HASH> 100644 --- a/airflow/models.py +++ b/airflow/models.py @@ -568,6 +568,7 @@ class TaskInstance(Base): self.priority_weight = task.priority_weight_total self.try_number = 1 self.test_mode = False # can be changed when calling 'run' + self.force = False # can be changed when calling 'run' self.unixname = getpass.getuser() if state: self.state = state @@ -926,6 +927,7 @@ class TaskInstance(Base): task = self.task self.pool = pool or task.pool self.test_mode = test_mode + self.force = force session = settings.Session() self.refresh_from_db(session) session.commit()
Making force a task instance member, so it becomes available for operators in runtime.
py
diff --git a/cas_server/admin.py b/cas_server/admin.py index <HASH>..<HASH> 100644 --- a/cas_server/admin.py +++ b/cas_server/admin.py @@ -59,7 +59,7 @@ class ServicePatternAdmin(admin.ModelAdmin): ReplaceAttributValueInline, FilterAttributValueInline ) - list_display = ('pos', 'name', 'pattern', 'proxy', 'single_log_out', 'proxy_callback') + list_display = ('pos', 'name', 'pattern', 'proxy', 'single_log_out', 'proxy_callback', 'restrict_users') admin.site.register(User, UserAdmin)
add restrict_users to service pattern list_display
py
diff --git a/src/requirementslib/models/lockfile.py b/src/requirementslib/models/lockfile.py index <HASH>..<HASH> 100644 --- a/src/requirementslib/models/lockfile.py +++ b/src/requirementslib/models/lockfile.py @@ -29,7 +29,7 @@ class Lockfile(plette.lockfiles.Lockfile): self.dev_requirements = kwargs.pop("dev_requirements", []) self.path = Path(path) if path else None self.newlines = u"\n" - + super(Lockfile, self).__init__(*args, **kwargs) @classmethod def load(cls, path):
Update lockfile init to creat plette lockfile
py
diff --git a/python_modules/dagster/dagster_tests/core_tests/storage_tests/test_addresses_for_version.py b/python_modules/dagster/dagster_tests/core_tests/storage_tests/test_addresses_for_version.py index <HASH>..<HASH> 100644 --- a/python_modules/dagster/dagster_tests/core_tests/storage_tests/test_addresses_for_version.py +++ b/python_modules/dagster/dagster_tests/core_tests/storage_tests/test_addresses_for_version.py @@ -155,9 +155,7 @@ def test_address_operation_using_intermediates_file_system(): == output_value ) - with pytest.raises( - DagsterAddressIOError, match="No such file or directory", - ): + with pytest.raises(DagsterAddressIOError): intermediate_storage.set_intermediate_to_address( context=None, dagster_type=Int, @@ -166,9 +164,7 @@ def test_address_operation_using_intermediates_file_system(): address="invalid_address", ) - with pytest.raises( - DagsterAddressIOError, match="No such file or directory", - ): + with pytest.raises(DagsterAddressIOError): intermediate_storage.get_intermediate_from_address( context=None, dagster_type=Int,
fix test_addresses_for_version on windows Test Plan: pray Reviewers: alangenfeld, cdecarolis, yuhan Reviewed By: alangenfeld Differential Revision: <URL>
py
diff --git a/ginga/rv/main.py b/ginga/rv/main.py index <HASH>..<HASH> 100644 --- a/ginga/rv/main.py +++ b/ginga/rv/main.py @@ -501,7 +501,7 @@ class ReferenceViewer(object): menu_name = "%s [G]" % (plugin_name) spec = Bunch(name=plugin_name, module=plugin_name, ptype='global', tab=plugin_name, - menu=menu_name, + menu=menu_name, category="Custom", workspace='right', pfx=pfx) self.add_plugin_spec(spec) @@ -517,7 +517,8 @@ class ReferenceViewer(object): plugin_name = long_plugin_name pfx = None spec = Bunch(module=plugin_name, workspace='dialogs', - ptype='local', hidden=False, pfx=pfx) + ptype='local', category="Custom", + hidden=False, pfx=pfx) self.add_plugin_spec(spec) # Sort plugins according to desired order
Assign plugins loaded from the command line to the "Custom" category
py